id
int32 0
252k
| repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
|
|---|---|---|---|---|---|---|---|---|---|---|---|
22,200
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
index_last_dim_with_indices
|
def index_last_dim_with_indices(x, indices):
"""Use indices to index into the last axis of x.
This can be useful for recovering the actual probabilities of a sample from a
probability distribution.
Args:
x: Tensor, n-d.
indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)
dimensions of x. The values of indices will be used to index into the last
axis of x.
Returns:
Tensor, (n-1)-d.
"""
assert len(x.shape) == len(indices.shape) + 1
x_shape = shape_list(x)
vocab_size = x_shape[-1]
flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size])
flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])])
idx = tf.stack(
[
tf.range(tf.to_int64(shape_list(flat_indices)[0])),
tf.to_int64(flat_indices)
],
axis=1)
flat_x_idx = tf.gather_nd(flat_x, idx)
x_idx = tf.reshape(flat_x_idx, x_shape[:-1])
return x_idx
|
python
|
def index_last_dim_with_indices(x, indices):
"""Use indices to index into the last axis of x.
This can be useful for recovering the actual probabilities of a sample from a
probability distribution.
Args:
x: Tensor, n-d.
indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)
dimensions of x. The values of indices will be used to index into the last
axis of x.
Returns:
Tensor, (n-1)-d.
"""
assert len(x.shape) == len(indices.shape) + 1
x_shape = shape_list(x)
vocab_size = x_shape[-1]
flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size])
flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])])
idx = tf.stack(
[
tf.range(tf.to_int64(shape_list(flat_indices)[0])),
tf.to_int64(flat_indices)
],
axis=1)
flat_x_idx = tf.gather_nd(flat_x, idx)
x_idx = tf.reshape(flat_x_idx, x_shape[:-1])
return x_idx
|
[
"def",
"index_last_dim_with_indices",
"(",
"x",
",",
"indices",
")",
":",
"assert",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"len",
"(",
"indices",
".",
"shape",
")",
"+",
"1",
"x_shape",
"=",
"shape_list",
"(",
"x",
")",
"vocab_size",
"=",
"x_shape",
"[",
"-",
"1",
"]",
"flat_x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"[",
"list_product",
"(",
"x_shape",
"[",
":",
"-",
"1",
"]",
")",
",",
"vocab_size",
"]",
")",
"flat_indices",
"=",
"tf",
".",
"reshape",
"(",
"indices",
",",
"[",
"list_product",
"(",
"x_shape",
"[",
":",
"-",
"1",
"]",
")",
"]",
")",
"idx",
"=",
"tf",
".",
"stack",
"(",
"[",
"tf",
".",
"range",
"(",
"tf",
".",
"to_int64",
"(",
"shape_list",
"(",
"flat_indices",
")",
"[",
"0",
"]",
")",
")",
",",
"tf",
".",
"to_int64",
"(",
"flat_indices",
")",
"]",
",",
"axis",
"=",
"1",
")",
"flat_x_idx",
"=",
"tf",
".",
"gather_nd",
"(",
"flat_x",
",",
"idx",
")",
"x_idx",
"=",
"tf",
".",
"reshape",
"(",
"flat_x_idx",
",",
"x_shape",
"[",
":",
"-",
"1",
"]",
")",
"return",
"x_idx"
] |
Use indices to index into the last axis of x.
This can be useful for recovering the actual probabilities of a sample from a
probability distribution.
Args:
x: Tensor, n-d.
indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)
dimensions of x. The values of indices will be used to index into the last
axis of x.
Returns:
Tensor, (n-1)-d.
|
[
"Use",
"indices",
"to",
"index",
"into",
"the",
"last",
"axis",
"of",
"x",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3396-L3429
|
22,201
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
should_generate_summaries
|
def should_generate_summaries():
"""Is this an appropriate context to generate summaries.
Returns:
a boolean
"""
name_scope = tf.contrib.framework.get_name_scope()
if name_scope and "while/" in name_scope:
# Summaries don't work well within tf.while_loop()
return False
if tf.get_variable_scope().reuse:
# Avoid generating separate summaries for different data shards
return False
return True
|
python
|
def should_generate_summaries():
"""Is this an appropriate context to generate summaries.
Returns:
a boolean
"""
name_scope = tf.contrib.framework.get_name_scope()
if name_scope and "while/" in name_scope:
# Summaries don't work well within tf.while_loop()
return False
if tf.get_variable_scope().reuse:
# Avoid generating separate summaries for different data shards
return False
return True
|
[
"def",
"should_generate_summaries",
"(",
")",
":",
"name_scope",
"=",
"tf",
".",
"contrib",
".",
"framework",
".",
"get_name_scope",
"(",
")",
"if",
"name_scope",
"and",
"\"while/\"",
"in",
"name_scope",
":",
"# Summaries don't work well within tf.while_loop()",
"return",
"False",
"if",
"tf",
".",
"get_variable_scope",
"(",
")",
".",
"reuse",
":",
"# Avoid generating separate summaries for different data shards",
"return",
"False",
"return",
"True"
] |
Is this an appropriate context to generate summaries.
Returns:
a boolean
|
[
"Is",
"this",
"an",
"appropriate",
"context",
"to",
"generate",
"summaries",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3432-L3445
|
22,202
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
reshape_like
|
def reshape_like(a, b):
"""Reshapes a to match the shape of b in all but the last dimension."""
ret = tf.reshape(a, tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0))
if not tf.executing_eagerly():
ret.set_shape(b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:])
return ret
|
python
|
def reshape_like(a, b):
"""Reshapes a to match the shape of b in all but the last dimension."""
ret = tf.reshape(a, tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0))
if not tf.executing_eagerly():
ret.set_shape(b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:])
return ret
|
[
"def",
"reshape_like",
"(",
"a",
",",
"b",
")",
":",
"ret",
"=",
"tf",
".",
"reshape",
"(",
"a",
",",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"shape",
"(",
"b",
")",
"[",
":",
"-",
"1",
"]",
",",
"tf",
".",
"shape",
"(",
"a",
")",
"[",
"-",
"1",
":",
"]",
"]",
",",
"0",
")",
")",
"if",
"not",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"ret",
".",
"set_shape",
"(",
"b",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
":",
"-",
"1",
"]",
"+",
"a",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"-",
"1",
":",
"]",
")",
"return",
"ret"
] |
Reshapes a to match the shape of b in all but the last dimension.
|
[
"Reshapes",
"a",
"to",
"match",
"the",
"shape",
"of",
"b",
"in",
"all",
"but",
"the",
"last",
"dimension",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3448-L3453
|
22,203
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
summarize_video
|
def summarize_video(video, prefix, max_outputs=1):
"""Summarize the video using image summaries starting with prefix."""
video_shape = shape_list(video)
if len(video_shape) != 5:
raise ValueError("Assuming videos given as tensors in the format "
"[batch, time, height, width, channels] but got one "
"of shape: %s" % str(video_shape))
if tf.executing_eagerly():
return
if video.get_shape().as_list()[1] is None:
tf.summary.image(
"%s_last_frame" % prefix,
tf.cast(video[:, -1, :, :, :], tf.uint8),
max_outputs=max_outputs)
else:
for k in range(video_shape[1]):
tf.summary.image(
"%s_frame_%d" % (prefix, k),
tf.cast(video[:, k, :, :, :], tf.uint8),
max_outputs=max_outputs)
|
python
|
def summarize_video(video, prefix, max_outputs=1):
"""Summarize the video using image summaries starting with prefix."""
video_shape = shape_list(video)
if len(video_shape) != 5:
raise ValueError("Assuming videos given as tensors in the format "
"[batch, time, height, width, channels] but got one "
"of shape: %s" % str(video_shape))
if tf.executing_eagerly():
return
if video.get_shape().as_list()[1] is None:
tf.summary.image(
"%s_last_frame" % prefix,
tf.cast(video[:, -1, :, :, :], tf.uint8),
max_outputs=max_outputs)
else:
for k in range(video_shape[1]):
tf.summary.image(
"%s_frame_%d" % (prefix, k),
tf.cast(video[:, k, :, :, :], tf.uint8),
max_outputs=max_outputs)
|
[
"def",
"summarize_video",
"(",
"video",
",",
"prefix",
",",
"max_outputs",
"=",
"1",
")",
":",
"video_shape",
"=",
"shape_list",
"(",
"video",
")",
"if",
"len",
"(",
"video_shape",
")",
"!=",
"5",
":",
"raise",
"ValueError",
"(",
"\"Assuming videos given as tensors in the format \"",
"\"[batch, time, height, width, channels] but got one \"",
"\"of shape: %s\"",
"%",
"str",
"(",
"video_shape",
")",
")",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"return",
"if",
"video",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"1",
"]",
"is",
"None",
":",
"tf",
".",
"summary",
".",
"image",
"(",
"\"%s_last_frame\"",
"%",
"prefix",
",",
"tf",
".",
"cast",
"(",
"video",
"[",
":",
",",
"-",
"1",
",",
":",
",",
":",
",",
":",
"]",
",",
"tf",
".",
"uint8",
")",
",",
"max_outputs",
"=",
"max_outputs",
")",
"else",
":",
"for",
"k",
"in",
"range",
"(",
"video_shape",
"[",
"1",
"]",
")",
":",
"tf",
".",
"summary",
".",
"image",
"(",
"\"%s_frame_%d\"",
"%",
"(",
"prefix",
",",
"k",
")",
",",
"tf",
".",
"cast",
"(",
"video",
"[",
":",
",",
"k",
",",
":",
",",
":",
",",
":",
"]",
",",
"tf",
".",
"uint8",
")",
",",
"max_outputs",
"=",
"max_outputs",
")"
] |
Summarize the video using image summaries starting with prefix.
|
[
"Summarize",
"the",
"video",
"using",
"image",
"summaries",
"starting",
"with",
"prefix",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3456-L3475
|
22,204
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
cast_like
|
def cast_like(x, y):
"""Cast x to y's dtype, if necessary."""
x = tf.convert_to_tensor(x)
y = tf.convert_to_tensor(y)
if x.dtype.base_dtype == y.dtype.base_dtype:
return x
cast_x = tf.cast(x, y.dtype)
if cast_x.device != x.device:
x_name = "(eager Tensor)"
try:
x_name = x.name
except AttributeError:
pass
tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'", x_name,
x.device, cast_x.device)
return cast_x
|
python
|
def cast_like(x, y):
"""Cast x to y's dtype, if necessary."""
x = tf.convert_to_tensor(x)
y = tf.convert_to_tensor(y)
if x.dtype.base_dtype == y.dtype.base_dtype:
return x
cast_x = tf.cast(x, y.dtype)
if cast_x.device != x.device:
x_name = "(eager Tensor)"
try:
x_name = x.name
except AttributeError:
pass
tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'", x_name,
x.device, cast_x.device)
return cast_x
|
[
"def",
"cast_like",
"(",
"x",
",",
"y",
")",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"x",
")",
"y",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"y",
")",
"if",
"x",
".",
"dtype",
".",
"base_dtype",
"==",
"y",
".",
"dtype",
".",
"base_dtype",
":",
"return",
"x",
"cast_x",
"=",
"tf",
".",
"cast",
"(",
"x",
",",
"y",
".",
"dtype",
")",
"if",
"cast_x",
".",
"device",
"!=",
"x",
".",
"device",
":",
"x_name",
"=",
"\"(eager Tensor)\"",
"try",
":",
"x_name",
"=",
"x",
".",
"name",
"except",
"AttributeError",
":",
"pass",
"tf",
".",
"logging",
".",
"warning",
"(",
"\"Cast for %s may induce copy from '%s' to '%s'\"",
",",
"x_name",
",",
"x",
".",
"device",
",",
"cast_x",
".",
"device",
")",
"return",
"cast_x"
] |
Cast x to y's dtype, if necessary.
|
[
"Cast",
"x",
"to",
"y",
"s",
"dtype",
"if",
"necessary",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3478-L3495
|
22,205
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
make_even_size
|
def make_even_size(x):
"""Pad x to be even-sized on axis 1 and 2, but only if necessary."""
x_shape = x.get_shape().as_list()
assert len(x_shape) > 2, "Only 3+-dimensional tensors supported."
shape = [dim if dim is not None else -1 for dim in x_shape]
new_shape = x_shape # To make sure constant shapes remain constant.
if x_shape[1] is not None:
new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5))
if x_shape[2] is not None:
new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5))
if shape[1] % 2 == 0 and shape[2] % 2 == 0:
return x
if shape[1] % 2 == 0:
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
x.set_shape(new_shape)
return x
if shape[2] % 2 == 0:
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
x.set_shape(new_shape)
return x
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
x.set_shape(new_shape)
return x
|
python
|
def make_even_size(x):
"""Pad x to be even-sized on axis 1 and 2, but only if necessary."""
x_shape = x.get_shape().as_list()
assert len(x_shape) > 2, "Only 3+-dimensional tensors supported."
shape = [dim if dim is not None else -1 for dim in x_shape]
new_shape = x_shape # To make sure constant shapes remain constant.
if x_shape[1] is not None:
new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5))
if x_shape[2] is not None:
new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5))
if shape[1] % 2 == 0 and shape[2] % 2 == 0:
return x
if shape[1] % 2 == 0:
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
x.set_shape(new_shape)
return x
if shape[2] % 2 == 0:
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
x.set_shape(new_shape)
return x
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
x.set_shape(new_shape)
return x
|
[
"def",
"make_even_size",
"(",
"x",
")",
":",
"x_shape",
"=",
"x",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"assert",
"len",
"(",
"x_shape",
")",
">",
"2",
",",
"\"Only 3+-dimensional tensors supported.\"",
"shape",
"=",
"[",
"dim",
"if",
"dim",
"is",
"not",
"None",
"else",
"-",
"1",
"for",
"dim",
"in",
"x_shape",
"]",
"new_shape",
"=",
"x_shape",
"# To make sure constant shapes remain constant.",
"if",
"x_shape",
"[",
"1",
"]",
"is",
"not",
"None",
":",
"new_shape",
"[",
"1",
"]",
"=",
"2",
"*",
"int",
"(",
"math",
".",
"ceil",
"(",
"x_shape",
"[",
"1",
"]",
"*",
"0.5",
")",
")",
"if",
"x_shape",
"[",
"2",
"]",
"is",
"not",
"None",
":",
"new_shape",
"[",
"2",
"]",
"=",
"2",
"*",
"int",
"(",
"math",
".",
"ceil",
"(",
"x_shape",
"[",
"2",
"]",
"*",
"0.5",
")",
")",
"if",
"shape",
"[",
"1",
"]",
"%",
"2",
"==",
"0",
"and",
"shape",
"[",
"2",
"]",
"%",
"2",
"==",
"0",
":",
"return",
"x",
"if",
"shape",
"[",
"1",
"]",
"%",
"2",
"==",
"0",
":",
"x",
",",
"_",
"=",
"pad_to_same_length",
"(",
"x",
",",
"x",
",",
"final_length_divisible_by",
"=",
"2",
",",
"axis",
"=",
"2",
")",
"x",
".",
"set_shape",
"(",
"new_shape",
")",
"return",
"x",
"if",
"shape",
"[",
"2",
"]",
"%",
"2",
"==",
"0",
":",
"x",
",",
"_",
"=",
"pad_to_same_length",
"(",
"x",
",",
"x",
",",
"final_length_divisible_by",
"=",
"2",
",",
"axis",
"=",
"1",
")",
"x",
".",
"set_shape",
"(",
"new_shape",
")",
"return",
"x",
"x",
",",
"_",
"=",
"pad_to_same_length",
"(",
"x",
",",
"x",
",",
"final_length_divisible_by",
"=",
"2",
",",
"axis",
"=",
"1",
")",
"x",
",",
"_",
"=",
"pad_to_same_length",
"(",
"x",
",",
"x",
",",
"final_length_divisible_by",
"=",
"2",
",",
"axis",
"=",
"2",
")",
"x",
".",
"set_shape",
"(",
"new_shape",
")",
"return",
"x"
] |
Pad x to be even-sized on axis 1 and 2, but only if necessary.
|
[
"Pad",
"x",
"to",
"be",
"even",
"-",
"sized",
"on",
"axis",
"1",
"and",
"2",
"but",
"only",
"if",
"necessary",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3498-L3521
|
22,206
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
instance_norm
|
def instance_norm(x):
"""Instance normalization layer."""
with tf.variable_scope("instance_norm"):
epsilon = 1e-5
mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
scale = tf.get_variable(
"scale", [x.get_shape()[-1]],
initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
offset = tf.get_variable(
"offset", [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0))
out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset
return out
|
python
|
def instance_norm(x):
"""Instance normalization layer."""
with tf.variable_scope("instance_norm"):
epsilon = 1e-5
mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
scale = tf.get_variable(
"scale", [x.get_shape()[-1]],
initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
offset = tf.get_variable(
"offset", [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0))
out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset
return out
|
[
"def",
"instance_norm",
"(",
"x",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"instance_norm\"",
")",
":",
"epsilon",
"=",
"1e-5",
"mean",
",",
"var",
"=",
"tf",
".",
"nn",
".",
"moments",
"(",
"x",
",",
"[",
"1",
",",
"2",
"]",
",",
"keep_dims",
"=",
"True",
")",
"scale",
"=",
"tf",
".",
"get_variable",
"(",
"\"scale\"",
",",
"[",
"x",
".",
"get_shape",
"(",
")",
"[",
"-",
"1",
"]",
"]",
",",
"initializer",
"=",
"tf",
".",
"truncated_normal_initializer",
"(",
"mean",
"=",
"1.0",
",",
"stddev",
"=",
"0.02",
")",
")",
"offset",
"=",
"tf",
".",
"get_variable",
"(",
"\"offset\"",
",",
"[",
"x",
".",
"get_shape",
"(",
")",
"[",
"-",
"1",
"]",
"]",
",",
"initializer",
"=",
"tf",
".",
"constant_initializer",
"(",
"0.0",
")",
")",
"out",
"=",
"scale",
"*",
"tf",
".",
"div",
"(",
"x",
"-",
"mean",
",",
"tf",
".",
"sqrt",
"(",
"var",
"+",
"epsilon",
")",
")",
"+",
"offset",
"return",
"out"
] |
Instance normalization layer.
|
[
"Instance",
"normalization",
"layer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3640-L3652
|
22,207
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
general_conv
|
def general_conv(x,
num_filters=64,
filter_size=7,
stride=1,
stddev=0.02,
padding="VALID",
name="conv",
do_norm="instance",
do_relu=True,
relufactor=0):
"""Generalized convolution layer."""
with tf.variable_scope(name):
x = layers().Conv2D(
num_filters,
filter_size,
stride,
padding,
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
bias_initializer=tf.constant_initializer(0.0))(x)
if do_norm == "layer":
x = layer_norm(x)
elif do_norm == "instance":
x = instance_norm(x)
if do_relu:
if relufactor == 0:
x = tf.nn.relu(x, "relu")
else:
x = lrelu(x, leak=relufactor)
return x
|
python
|
def general_conv(x,
num_filters=64,
filter_size=7,
stride=1,
stddev=0.02,
padding="VALID",
name="conv",
do_norm="instance",
do_relu=True,
relufactor=0):
"""Generalized convolution layer."""
with tf.variable_scope(name):
x = layers().Conv2D(
num_filters,
filter_size,
stride,
padding,
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
bias_initializer=tf.constant_initializer(0.0))(x)
if do_norm == "layer":
x = layer_norm(x)
elif do_norm == "instance":
x = instance_norm(x)
if do_relu:
if relufactor == 0:
x = tf.nn.relu(x, "relu")
else:
x = lrelu(x, leak=relufactor)
return x
|
[
"def",
"general_conv",
"(",
"x",
",",
"num_filters",
"=",
"64",
",",
"filter_size",
"=",
"7",
",",
"stride",
"=",
"1",
",",
"stddev",
"=",
"0.02",
",",
"padding",
"=",
"\"VALID\"",
",",
"name",
"=",
"\"conv\"",
",",
"do_norm",
"=",
"\"instance\"",
",",
"do_relu",
"=",
"True",
",",
"relufactor",
"=",
"0",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
")",
":",
"x",
"=",
"layers",
"(",
")",
".",
"Conv2D",
"(",
"num_filters",
",",
"filter_size",
",",
"stride",
",",
"padding",
",",
"activation",
"=",
"None",
",",
"kernel_initializer",
"=",
"tf",
".",
"truncated_normal_initializer",
"(",
"stddev",
"=",
"stddev",
")",
",",
"bias_initializer",
"=",
"tf",
".",
"constant_initializer",
"(",
"0.0",
")",
")",
"(",
"x",
")",
"if",
"do_norm",
"==",
"\"layer\"",
":",
"x",
"=",
"layer_norm",
"(",
"x",
")",
"elif",
"do_norm",
"==",
"\"instance\"",
":",
"x",
"=",
"instance_norm",
"(",
"x",
")",
"if",
"do_relu",
":",
"if",
"relufactor",
"==",
"0",
":",
"x",
"=",
"tf",
".",
"nn",
".",
"relu",
"(",
"x",
",",
"\"relu\"",
")",
"else",
":",
"x",
"=",
"lrelu",
"(",
"x",
",",
"leak",
"=",
"relufactor",
")",
"return",
"x"
] |
Generalized convolution layer.
|
[
"Generalized",
"convolution",
"layer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3655-L3686
|
22,208
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
patch_discriminator
|
def patch_discriminator(x, filters=64, filter_size=5, n=4,
name="patch_discrim"):
"""Patch descriminator."""
with tf.variable_scope(name):
x_shape = shape_list(x)
spatial_dims = [x_shape[1] // 4, x_shape[2] // 4]
x = tf.random_crop(x, [x_shape[0]] + spatial_dims + [x_shape[3]])
for i in range(n):
x = general_conv(
x=x,
num_filters=filters * 2**i,
filter_size=filter_size,
stride=2 if i != n - 1 else 1,
stddev=0.02,
padding="SAME",
name="c%d" % i,
do_norm="instance" if i != 0 else False,
do_relu=i != n - 1,
relufactor=0.2)
x = tf.reduce_mean(x, [1, 2])
return x
|
python
|
def patch_discriminator(x, filters=64, filter_size=5, n=4,
name="patch_discrim"):
"""Patch descriminator."""
with tf.variable_scope(name):
x_shape = shape_list(x)
spatial_dims = [x_shape[1] // 4, x_shape[2] // 4]
x = tf.random_crop(x, [x_shape[0]] + spatial_dims + [x_shape[3]])
for i in range(n):
x = general_conv(
x=x,
num_filters=filters * 2**i,
filter_size=filter_size,
stride=2 if i != n - 1 else 1,
stddev=0.02,
padding="SAME",
name="c%d" % i,
do_norm="instance" if i != 0 else False,
do_relu=i != n - 1,
relufactor=0.2)
x = tf.reduce_mean(x, [1, 2])
return x
|
[
"def",
"patch_discriminator",
"(",
"x",
",",
"filters",
"=",
"64",
",",
"filter_size",
"=",
"5",
",",
"n",
"=",
"4",
",",
"name",
"=",
"\"patch_discrim\"",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
")",
":",
"x_shape",
"=",
"shape_list",
"(",
"x",
")",
"spatial_dims",
"=",
"[",
"x_shape",
"[",
"1",
"]",
"//",
"4",
",",
"x_shape",
"[",
"2",
"]",
"//",
"4",
"]",
"x",
"=",
"tf",
".",
"random_crop",
"(",
"x",
",",
"[",
"x_shape",
"[",
"0",
"]",
"]",
"+",
"spatial_dims",
"+",
"[",
"x_shape",
"[",
"3",
"]",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"x",
"=",
"general_conv",
"(",
"x",
"=",
"x",
",",
"num_filters",
"=",
"filters",
"*",
"2",
"**",
"i",
",",
"filter_size",
"=",
"filter_size",
",",
"stride",
"=",
"2",
"if",
"i",
"!=",
"n",
"-",
"1",
"else",
"1",
",",
"stddev",
"=",
"0.02",
",",
"padding",
"=",
"\"SAME\"",
",",
"name",
"=",
"\"c%d\"",
"%",
"i",
",",
"do_norm",
"=",
"\"instance\"",
"if",
"i",
"!=",
"0",
"else",
"False",
",",
"do_relu",
"=",
"i",
"!=",
"n",
"-",
"1",
",",
"relufactor",
"=",
"0.2",
")",
"x",
"=",
"tf",
".",
"reduce_mean",
"(",
"x",
",",
"[",
"1",
",",
"2",
"]",
")",
"return",
"x"
] |
Patch descriminator.
|
[
"Patch",
"descriminator",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3689-L3709
|
22,209
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
mean_with_attention
|
def mean_with_attention(x, name, num_heads=4):
"""Mean and attention to reduce spatial dimensions."""
with tf.variable_scope(name):
shape = shape_list(x)
m = tf.reduce_mean(x, [1, 2])
a = layers().Dense(num_heads, name="mean_attn")(x)
s = tf.reshape(a, [shape[0], -1, num_heads])
s = tf.nn.softmax(s, axis=1)
s = tf.reshape(s, shape[:-1] + [1, num_heads])
am = tf.reduce_mean(tf.expand_dims(x, axis=-1) * s, [1, 2])
l = tf.concat([am, tf.expand_dims(m, axis=-1)], axis=-1)
return layers().Dense(2 * shape[-1], name="mean_attn_final")(
tf.reshape(l, [shape[0], (num_heads+1) * shape[-1]]))
|
python
|
def mean_with_attention(x, name, num_heads=4):
"""Mean and attention to reduce spatial dimensions."""
with tf.variable_scope(name):
shape = shape_list(x)
m = tf.reduce_mean(x, [1, 2])
a = layers().Dense(num_heads, name="mean_attn")(x)
s = tf.reshape(a, [shape[0], -1, num_heads])
s = tf.nn.softmax(s, axis=1)
s = tf.reshape(s, shape[:-1] + [1, num_heads])
am = tf.reduce_mean(tf.expand_dims(x, axis=-1) * s, [1, 2])
l = tf.concat([am, tf.expand_dims(m, axis=-1)], axis=-1)
return layers().Dense(2 * shape[-1], name="mean_attn_final")(
tf.reshape(l, [shape[0], (num_heads+1) * shape[-1]]))
|
[
"def",
"mean_with_attention",
"(",
"x",
",",
"name",
",",
"num_heads",
"=",
"4",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
")",
":",
"shape",
"=",
"shape_list",
"(",
"x",
")",
"m",
"=",
"tf",
".",
"reduce_mean",
"(",
"x",
",",
"[",
"1",
",",
"2",
"]",
")",
"a",
"=",
"layers",
"(",
")",
".",
"Dense",
"(",
"num_heads",
",",
"name",
"=",
"\"mean_attn\"",
")",
"(",
"x",
")",
"s",
"=",
"tf",
".",
"reshape",
"(",
"a",
",",
"[",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
",",
"num_heads",
"]",
")",
"s",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"s",
",",
"axis",
"=",
"1",
")",
"s",
"=",
"tf",
".",
"reshape",
"(",
"s",
",",
"shape",
"[",
":",
"-",
"1",
"]",
"+",
"[",
"1",
",",
"num_heads",
"]",
")",
"am",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"expand_dims",
"(",
"x",
",",
"axis",
"=",
"-",
"1",
")",
"*",
"s",
",",
"[",
"1",
",",
"2",
"]",
")",
"l",
"=",
"tf",
".",
"concat",
"(",
"[",
"am",
",",
"tf",
".",
"expand_dims",
"(",
"m",
",",
"axis",
"=",
"-",
"1",
")",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"layers",
"(",
")",
".",
"Dense",
"(",
"2",
"*",
"shape",
"[",
"-",
"1",
"]",
",",
"name",
"=",
"\"mean_attn_final\"",
")",
"(",
"tf",
".",
"reshape",
"(",
"l",
",",
"[",
"shape",
"[",
"0",
"]",
",",
"(",
"num_heads",
"+",
"1",
")",
"*",
"shape",
"[",
"-",
"1",
"]",
"]",
")",
")"
] |
Mean and attention to reduce spatial dimensions.
|
[
"Mean",
"and",
"attention",
"to",
"reduce",
"spatial",
"dimensions",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3712-L3724
|
22,210
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
single_discriminator
|
def single_discriminator(x, filters=128, kernel_size=8,
strides=4, pure_mean=False):
"""A simple single-layer convolutional discriminator."""
with tf.variable_scope("discriminator"):
net = layers().Conv2D(
filters, kernel_size, strides=strides, padding="SAME", name="conv1")(x)
if pure_mean:
net = tf.reduce_mean(net, [1, 2])
else:
net = mean_with_attention(net, "mean_with_attention")
return net
|
python
|
def single_discriminator(x, filters=128, kernel_size=8,
strides=4, pure_mean=False):
"""A simple single-layer convolutional discriminator."""
with tf.variable_scope("discriminator"):
net = layers().Conv2D(
filters, kernel_size, strides=strides, padding="SAME", name="conv1")(x)
if pure_mean:
net = tf.reduce_mean(net, [1, 2])
else:
net = mean_with_attention(net, "mean_with_attention")
return net
|
[
"def",
"single_discriminator",
"(",
"x",
",",
"filters",
"=",
"128",
",",
"kernel_size",
"=",
"8",
",",
"strides",
"=",
"4",
",",
"pure_mean",
"=",
"False",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"discriminator\"",
")",
":",
"net",
"=",
"layers",
"(",
")",
".",
"Conv2D",
"(",
"filters",
",",
"kernel_size",
",",
"strides",
"=",
"strides",
",",
"padding",
"=",
"\"SAME\"",
",",
"name",
"=",
"\"conv1\"",
")",
"(",
"x",
")",
"if",
"pure_mean",
":",
"net",
"=",
"tf",
".",
"reduce_mean",
"(",
"net",
",",
"[",
"1",
",",
"2",
"]",
")",
"else",
":",
"net",
"=",
"mean_with_attention",
"(",
"net",
",",
"\"mean_with_attention\"",
")",
"return",
"net"
] |
A simple single-layer convolutional discriminator.
|
[
"A",
"simple",
"single",
"-",
"layer",
"convolutional",
"discriminator",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3727-L3737
|
22,211
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
double_discriminator
|
def double_discriminator(x, filters1=128, filters2=None,
kernel_size=8, strides=4, pure_mean=False):
"""A convolutional discriminator with 2 layers and concatenated output."""
if filters2 is None:
filters2 = 4 * filters1
with tf.variable_scope("discriminator"):
batch_size = shape_list(x)[0]
net = layers().Conv2D(
filters1, kernel_size, strides=strides, padding="SAME", name="conv1")(x)
if pure_mean:
net1 = tf.reduce_mean(net, [1, 2])
else:
net1 = mean_with_attention(net, "mean_with_attention1")
tf.reshape(net, [batch_size, -1])
net = tf.nn.relu(net)
net = layers().Conv2D(
filters2, kernel_size, strides=strides, padding="SAME", name="conv2")(x)
if pure_mean:
net2 = tf.reduce_mean(net, [1, 2])
else:
net2 = mean_with_attention(net, "mean_with_attention2")
return tf.concat([net1, net2], axis=-1)
|
python
|
def double_discriminator(x, filters1=128, filters2=None,
kernel_size=8, strides=4, pure_mean=False):
"""A convolutional discriminator with 2 layers and concatenated output."""
if filters2 is None:
filters2 = 4 * filters1
with tf.variable_scope("discriminator"):
batch_size = shape_list(x)[0]
net = layers().Conv2D(
filters1, kernel_size, strides=strides, padding="SAME", name="conv1")(x)
if pure_mean:
net1 = tf.reduce_mean(net, [1, 2])
else:
net1 = mean_with_attention(net, "mean_with_attention1")
tf.reshape(net, [batch_size, -1])
net = tf.nn.relu(net)
net = layers().Conv2D(
filters2, kernel_size, strides=strides, padding="SAME", name="conv2")(x)
if pure_mean:
net2 = tf.reduce_mean(net, [1, 2])
else:
net2 = mean_with_attention(net, "mean_with_attention2")
return tf.concat([net1, net2], axis=-1)
|
[
"def",
"double_discriminator",
"(",
"x",
",",
"filters1",
"=",
"128",
",",
"filters2",
"=",
"None",
",",
"kernel_size",
"=",
"8",
",",
"strides",
"=",
"4",
",",
"pure_mean",
"=",
"False",
")",
":",
"if",
"filters2",
"is",
"None",
":",
"filters2",
"=",
"4",
"*",
"filters1",
"with",
"tf",
".",
"variable_scope",
"(",
"\"discriminator\"",
")",
":",
"batch_size",
"=",
"shape_list",
"(",
"x",
")",
"[",
"0",
"]",
"net",
"=",
"layers",
"(",
")",
".",
"Conv2D",
"(",
"filters1",
",",
"kernel_size",
",",
"strides",
"=",
"strides",
",",
"padding",
"=",
"\"SAME\"",
",",
"name",
"=",
"\"conv1\"",
")",
"(",
"x",
")",
"if",
"pure_mean",
":",
"net1",
"=",
"tf",
".",
"reduce_mean",
"(",
"net",
",",
"[",
"1",
",",
"2",
"]",
")",
"else",
":",
"net1",
"=",
"mean_with_attention",
"(",
"net",
",",
"\"mean_with_attention1\"",
")",
"tf",
".",
"reshape",
"(",
"net",
",",
"[",
"batch_size",
",",
"-",
"1",
"]",
")",
"net",
"=",
"tf",
".",
"nn",
".",
"relu",
"(",
"net",
")",
"net",
"=",
"layers",
"(",
")",
".",
"Conv2D",
"(",
"filters2",
",",
"kernel_size",
",",
"strides",
"=",
"strides",
",",
"padding",
"=",
"\"SAME\"",
",",
"name",
"=",
"\"conv2\"",
")",
"(",
"x",
")",
"if",
"pure_mean",
":",
"net2",
"=",
"tf",
".",
"reduce_mean",
"(",
"net",
",",
"[",
"1",
",",
"2",
"]",
")",
"else",
":",
"net2",
"=",
"mean_with_attention",
"(",
"net",
",",
"\"mean_with_attention2\"",
")",
"return",
"tf",
".",
"concat",
"(",
"[",
"net1",
",",
"net2",
"]",
",",
"axis",
"=",
"-",
"1",
")"
] |
A convolutional discriminator with 2 layers and concatenated output.
|
[
"A",
"convolutional",
"discriminator",
"with",
"2",
"layers",
"and",
"concatenated",
"output",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3740-L3761
|
22,212
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
upscale
|
def upscale(inputs, f, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR):
"""Upscaling the image by a factor of f."""
height, width = shape_list(inputs)[1:3] # pylint: disable=unbalanced-tuple-unpacking
return tf.image.resize_images(inputs, (height * f, width * f), method)
|
python
|
def upscale(inputs, f, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR):
"""Upscaling the image by a factor of f."""
height, width = shape_list(inputs)[1:3] # pylint: disable=unbalanced-tuple-unpacking
return tf.image.resize_images(inputs, (height * f, width * f), method)
|
[
"def",
"upscale",
"(",
"inputs",
",",
"f",
",",
"method",
"=",
"tf",
".",
"image",
".",
"ResizeMethod",
".",
"NEAREST_NEIGHBOR",
")",
":",
"height",
",",
"width",
"=",
"shape_list",
"(",
"inputs",
")",
"[",
"1",
":",
"3",
"]",
"# pylint: disable=unbalanced-tuple-unpacking",
"return",
"tf",
".",
"image",
".",
"resize_images",
"(",
"inputs",
",",
"(",
"height",
"*",
"f",
",",
"width",
"*",
"f",
")",
",",
"method",
")"
] |
Upscaling the image by a factor of f.
|
[
"Upscaling",
"the",
"image",
"by",
"a",
"factor",
"of",
"f",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3764-L3767
|
22,213
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
cyclegan_upsample
|
def cyclegan_upsample(net, num_outputs, stride, method="conv2d_transpose"):
"""Upsamples the given inputs.
Args:
net: A Tensor of size [batch_size, height, width, filters].
num_outputs: The number of output filters.
stride: A list of 2 scalars or a 1x2 Tensor indicating the scale,
relative to the inputs, of the output dimensions. For example, if kernel
size is [2, 3], then the output height and width will be twice and three
times the input size.
method: The upsampling method: 'nn_upsample_conv',
'bilinear_upsample_conv', or 'conv2d_transpose'.
Returns:
A Tensor which was upsampled using the specified method.
Raises:
ValueError: if `method` is not recognized.
"""
with tf.variable_scope("upconv"):
net_shape = tf.shape(net)
height = net_shape[1]
width = net_shape[2]
# Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a
# 3x3 "valid" convolution produce an output with the same dimension as the
# input.
spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])
if method == "nn_upsample_conv":
net = tf.image.resize_nearest_neighbor(
net, [stride[0] * height, stride[1] * width])
net = tf.pad(net, spatial_pad_1, "REFLECT")
net = layers().Conv2D(
num_outputs, (3, 3), activation=tf.nn.relu)(net)
elif method == "bilinear_upsample_conv":
net = tf.image.resize_bilinear(net,
[stride[0] * height, stride[1] * width])
net = tf.pad(net, spatial_pad_1, "REFLECT")
net = layers().Conv2D(
num_outputs, (3, 3), activation=tf.nn.relu)(net)
elif method == "conv2d_transpose":
# This corrects 1 pixel offset for images with even width and height.
# conv2d is left aligned and conv2d_transpose is right aligned for even
# sized images (while doing "SAME" padding).
# Note: This doesn"t reflect actual model in paper.
net = layers().Conv2DTranspose(
num_outputs, (3, 3), strides=stride, activation=tf.nn.relu)(net)
net = net[:, 1:, 1:, :]
else:
raise ValueError("Unknown method: [%s]" % method)
return net
|
python
|
def cyclegan_upsample(net, num_outputs, stride, method="conv2d_transpose"):
"""Upsamples the given inputs.
Args:
net: A Tensor of size [batch_size, height, width, filters].
num_outputs: The number of output filters.
stride: A list of 2 scalars or a 1x2 Tensor indicating the scale,
relative to the inputs, of the output dimensions. For example, if kernel
size is [2, 3], then the output height and width will be twice and three
times the input size.
method: The upsampling method: 'nn_upsample_conv',
'bilinear_upsample_conv', or 'conv2d_transpose'.
Returns:
A Tensor which was upsampled using the specified method.
Raises:
ValueError: if `method` is not recognized.
"""
with tf.variable_scope("upconv"):
net_shape = tf.shape(net)
height = net_shape[1]
width = net_shape[2]
# Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a
# 3x3 "valid" convolution produce an output with the same dimension as the
# input.
spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])
if method == "nn_upsample_conv":
net = tf.image.resize_nearest_neighbor(
net, [stride[0] * height, stride[1] * width])
net = tf.pad(net, spatial_pad_1, "REFLECT")
net = layers().Conv2D(
num_outputs, (3, 3), activation=tf.nn.relu)(net)
elif method == "bilinear_upsample_conv":
net = tf.image.resize_bilinear(net,
[stride[0] * height, stride[1] * width])
net = tf.pad(net, spatial_pad_1, "REFLECT")
net = layers().Conv2D(
num_outputs, (3, 3), activation=tf.nn.relu)(net)
elif method == "conv2d_transpose":
# This corrects 1 pixel offset for images with even width and height.
# conv2d is left aligned and conv2d_transpose is right aligned for even
# sized images (while doing "SAME" padding).
# Note: This doesn"t reflect actual model in paper.
net = layers().Conv2DTranspose(
num_outputs, (3, 3), strides=stride, activation=tf.nn.relu)(net)
net = net[:, 1:, 1:, :]
else:
raise ValueError("Unknown method: [%s]" % method)
return net
|
[
"def",
"cyclegan_upsample",
"(",
"net",
",",
"num_outputs",
",",
"stride",
",",
"method",
"=",
"\"conv2d_transpose\"",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"upconv\"",
")",
":",
"net_shape",
"=",
"tf",
".",
"shape",
"(",
"net",
")",
"height",
"=",
"net_shape",
"[",
"1",
"]",
"width",
"=",
"net_shape",
"[",
"2",
"]",
"# Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a",
"# 3x3 \"valid\" convolution produce an output with the same dimension as the",
"# input.",
"spatial_pad_1",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"1",
",",
"1",
"]",
",",
"[",
"1",
",",
"1",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
")",
"if",
"method",
"==",
"\"nn_upsample_conv\"",
":",
"net",
"=",
"tf",
".",
"image",
".",
"resize_nearest_neighbor",
"(",
"net",
",",
"[",
"stride",
"[",
"0",
"]",
"*",
"height",
",",
"stride",
"[",
"1",
"]",
"*",
"width",
"]",
")",
"net",
"=",
"tf",
".",
"pad",
"(",
"net",
",",
"spatial_pad_1",
",",
"\"REFLECT\"",
")",
"net",
"=",
"layers",
"(",
")",
".",
"Conv2D",
"(",
"num_outputs",
",",
"(",
"3",
",",
"3",
")",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
")",
"(",
"net",
")",
"elif",
"method",
"==",
"\"bilinear_upsample_conv\"",
":",
"net",
"=",
"tf",
".",
"image",
".",
"resize_bilinear",
"(",
"net",
",",
"[",
"stride",
"[",
"0",
"]",
"*",
"height",
",",
"stride",
"[",
"1",
"]",
"*",
"width",
"]",
")",
"net",
"=",
"tf",
".",
"pad",
"(",
"net",
",",
"spatial_pad_1",
",",
"\"REFLECT\"",
")",
"net",
"=",
"layers",
"(",
")",
".",
"Conv2D",
"(",
"num_outputs",
",",
"(",
"3",
",",
"3",
")",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
")",
"(",
"net",
")",
"elif",
"method",
"==",
"\"conv2d_transpose\"",
":",
"# This corrects 1 pixel offset for images with even width and height.",
"# conv2d is left aligned and conv2d_transpose is right aligned for even",
"# sized images (while doing \"SAME\" padding).",
"# Note: This doesn\"t reflect actual model in paper.",
"net",
"=",
"layers",
"(",
")",
".",
"Conv2DTranspose",
"(",
"num_outputs",
",",
"(",
"3",
",",
"3",
")",
",",
"strides",
"=",
"stride",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
")",
"(",
"net",
")",
"net",
"=",
"net",
"[",
":",
",",
"1",
":",
",",
"1",
":",
",",
":",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown method: [%s]\"",
"%",
"method",
")",
"return",
"net"
] |
Upsamples the given inputs.
Args:
net: A Tensor of size [batch_size, height, width, filters].
num_outputs: The number of output filters.
stride: A list of 2 scalars or a 1x2 Tensor indicating the scale,
relative to the inputs, of the output dimensions. For example, if kernel
size is [2, 3], then the output height and width will be twice and three
times the input size.
method: The upsampling method: 'nn_upsample_conv',
'bilinear_upsample_conv', or 'conv2d_transpose'.
Returns:
A Tensor which was upsampled using the specified method.
Raises:
ValueError: if `method` is not recognized.
|
[
"Upsamples",
"the",
"given",
"inputs",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3788-L3841
|
22,214
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
td_conv
|
def td_conv(inputs,
filters,
kernel_size,
targeting_count,
targeting_fn,
keep_prob,
is_training,
do_prune=True,
strides=(1, 1),
padding="valid",
data_format="channels_last",
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
name=None,
reuse=None):
"""Apply targeted dropout to the weights of a convolution."""
with tf.variable_scope(name, default_name="td_conv", reuse=reuse):
nhwc = data_format == "channels_last"
in_dim = shape_list(inputs)[-1] if nhwc else shape_list(inputs)[1]
kernel_shape = [kernel_size, kernel_size, in_dim, filters]
w = tf.get_variable(
"DW", shape=kernel_shape, initializer=kernel_initializer)
if use_bias:
b = tf.get_variable("b", shape=[filters], initializer=bias_initializer)
if keep_prob < 1.0:
w = targeted_dropout(
w,
targeting_count,
keep_prob,
targeting_fn,
is_training,
do_prune=do_prune)
if isinstance(strides, int):
strides = [strides, strides]
if isinstance(dilation_rate, int):
dilation_rate = [dilation_rate, dilation_rate]
if nhwc:
strides = [1, strides[0], strides[1], 1]
dilation_rate = [1, dilation_rate[0], dilation_rate[1], 1]
else:
strides = [1, 1, strides[0], strides[1]]
dilation_rate = [1, 1, dilation_rate[0], dilation_rate[1]]
y = tf.nn.conv2d(
inputs,
w,
strides,
padding,
data_format="NHWC" if nhwc else "NCHW",
dilations=dilation_rate,
name=None)
if use_bias:
y += b
if activation:
y = activation(y)
return y
|
python
|
def td_conv(inputs,
filters,
kernel_size,
targeting_count,
targeting_fn,
keep_prob,
is_training,
do_prune=True,
strides=(1, 1),
padding="valid",
data_format="channels_last",
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
name=None,
reuse=None):
"""Apply targeted dropout to the weights of a convolution."""
with tf.variable_scope(name, default_name="td_conv", reuse=reuse):
nhwc = data_format == "channels_last"
in_dim = shape_list(inputs)[-1] if nhwc else shape_list(inputs)[1]
kernel_shape = [kernel_size, kernel_size, in_dim, filters]
w = tf.get_variable(
"DW", shape=kernel_shape, initializer=kernel_initializer)
if use_bias:
b = tf.get_variable("b", shape=[filters], initializer=bias_initializer)
if keep_prob < 1.0:
w = targeted_dropout(
w,
targeting_count,
keep_prob,
targeting_fn,
is_training,
do_prune=do_prune)
if isinstance(strides, int):
strides = [strides, strides]
if isinstance(dilation_rate, int):
dilation_rate = [dilation_rate, dilation_rate]
if nhwc:
strides = [1, strides[0], strides[1], 1]
dilation_rate = [1, dilation_rate[0], dilation_rate[1], 1]
else:
strides = [1, 1, strides[0], strides[1]]
dilation_rate = [1, 1, dilation_rate[0], dilation_rate[1]]
y = tf.nn.conv2d(
inputs,
w,
strides,
padding,
data_format="NHWC" if nhwc else "NCHW",
dilations=dilation_rate,
name=None)
if use_bias:
y += b
if activation:
y = activation(y)
return y
|
[
"def",
"td_conv",
"(",
"inputs",
",",
"filters",
",",
"kernel_size",
",",
"targeting_count",
",",
"targeting_fn",
",",
"keep_prob",
",",
"is_training",
",",
"do_prune",
"=",
"True",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"padding",
"=",
"\"valid\"",
",",
"data_format",
"=",
"\"channels_last\"",
",",
"dilation_rate",
"=",
"(",
"1",
",",
"1",
")",
",",
"activation",
"=",
"None",
",",
"use_bias",
"=",
"True",
",",
"kernel_initializer",
"=",
"None",
",",
"bias_initializer",
"=",
"tf",
".",
"zeros_initializer",
"(",
")",
",",
"name",
"=",
"None",
",",
"reuse",
"=",
"None",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"default_name",
"=",
"\"td_conv\"",
",",
"reuse",
"=",
"reuse",
")",
":",
"nhwc",
"=",
"data_format",
"==",
"\"channels_last\"",
"in_dim",
"=",
"shape_list",
"(",
"inputs",
")",
"[",
"-",
"1",
"]",
"if",
"nhwc",
"else",
"shape_list",
"(",
"inputs",
")",
"[",
"1",
"]",
"kernel_shape",
"=",
"[",
"kernel_size",
",",
"kernel_size",
",",
"in_dim",
",",
"filters",
"]",
"w",
"=",
"tf",
".",
"get_variable",
"(",
"\"DW\"",
",",
"shape",
"=",
"kernel_shape",
",",
"initializer",
"=",
"kernel_initializer",
")",
"if",
"use_bias",
":",
"b",
"=",
"tf",
".",
"get_variable",
"(",
"\"b\"",
",",
"shape",
"=",
"[",
"filters",
"]",
",",
"initializer",
"=",
"bias_initializer",
")",
"if",
"keep_prob",
"<",
"1.0",
":",
"w",
"=",
"targeted_dropout",
"(",
"w",
",",
"targeting_count",
",",
"keep_prob",
",",
"targeting_fn",
",",
"is_training",
",",
"do_prune",
"=",
"do_prune",
")",
"if",
"isinstance",
"(",
"strides",
",",
"int",
")",
":",
"strides",
"=",
"[",
"strides",
",",
"strides",
"]",
"if",
"isinstance",
"(",
"dilation_rate",
",",
"int",
")",
":",
"dilation_rate",
"=",
"[",
"dilation_rate",
",",
"dilation_rate",
"]",
"if",
"nhwc",
":",
"strides",
"=",
"[",
"1",
",",
"strides",
"[",
"0",
"]",
",",
"strides",
"[",
"1",
"]",
",",
"1",
"]",
"dilation_rate",
"=",
"[",
"1",
",",
"dilation_rate",
"[",
"0",
"]",
",",
"dilation_rate",
"[",
"1",
"]",
",",
"1",
"]",
"else",
":",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"strides",
"[",
"0",
"]",
",",
"strides",
"[",
"1",
"]",
"]",
"dilation_rate",
"=",
"[",
"1",
",",
"1",
",",
"dilation_rate",
"[",
"0",
"]",
",",
"dilation_rate",
"[",
"1",
"]",
"]",
"y",
"=",
"tf",
".",
"nn",
".",
"conv2d",
"(",
"inputs",
",",
"w",
",",
"strides",
",",
"padding",
",",
"data_format",
"=",
"\"NHWC\"",
"if",
"nhwc",
"else",
"\"NCHW\"",
",",
"dilations",
"=",
"dilation_rate",
",",
"name",
"=",
"None",
")",
"if",
"use_bias",
":",
"y",
"+=",
"b",
"if",
"activation",
":",
"y",
"=",
"activation",
"(",
"y",
")",
"return",
"y"
] |
Apply targeted dropout to the weights of a convolution.
|
[
"Apply",
"targeted",
"dropout",
"to",
"the",
"weights",
"of",
"a",
"convolution",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3873-L3938
|
22,215
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
targeted_dropout
|
def targeted_dropout(inputs,
k,
keep_prob,
targeting_fn,
is_training,
do_prune=False):
"""Applies targeted dropout.
Applies dropout at a rate of `1 - keep_prob` to only those elements of
`inputs` marked by `targeting_fn`. See below and paper for more detail:
"Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.
Args:
inputs: Tensor, inputs to apply targeted dropout to.
k: Scalar Tensor or python scalar, sets the number of elements to target in
`inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
second argument of `targeting_fn`.
keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
boolean mask the same shape as `inputs` where True indicates an element
will be dropped, and False not.
is_training: bool, indicates whether currently training.
do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
elements of `inputs` expected to be dropped each forwards pass.
Returns:
Tensor, same shape and dtype as `inputs`.
"""
if not is_training and do_prune:
k = tf.round(to_float(k) * to_float(1. - keep_prob))
mask = targeting_fn(inputs, k)
mask = tf.cast(mask, inputs.dtype)
if is_training:
return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask
elif do_prune:
return inputs * (1 - mask)
else:
return inputs
|
python
|
def targeted_dropout(inputs,
k,
keep_prob,
targeting_fn,
is_training,
do_prune=False):
"""Applies targeted dropout.
Applies dropout at a rate of `1 - keep_prob` to only those elements of
`inputs` marked by `targeting_fn`. See below and paper for more detail:
"Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.
Args:
inputs: Tensor, inputs to apply targeted dropout to.
k: Scalar Tensor or python scalar, sets the number of elements to target in
`inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
second argument of `targeting_fn`.
keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
boolean mask the same shape as `inputs` where True indicates an element
will be dropped, and False not.
is_training: bool, indicates whether currently training.
do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
elements of `inputs` expected to be dropped each forwards pass.
Returns:
Tensor, same shape and dtype as `inputs`.
"""
if not is_training and do_prune:
k = tf.round(to_float(k) * to_float(1. - keep_prob))
mask = targeting_fn(inputs, k)
mask = tf.cast(mask, inputs.dtype)
if is_training:
return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask
elif do_prune:
return inputs * (1 - mask)
else:
return inputs
|
[
"def",
"targeted_dropout",
"(",
"inputs",
",",
"k",
",",
"keep_prob",
",",
"targeting_fn",
",",
"is_training",
",",
"do_prune",
"=",
"False",
")",
":",
"if",
"not",
"is_training",
"and",
"do_prune",
":",
"k",
"=",
"tf",
".",
"round",
"(",
"to_float",
"(",
"k",
")",
"*",
"to_float",
"(",
"1.",
"-",
"keep_prob",
")",
")",
"mask",
"=",
"targeting_fn",
"(",
"inputs",
",",
"k",
")",
"mask",
"=",
"tf",
".",
"cast",
"(",
"mask",
",",
"inputs",
".",
"dtype",
")",
"if",
"is_training",
":",
"return",
"inputs",
"*",
"(",
"1",
"-",
"mask",
")",
"+",
"tf",
".",
"nn",
".",
"dropout",
"(",
"inputs",
",",
"keep_prob",
")",
"*",
"mask",
"elif",
"do_prune",
":",
"return",
"inputs",
"*",
"(",
"1",
"-",
"mask",
")",
"else",
":",
"return",
"inputs"
] |
Applies targeted dropout.
Applies dropout at a rate of `1 - keep_prob` to only those elements of
`inputs` marked by `targeting_fn`. See below and paper for more detail:
"Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.
Args:
inputs: Tensor, inputs to apply targeted dropout to.
k: Scalar Tensor or python scalar, sets the number of elements to target in
`inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
second argument of `targeting_fn`.
keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
boolean mask the same shape as `inputs` where True indicates an element
will be dropped, and False not.
is_training: bool, indicates whether currently training.
do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
elements of `inputs` expected to be dropped each forwards pass.
Returns:
Tensor, same shape and dtype as `inputs`.
|
[
"Applies",
"targeted",
"dropout",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3941-L3982
|
22,216
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
FactoredTensor.to_tensor
|
def to_tensor(self):
"""Convert to Tensor."""
a_shape = shape_list(self.a)
b_shape = shape_list(self.b)
inner_dim = b_shape[1]
result_dim = b_shape[0]
flat_a = tf.reshape(self.a, [-1, inner_dim])
product = tf.matmul(flat_a, self.b, transpose_b=True)
product_shape = a_shape[:-1] + [result_dim]
product = tf.reshape(product, product_shape)
product.set_shape(self.a.get_shape().as_list()[:-1] +
[self.b.get_shape()[0]])
return product
|
python
|
def to_tensor(self):
"""Convert to Tensor."""
a_shape = shape_list(self.a)
b_shape = shape_list(self.b)
inner_dim = b_shape[1]
result_dim = b_shape[0]
flat_a = tf.reshape(self.a, [-1, inner_dim])
product = tf.matmul(flat_a, self.b, transpose_b=True)
product_shape = a_shape[:-1] + [result_dim]
product = tf.reshape(product, product_shape)
product.set_shape(self.a.get_shape().as_list()[:-1] +
[self.b.get_shape()[0]])
return product
|
[
"def",
"to_tensor",
"(",
"self",
")",
":",
"a_shape",
"=",
"shape_list",
"(",
"self",
".",
"a",
")",
"b_shape",
"=",
"shape_list",
"(",
"self",
".",
"b",
")",
"inner_dim",
"=",
"b_shape",
"[",
"1",
"]",
"result_dim",
"=",
"b_shape",
"[",
"0",
"]",
"flat_a",
"=",
"tf",
".",
"reshape",
"(",
"self",
".",
"a",
",",
"[",
"-",
"1",
",",
"inner_dim",
"]",
")",
"product",
"=",
"tf",
".",
"matmul",
"(",
"flat_a",
",",
"self",
".",
"b",
",",
"transpose_b",
"=",
"True",
")",
"product_shape",
"=",
"a_shape",
"[",
":",
"-",
"1",
"]",
"+",
"[",
"result_dim",
"]",
"product",
"=",
"tf",
".",
"reshape",
"(",
"product",
",",
"product_shape",
")",
"product",
".",
"set_shape",
"(",
"self",
".",
"a",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
":",
"-",
"1",
"]",
"+",
"[",
"self",
".",
"b",
".",
"get_shape",
"(",
")",
"[",
"0",
"]",
"]",
")",
"return",
"product"
] |
Convert to Tensor.
|
[
"Convert",
"to",
"Tensor",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L2601-L2613
|
22,217
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
WeightNorm._compute_weights
|
def _compute_weights(self):
"""Generate weights with normalization."""
with tf.variable_scope("compute_weights"):
self.layer.kernel = tf.nn.l2_normalize(
self.layer.v, axis=self.norm_axes) * self.layer.g
|
python
|
def _compute_weights(self):
"""Generate weights with normalization."""
with tf.variable_scope("compute_weights"):
self.layer.kernel = tf.nn.l2_normalize(
self.layer.v, axis=self.norm_axes) * self.layer.g
|
[
"def",
"_compute_weights",
"(",
"self",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"compute_weights\"",
")",
":",
"self",
".",
"layer",
".",
"kernel",
"=",
"tf",
".",
"nn",
".",
"l2_normalize",
"(",
"self",
".",
"layer",
".",
"v",
",",
"axis",
"=",
"self",
".",
"norm_axes",
")",
"*",
"self",
".",
"layer",
".",
"g"
] |
Generate weights with normalization.
|
[
"Generate",
"weights",
"with",
"normalization",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L4089-L4093
|
22,218
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
WeightNorm._init_norm
|
def _init_norm(self, weights):
"""Set the norm of the weight vector."""
with tf.variable_scope("init_norm"):
flat = tf.reshape(weights, [-1, self.layer_depth])
return tf.reshape(tf.norm(flat, axis=0), (self.layer_depth,))
|
python
|
def _init_norm(self, weights):
"""Set the norm of the weight vector."""
with tf.variable_scope("init_norm"):
flat = tf.reshape(weights, [-1, self.layer_depth])
return tf.reshape(tf.norm(flat, axis=0), (self.layer_depth,))
|
[
"def",
"_init_norm",
"(",
"self",
",",
"weights",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"init_norm\"",
")",
":",
"flat",
"=",
"tf",
".",
"reshape",
"(",
"weights",
",",
"[",
"-",
"1",
",",
"self",
".",
"layer_depth",
"]",
")",
"return",
"tf",
".",
"reshape",
"(",
"tf",
".",
"norm",
"(",
"flat",
",",
"axis",
"=",
"0",
")",
",",
"(",
"self",
".",
"layer_depth",
",",
")",
")"
] |
Set the norm of the weight vector.
|
[
"Set",
"the",
"norm",
"of",
"the",
"weight",
"vector",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L4095-L4099
|
22,219
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
WeightNorm._data_dep_init
|
def _data_dep_init(self, inputs):
"""Data dependent initialization for eager execution."""
with tf.variable_scope("data_dep_init"):
# Generate data dependent init values
activation = self.layer.activation
self.layer.activation = None
x_init = self.layer.call(inputs)
m_init, v_init = tf.moments(x_init, self.norm_axes)
scale_init = 1. / tf.sqrt(v_init + 1e-10)
# Assign data dependent init values
self.layer.g = self.layer.g * scale_init
self.layer.bias = (-m_init * scale_init)
self.layer.activation = activation
self.initialized = True
|
python
|
def _data_dep_init(self, inputs):
"""Data dependent initialization for eager execution."""
with tf.variable_scope("data_dep_init"):
# Generate data dependent init values
activation = self.layer.activation
self.layer.activation = None
x_init = self.layer.call(inputs)
m_init, v_init = tf.moments(x_init, self.norm_axes)
scale_init = 1. / tf.sqrt(v_init + 1e-10)
# Assign data dependent init values
self.layer.g = self.layer.g * scale_init
self.layer.bias = (-m_init * scale_init)
self.layer.activation = activation
self.initialized = True
|
[
"def",
"_data_dep_init",
"(",
"self",
",",
"inputs",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"data_dep_init\"",
")",
":",
"# Generate data dependent init values",
"activation",
"=",
"self",
".",
"layer",
".",
"activation",
"self",
".",
"layer",
".",
"activation",
"=",
"None",
"x_init",
"=",
"self",
".",
"layer",
".",
"call",
"(",
"inputs",
")",
"m_init",
",",
"v_init",
"=",
"tf",
".",
"moments",
"(",
"x_init",
",",
"self",
".",
"norm_axes",
")",
"scale_init",
"=",
"1.",
"/",
"tf",
".",
"sqrt",
"(",
"v_init",
"+",
"1e-10",
")",
"# Assign data dependent init values",
"self",
".",
"layer",
".",
"g",
"=",
"self",
".",
"layer",
".",
"g",
"*",
"scale_init",
"self",
".",
"layer",
".",
"bias",
"=",
"(",
"-",
"m_init",
"*",
"scale_init",
")",
"self",
".",
"layer",
".",
"activation",
"=",
"activation",
"self",
".",
"initialized",
"=",
"True"
] |
Data dependent initialization for eager execution.
|
[
"Data",
"dependent",
"initialization",
"for",
"eager",
"execution",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L4101-L4116
|
22,220
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
WeightNorm.build
|
def build(self, input_shape=None):
"""Build `Layer`."""
input_shape = tf.TensorShape(input_shape).as_list()
self.input_spec = layers().InputSpec(shape=input_shape)
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = False
if not hasattr(self.layer, "kernel"):
raise ValueError("`WeightNorm` must wrap a layer that"
" contains a `kernel` for weights")
# The kernel's filter or unit dimension is -1
self.layer_depth = int(self.layer.kernel.shape[-1])
self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))
self.layer.v = self.layer.kernel
self.layer.g = self.layer.add_variable(
name="g",
shape=(self.layer_depth,),
initializer=tf.ones_initializer,
dtype=self.layer.kernel.dtype,
trainable=True)
# with ops.control_dependencies([self.layer.g.assign(
# self._init_norm(self.layer.v))]):
# self._compute_weights()
self._compute_weights()
self.layer.built = True
super(WeightNorm, self).build()
self.built = True
|
python
|
def build(self, input_shape=None):
"""Build `Layer`."""
input_shape = tf.TensorShape(input_shape).as_list()
self.input_spec = layers().InputSpec(shape=input_shape)
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = False
if not hasattr(self.layer, "kernel"):
raise ValueError("`WeightNorm` must wrap a layer that"
" contains a `kernel` for weights")
# The kernel's filter or unit dimension is -1
self.layer_depth = int(self.layer.kernel.shape[-1])
self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))
self.layer.v = self.layer.kernel
self.layer.g = self.layer.add_variable(
name="g",
shape=(self.layer_depth,),
initializer=tf.ones_initializer,
dtype=self.layer.kernel.dtype,
trainable=True)
# with ops.control_dependencies([self.layer.g.assign(
# self._init_norm(self.layer.v))]):
# self._compute_weights()
self._compute_weights()
self.layer.built = True
super(WeightNorm, self).build()
self.built = True
|
[
"def",
"build",
"(",
"self",
",",
"input_shape",
"=",
"None",
")",
":",
"input_shape",
"=",
"tf",
".",
"TensorShape",
"(",
"input_shape",
")",
".",
"as_list",
"(",
")",
"self",
".",
"input_spec",
"=",
"layers",
"(",
")",
".",
"InputSpec",
"(",
"shape",
"=",
"input_shape",
")",
"if",
"not",
"self",
".",
"layer",
".",
"built",
":",
"self",
".",
"layer",
".",
"build",
"(",
"input_shape",
")",
"self",
".",
"layer",
".",
"built",
"=",
"False",
"if",
"not",
"hasattr",
"(",
"self",
".",
"layer",
",",
"\"kernel\"",
")",
":",
"raise",
"ValueError",
"(",
"\"`WeightNorm` must wrap a layer that\"",
"\" contains a `kernel` for weights\"",
")",
"# The kernel's filter or unit dimension is -1",
"self",
".",
"layer_depth",
"=",
"int",
"(",
"self",
".",
"layer",
".",
"kernel",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"self",
".",
"norm_axes",
"=",
"list",
"(",
"range",
"(",
"self",
".",
"layer",
".",
"kernel",
".",
"shape",
".",
"ndims",
"-",
"1",
")",
")",
"self",
".",
"layer",
".",
"v",
"=",
"self",
".",
"layer",
".",
"kernel",
"self",
".",
"layer",
".",
"g",
"=",
"self",
".",
"layer",
".",
"add_variable",
"(",
"name",
"=",
"\"g\"",
",",
"shape",
"=",
"(",
"self",
".",
"layer_depth",
",",
")",
",",
"initializer",
"=",
"tf",
".",
"ones_initializer",
",",
"dtype",
"=",
"self",
".",
"layer",
".",
"kernel",
".",
"dtype",
",",
"trainable",
"=",
"True",
")",
"# with ops.control_dependencies([self.layer.g.assign(",
"# self._init_norm(self.layer.v))]):",
"# self._compute_weights()",
"self",
".",
"_compute_weights",
"(",
")",
"self",
".",
"layer",
".",
"built",
"=",
"True",
"super",
"(",
"WeightNorm",
",",
"self",
")",
".",
"build",
"(",
")",
"self",
".",
"built",
"=",
"True"
] |
Build `Layer`.
|
[
"Build",
"Layer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L4118-L4151
|
22,221
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
WeightNorm.call
|
def call(self, inputs):
"""Call `Layer`."""
# if context.executing_eagerly():
# if not self.initialized:
# self._data_dep_init(inputs)
self._compute_weights() # Recompute weights for each forward pass
output = self.layer.call(inputs)
return output
|
python
|
def call(self, inputs):
"""Call `Layer`."""
# if context.executing_eagerly():
# if not self.initialized:
# self._data_dep_init(inputs)
self._compute_weights() # Recompute weights for each forward pass
output = self.layer.call(inputs)
return output
|
[
"def",
"call",
"(",
"self",
",",
"inputs",
")",
":",
"# if context.executing_eagerly():",
"# if not self.initialized:",
"# self._data_dep_init(inputs)",
"self",
".",
"_compute_weights",
"(",
")",
"# Recompute weights for each forward pass",
"output",
"=",
"self",
".",
"layer",
".",
"call",
"(",
"inputs",
")",
"return",
"output"
] |
Call `Layer`.
|
[
"Call",
"Layer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L4153-L4161
|
22,222
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/rl_utils.py
|
compute_mean_reward
|
def compute_mean_reward(rollouts, clipped):
"""Calculate mean rewards from given epoch."""
reward_name = "reward" if clipped else "unclipped_reward"
rewards = []
for rollout in rollouts:
if rollout[-1].done:
rollout_reward = sum(getattr(frame, reward_name) for frame in rollout)
rewards.append(rollout_reward)
if rewards:
mean_rewards = np.mean(rewards)
else:
mean_rewards = 0
return mean_rewards
|
python
|
def compute_mean_reward(rollouts, clipped):
"""Calculate mean rewards from given epoch."""
reward_name = "reward" if clipped else "unclipped_reward"
rewards = []
for rollout in rollouts:
if rollout[-1].done:
rollout_reward = sum(getattr(frame, reward_name) for frame in rollout)
rewards.append(rollout_reward)
if rewards:
mean_rewards = np.mean(rewards)
else:
mean_rewards = 0
return mean_rewards
|
[
"def",
"compute_mean_reward",
"(",
"rollouts",
",",
"clipped",
")",
":",
"reward_name",
"=",
"\"reward\"",
"if",
"clipped",
"else",
"\"unclipped_reward\"",
"rewards",
"=",
"[",
"]",
"for",
"rollout",
"in",
"rollouts",
":",
"if",
"rollout",
"[",
"-",
"1",
"]",
".",
"done",
":",
"rollout_reward",
"=",
"sum",
"(",
"getattr",
"(",
"frame",
",",
"reward_name",
")",
"for",
"frame",
"in",
"rollout",
")",
"rewards",
".",
"append",
"(",
"rollout_reward",
")",
"if",
"rewards",
":",
"mean_rewards",
"=",
"np",
".",
"mean",
"(",
"rewards",
")",
"else",
":",
"mean_rewards",
"=",
"0",
"return",
"mean_rewards"
] |
Calculate mean rewards from given epoch.
|
[
"Calculate",
"mean",
"rewards",
"from",
"given",
"epoch",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L45-L57
|
22,223
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/rl_utils.py
|
evaluate_single_config
|
def evaluate_single_config(
hparams, sampling_temp, max_num_noops, agent_model_dir,
eval_fn=_eval_fn_with_learner
):
"""Evaluate the PPO agent in the real environment."""
tf.logging.info("Evaluating metric %s", get_metric_name(
sampling_temp, max_num_noops, clipped=False
))
eval_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
env = setup_env(
hparams, batch_size=hparams.eval_batch_size, max_num_noops=max_num_noops,
rl_env_max_episode_steps=hparams.eval_rl_env_max_episode_steps,
env_name=hparams.rl_env_name)
env.start_new_epoch(0)
eval_fn(env, hparams, eval_hparams, agent_model_dir, sampling_temp)
rollouts = env.current_epoch_rollouts()
env.close()
return tuple(
compute_mean_reward(rollouts, clipped) for clipped in (True, False)
)
|
python
|
def evaluate_single_config(
hparams, sampling_temp, max_num_noops, agent_model_dir,
eval_fn=_eval_fn_with_learner
):
"""Evaluate the PPO agent in the real environment."""
tf.logging.info("Evaluating metric %s", get_metric_name(
sampling_temp, max_num_noops, clipped=False
))
eval_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
env = setup_env(
hparams, batch_size=hparams.eval_batch_size, max_num_noops=max_num_noops,
rl_env_max_episode_steps=hparams.eval_rl_env_max_episode_steps,
env_name=hparams.rl_env_name)
env.start_new_epoch(0)
eval_fn(env, hparams, eval_hparams, agent_model_dir, sampling_temp)
rollouts = env.current_epoch_rollouts()
env.close()
return tuple(
compute_mean_reward(rollouts, clipped) for clipped in (True, False)
)
|
[
"def",
"evaluate_single_config",
"(",
"hparams",
",",
"sampling_temp",
",",
"max_num_noops",
",",
"agent_model_dir",
",",
"eval_fn",
"=",
"_eval_fn_with_learner",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Evaluating metric %s\"",
",",
"get_metric_name",
"(",
"sampling_temp",
",",
"max_num_noops",
",",
"clipped",
"=",
"False",
")",
")",
"eval_hparams",
"=",
"trainer_lib",
".",
"create_hparams",
"(",
"hparams",
".",
"base_algo_params",
")",
"env",
"=",
"setup_env",
"(",
"hparams",
",",
"batch_size",
"=",
"hparams",
".",
"eval_batch_size",
",",
"max_num_noops",
"=",
"max_num_noops",
",",
"rl_env_max_episode_steps",
"=",
"hparams",
".",
"eval_rl_env_max_episode_steps",
",",
"env_name",
"=",
"hparams",
".",
"rl_env_name",
")",
"env",
".",
"start_new_epoch",
"(",
"0",
")",
"eval_fn",
"(",
"env",
",",
"hparams",
",",
"eval_hparams",
",",
"agent_model_dir",
",",
"sampling_temp",
")",
"rollouts",
"=",
"env",
".",
"current_epoch_rollouts",
"(",
")",
"env",
".",
"close",
"(",
")",
"return",
"tuple",
"(",
"compute_mean_reward",
"(",
"rollouts",
",",
"clipped",
")",
"for",
"clipped",
"in",
"(",
"True",
",",
"False",
")",
")"
] |
Evaluate the PPO agent in the real environment.
|
[
"Evaluate",
"the",
"PPO",
"agent",
"in",
"the",
"real",
"environment",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L77-L97
|
22,224
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/rl_utils.py
|
evaluate_all_configs
|
def evaluate_all_configs(
hparams, agent_model_dir, eval_fn=_eval_fn_with_learner
):
"""Evaluate the agent with multiple eval configurations."""
metrics = {}
# Iterate over all combinations of sampling temperatures and whether to do
# initial no-ops.
for sampling_temp in hparams.eval_sampling_temps:
# Iterate over a set so if eval_max_num_noops == 0 then it's 1 iteration.
for max_num_noops in set([hparams.eval_max_num_noops, 0]):
scores = evaluate_single_config(
hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn
)
for (score, clipped) in zip(scores, (True, False)):
metric_name = get_metric_name(sampling_temp, max_num_noops, clipped)
metrics[metric_name] = score
return metrics
|
python
|
def evaluate_all_configs(
hparams, agent_model_dir, eval_fn=_eval_fn_with_learner
):
"""Evaluate the agent with multiple eval configurations."""
metrics = {}
# Iterate over all combinations of sampling temperatures and whether to do
# initial no-ops.
for sampling_temp in hparams.eval_sampling_temps:
# Iterate over a set so if eval_max_num_noops == 0 then it's 1 iteration.
for max_num_noops in set([hparams.eval_max_num_noops, 0]):
scores = evaluate_single_config(
hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn
)
for (score, clipped) in zip(scores, (True, False)):
metric_name = get_metric_name(sampling_temp, max_num_noops, clipped)
metrics[metric_name] = score
return metrics
|
[
"def",
"evaluate_all_configs",
"(",
"hparams",
",",
"agent_model_dir",
",",
"eval_fn",
"=",
"_eval_fn_with_learner",
")",
":",
"metrics",
"=",
"{",
"}",
"# Iterate over all combinations of sampling temperatures and whether to do",
"# initial no-ops.",
"for",
"sampling_temp",
"in",
"hparams",
".",
"eval_sampling_temps",
":",
"# Iterate over a set so if eval_max_num_noops == 0 then it's 1 iteration.",
"for",
"max_num_noops",
"in",
"set",
"(",
"[",
"hparams",
".",
"eval_max_num_noops",
",",
"0",
"]",
")",
":",
"scores",
"=",
"evaluate_single_config",
"(",
"hparams",
",",
"sampling_temp",
",",
"max_num_noops",
",",
"agent_model_dir",
",",
"eval_fn",
")",
"for",
"(",
"score",
",",
"clipped",
")",
"in",
"zip",
"(",
"scores",
",",
"(",
"True",
",",
"False",
")",
")",
":",
"metric_name",
"=",
"get_metric_name",
"(",
"sampling_temp",
",",
"max_num_noops",
",",
"clipped",
")",
"metrics",
"[",
"metric_name",
"]",
"=",
"score",
"return",
"metrics"
] |
Evaluate the agent with multiple eval configurations.
|
[
"Evaluate",
"the",
"agent",
"with",
"multiple",
"eval",
"configurations",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L100-L117
|
22,225
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/rl_utils.py
|
summarize_metrics
|
def summarize_metrics(eval_metrics_writer, metrics, epoch):
"""Write metrics to summary."""
for (name, value) in six.iteritems(metrics):
summary = tf.Summary()
summary.value.add(tag=name, simple_value=value)
eval_metrics_writer.add_summary(summary, epoch)
eval_metrics_writer.flush()
|
python
|
def summarize_metrics(eval_metrics_writer, metrics, epoch):
"""Write metrics to summary."""
for (name, value) in six.iteritems(metrics):
summary = tf.Summary()
summary.value.add(tag=name, simple_value=value)
eval_metrics_writer.add_summary(summary, epoch)
eval_metrics_writer.flush()
|
[
"def",
"summarize_metrics",
"(",
"eval_metrics_writer",
",",
"metrics",
",",
"epoch",
")",
":",
"for",
"(",
"name",
",",
"value",
")",
"in",
"six",
".",
"iteritems",
"(",
"metrics",
")",
":",
"summary",
"=",
"tf",
".",
"Summary",
"(",
")",
"summary",
".",
"value",
".",
"add",
"(",
"tag",
"=",
"name",
",",
"simple_value",
"=",
"value",
")",
"eval_metrics_writer",
".",
"add_summary",
"(",
"summary",
",",
"epoch",
")",
"eval_metrics_writer",
".",
"flush",
"(",
")"
] |
Write metrics to summary.
|
[
"Write",
"metrics",
"to",
"summary",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L252-L258
|
22,226
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/rl_utils.py
|
full_game_name
|
def full_game_name(short_name):
"""CamelCase game name with mode suffix.
Args:
short_name: snake_case name without mode e.g "crazy_climber"
Returns:
full game name e.g. "CrazyClimberNoFrameskip-v4"
"""
camel_game_name = misc_utils.snakecase_to_camelcase(short_name)
full_name = camel_game_name + ATARI_GAME_MODE
return full_name
|
python
|
def full_game_name(short_name):
"""CamelCase game name with mode suffix.
Args:
short_name: snake_case name without mode e.g "crazy_climber"
Returns:
full game name e.g. "CrazyClimberNoFrameskip-v4"
"""
camel_game_name = misc_utils.snakecase_to_camelcase(short_name)
full_name = camel_game_name + ATARI_GAME_MODE
return full_name
|
[
"def",
"full_game_name",
"(",
"short_name",
")",
":",
"camel_game_name",
"=",
"misc_utils",
".",
"snakecase_to_camelcase",
"(",
"short_name",
")",
"full_name",
"=",
"camel_game_name",
"+",
"ATARI_GAME_MODE",
"return",
"full_name"
] |
CamelCase game name with mode suffix.
Args:
short_name: snake_case name without mode e.g "crazy_climber"
Returns:
full game name e.g. "CrazyClimberNoFrameskip-v4"
|
[
"CamelCase",
"game",
"name",
"with",
"mode",
"suffix",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L270-L281
|
22,227
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/rl_utils.py
|
update_hparams_from_hparams
|
def update_hparams_from_hparams(target_hparams, source_hparams, prefix):
"""Copy a subset of hparams to target_hparams."""
for (param_name, param_value) in six.iteritems(source_hparams.values()):
if param_name.startswith(prefix):
target_hparams.set_hparam(param_name[len(prefix):], param_value)
|
python
|
def update_hparams_from_hparams(target_hparams, source_hparams, prefix):
"""Copy a subset of hparams to target_hparams."""
for (param_name, param_value) in six.iteritems(source_hparams.values()):
if param_name.startswith(prefix):
target_hparams.set_hparam(param_name[len(prefix):], param_value)
|
[
"def",
"update_hparams_from_hparams",
"(",
"target_hparams",
",",
"source_hparams",
",",
"prefix",
")",
":",
"for",
"(",
"param_name",
",",
"param_value",
")",
"in",
"six",
".",
"iteritems",
"(",
"source_hparams",
".",
"values",
"(",
")",
")",
":",
"if",
"param_name",
".",
"startswith",
"(",
"prefix",
")",
":",
"target_hparams",
".",
"set_hparam",
"(",
"param_name",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
",",
"param_value",
")"
] |
Copy a subset of hparams to target_hparams.
|
[
"Copy",
"a",
"subset",
"of",
"hparams",
"to",
"target_hparams",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L316-L320
|
22,228
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/rl_utils.py
|
random_rollout_subsequences
|
def random_rollout_subsequences(rollouts, num_subsequences, subsequence_length):
"""Chooses a random frame sequence of given length from a set of rollouts."""
def choose_subsequence():
# TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over
# frames and not rollouts.
rollout = random.choice(rollouts)
try:
from_index = random.randrange(len(rollout) - subsequence_length + 1)
except ValueError:
# Rollout too short; repeat.
return choose_subsequence()
return rollout[from_index:(from_index + subsequence_length)]
return [choose_subsequence() for _ in range(num_subsequences)]
|
python
|
def random_rollout_subsequences(rollouts, num_subsequences, subsequence_length):
"""Chooses a random frame sequence of given length from a set of rollouts."""
def choose_subsequence():
# TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over
# frames and not rollouts.
rollout = random.choice(rollouts)
try:
from_index = random.randrange(len(rollout) - subsequence_length + 1)
except ValueError:
# Rollout too short; repeat.
return choose_subsequence()
return rollout[from_index:(from_index + subsequence_length)]
return [choose_subsequence() for _ in range(num_subsequences)]
|
[
"def",
"random_rollout_subsequences",
"(",
"rollouts",
",",
"num_subsequences",
",",
"subsequence_length",
")",
":",
"def",
"choose_subsequence",
"(",
")",
":",
"# TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over",
"# frames and not rollouts.",
"rollout",
"=",
"random",
".",
"choice",
"(",
"rollouts",
")",
"try",
":",
"from_index",
"=",
"random",
".",
"randrange",
"(",
"len",
"(",
"rollout",
")",
"-",
"subsequence_length",
"+",
"1",
")",
"except",
"ValueError",
":",
"# Rollout too short; repeat.",
"return",
"choose_subsequence",
"(",
")",
"return",
"rollout",
"[",
"from_index",
":",
"(",
"from_index",
"+",
"subsequence_length",
")",
"]",
"return",
"[",
"choose_subsequence",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"num_subsequences",
")",
"]"
] |
Chooses a random frame sequence of given length from a set of rollouts.
|
[
"Chooses",
"a",
"random",
"frame",
"sequence",
"of",
"given",
"length",
"from",
"a",
"set",
"of",
"rollouts",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L323-L336
|
22,229
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/rl_utils.py
|
make_initial_frame_chooser
|
def make_initial_frame_chooser(
real_env, frame_stack_size, simulation_random_starts,
simulation_flip_first_random_for_beginning,
split=tf.estimator.ModeKeys.TRAIN,
):
"""Make frame chooser.
Args:
real_env: T2TEnv to take initial frames from.
frame_stack_size (int): Number of consecutive frames to extract.
simulation_random_starts (bool): Whether to choose frames at random.
simulation_flip_first_random_for_beginning (bool): Whether to flip the first
frame stack in every batch for the frames at the beginning.
split (tf.estimator.ModeKeys or None): Data split to take the frames from,
None means use all frames.
Returns:
Function batch_size -> initial_frames.
"""
initial_frame_rollouts = real_env.current_epoch_rollouts(
split=split, minimal_rollout_frames=frame_stack_size,
)
def initial_frame_chooser(batch_size):
"""Frame chooser."""
deterministic_initial_frames =\
initial_frame_rollouts[0][:frame_stack_size]
if not simulation_random_starts:
# Deterministic starts: repeat first frames from the first rollout.
initial_frames = [deterministic_initial_frames] * batch_size
else:
# Random starts: choose random initial frames from random rollouts.
initial_frames = random_rollout_subsequences(
initial_frame_rollouts, batch_size, frame_stack_size
)
if simulation_flip_first_random_for_beginning:
# Flip first entry in the batch for deterministic initial frames.
initial_frames[0] = deterministic_initial_frames
return np.stack([
[frame.observation.decode() for frame in initial_frame_stack] # pylint: disable=g-complex-comprehension
for initial_frame_stack in initial_frames
])
return initial_frame_chooser
|
python
|
def make_initial_frame_chooser(
real_env, frame_stack_size, simulation_random_starts,
simulation_flip_first_random_for_beginning,
split=tf.estimator.ModeKeys.TRAIN,
):
"""Make frame chooser.
Args:
real_env: T2TEnv to take initial frames from.
frame_stack_size (int): Number of consecutive frames to extract.
simulation_random_starts (bool): Whether to choose frames at random.
simulation_flip_first_random_for_beginning (bool): Whether to flip the first
frame stack in every batch for the frames at the beginning.
split (tf.estimator.ModeKeys or None): Data split to take the frames from,
None means use all frames.
Returns:
Function batch_size -> initial_frames.
"""
initial_frame_rollouts = real_env.current_epoch_rollouts(
split=split, minimal_rollout_frames=frame_stack_size,
)
def initial_frame_chooser(batch_size):
"""Frame chooser."""
deterministic_initial_frames =\
initial_frame_rollouts[0][:frame_stack_size]
if not simulation_random_starts:
# Deterministic starts: repeat first frames from the first rollout.
initial_frames = [deterministic_initial_frames] * batch_size
else:
# Random starts: choose random initial frames from random rollouts.
initial_frames = random_rollout_subsequences(
initial_frame_rollouts, batch_size, frame_stack_size
)
if simulation_flip_first_random_for_beginning:
# Flip first entry in the batch for deterministic initial frames.
initial_frames[0] = deterministic_initial_frames
return np.stack([
[frame.observation.decode() for frame in initial_frame_stack] # pylint: disable=g-complex-comprehension
for initial_frame_stack in initial_frames
])
return initial_frame_chooser
|
[
"def",
"make_initial_frame_chooser",
"(",
"real_env",
",",
"frame_stack_size",
",",
"simulation_random_starts",
",",
"simulation_flip_first_random_for_beginning",
",",
"split",
"=",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
",",
")",
":",
"initial_frame_rollouts",
"=",
"real_env",
".",
"current_epoch_rollouts",
"(",
"split",
"=",
"split",
",",
"minimal_rollout_frames",
"=",
"frame_stack_size",
",",
")",
"def",
"initial_frame_chooser",
"(",
"batch_size",
")",
":",
"\"\"\"Frame chooser.\"\"\"",
"deterministic_initial_frames",
"=",
"initial_frame_rollouts",
"[",
"0",
"]",
"[",
":",
"frame_stack_size",
"]",
"if",
"not",
"simulation_random_starts",
":",
"# Deterministic starts: repeat first frames from the first rollout.",
"initial_frames",
"=",
"[",
"deterministic_initial_frames",
"]",
"*",
"batch_size",
"else",
":",
"# Random starts: choose random initial frames from random rollouts.",
"initial_frames",
"=",
"random_rollout_subsequences",
"(",
"initial_frame_rollouts",
",",
"batch_size",
",",
"frame_stack_size",
")",
"if",
"simulation_flip_first_random_for_beginning",
":",
"# Flip first entry in the batch for deterministic initial frames.",
"initial_frames",
"[",
"0",
"]",
"=",
"deterministic_initial_frames",
"return",
"np",
".",
"stack",
"(",
"[",
"[",
"frame",
".",
"observation",
".",
"decode",
"(",
")",
"for",
"frame",
"in",
"initial_frame_stack",
"]",
"# pylint: disable=g-complex-comprehension",
"for",
"initial_frame_stack",
"in",
"initial_frames",
"]",
")",
"return",
"initial_frame_chooser"
] |
Make frame chooser.
Args:
real_env: T2TEnv to take initial frames from.
frame_stack_size (int): Number of consecutive frames to extract.
simulation_random_starts (bool): Whether to choose frames at random.
simulation_flip_first_random_for_beginning (bool): Whether to flip the first
frame stack in every batch for the frames at the beginning.
split (tf.estimator.ModeKeys or None): Data split to take the frames from,
None means use all frames.
Returns:
Function batch_size -> initial_frames.
|
[
"Make",
"frame",
"chooser",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L339-L382
|
22,230
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/rl_utils.py
|
absolute_hinge_difference
|
def absolute_hinge_difference(arr1, arr2, min_diff=10, dtype=np.uint8):
"""Point-wise, hinge loss-like, difference between arrays.
Args:
arr1: integer array to compare.
arr2: integer array to compare.
min_diff: minimal difference taken into consideration.
dtype: dtype of returned array.
Returns:
array
"""
diff = np.abs(arr1.astype(np.int) - arr2, dtype=np.int)
return np.maximum(diff - min_diff, 0).astype(dtype)
|
python
|
def absolute_hinge_difference(arr1, arr2, min_diff=10, dtype=np.uint8):
"""Point-wise, hinge loss-like, difference between arrays.
Args:
arr1: integer array to compare.
arr2: integer array to compare.
min_diff: minimal difference taken into consideration.
dtype: dtype of returned array.
Returns:
array
"""
diff = np.abs(arr1.astype(np.int) - arr2, dtype=np.int)
return np.maximum(diff - min_diff, 0).astype(dtype)
|
[
"def",
"absolute_hinge_difference",
"(",
"arr1",
",",
"arr2",
",",
"min_diff",
"=",
"10",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
":",
"diff",
"=",
"np",
".",
"abs",
"(",
"arr1",
".",
"astype",
"(",
"np",
".",
"int",
")",
"-",
"arr2",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"return",
"np",
".",
"maximum",
"(",
"diff",
"-",
"min_diff",
",",
"0",
")",
".",
"astype",
"(",
"dtype",
")"
] |
Point-wise, hinge loss-like, difference between arrays.
Args:
arr1: integer array to compare.
arr2: integer array to compare.
min_diff: minimal difference taken into consideration.
dtype: dtype of returned array.
Returns:
array
|
[
"Point",
"-",
"wise",
"hinge",
"loss",
"-",
"like",
"difference",
"between",
"arrays",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L385-L398
|
22,231
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/rl_utils.py
|
augment_observation
|
def augment_observation(
observation, reward, cum_reward, frame_index, bar_color=None,
header_height=27
):
"""Augments an observation with debug info."""
img = PIL_Image().new(
"RGB", (observation.shape[1], header_height,)
)
draw = PIL_ImageDraw().Draw(img)
draw.text(
(1, 0), "c:{:3}, r:{:3}".format(int(cum_reward), int(reward)),
fill=(255, 0, 0)
)
draw.text(
(1, 15), "f:{:3}".format(int(frame_index)),
fill=(255, 0, 0)
)
header = np.copy(np.asarray(img))
del img
if bar_color is not None:
header[0, :, :] = bar_color
return np.concatenate([header, observation], axis=0)
|
python
|
def augment_observation(
observation, reward, cum_reward, frame_index, bar_color=None,
header_height=27
):
"""Augments an observation with debug info."""
img = PIL_Image().new(
"RGB", (observation.shape[1], header_height,)
)
draw = PIL_ImageDraw().Draw(img)
draw.text(
(1, 0), "c:{:3}, r:{:3}".format(int(cum_reward), int(reward)),
fill=(255, 0, 0)
)
draw.text(
(1, 15), "f:{:3}".format(int(frame_index)),
fill=(255, 0, 0)
)
header = np.copy(np.asarray(img))
del img
if bar_color is not None:
header[0, :, :] = bar_color
return np.concatenate([header, observation], axis=0)
|
[
"def",
"augment_observation",
"(",
"observation",
",",
"reward",
",",
"cum_reward",
",",
"frame_index",
",",
"bar_color",
"=",
"None",
",",
"header_height",
"=",
"27",
")",
":",
"img",
"=",
"PIL_Image",
"(",
")",
".",
"new",
"(",
"\"RGB\"",
",",
"(",
"observation",
".",
"shape",
"[",
"1",
"]",
",",
"header_height",
",",
")",
")",
"draw",
"=",
"PIL_ImageDraw",
"(",
")",
".",
"Draw",
"(",
"img",
")",
"draw",
".",
"text",
"(",
"(",
"1",
",",
"0",
")",
",",
"\"c:{:3}, r:{:3}\"",
".",
"format",
"(",
"int",
"(",
"cum_reward",
")",
",",
"int",
"(",
"reward",
")",
")",
",",
"fill",
"=",
"(",
"255",
",",
"0",
",",
"0",
")",
")",
"draw",
".",
"text",
"(",
"(",
"1",
",",
"15",
")",
",",
"\"f:{:3}\"",
".",
"format",
"(",
"int",
"(",
"frame_index",
")",
")",
",",
"fill",
"=",
"(",
"255",
",",
"0",
",",
"0",
")",
")",
"header",
"=",
"np",
".",
"copy",
"(",
"np",
".",
"asarray",
"(",
"img",
")",
")",
"del",
"img",
"if",
"bar_color",
"is",
"not",
"None",
":",
"header",
"[",
"0",
",",
":",
",",
":",
"]",
"=",
"bar_color",
"return",
"np",
".",
"concatenate",
"(",
"[",
"header",
",",
"observation",
"]",
",",
"axis",
"=",
"0",
")"
] |
Augments an observation with debug info.
|
[
"Augments",
"an",
"observation",
"with",
"debug",
"info",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L402-L423
|
22,232
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/rl_utils.py
|
run_rollouts
|
def run_rollouts(
env, agent, initial_observations, step_limit=None, discount_factor=1.0,
log_every_steps=None, video_writers=(), color_bar=False,
many_rollouts_from_each_env=False
):
"""Runs a batch of rollouts from given initial observations."""
assert step_limit is not None or not many_rollouts_from_each_env, (
"When collecting many rollouts from each environment, time limit must "
"be set."
)
num_dones = 0
first_dones = np.array([False] * env.batch_size)
observations = initial_observations
step_index = 0
cum_rewards = np.zeros(env.batch_size)
for (video_writer, obs_stack) in zip(video_writers, initial_observations):
for (i, ob) in enumerate(obs_stack):
debug_frame = augment_observation(
ob, reward=0, cum_reward=0, frame_index=(-len(obs_stack) + i + 1),
bar_color=((0, 255, 0) if color_bar else None)
)
video_writer.write(debug_frame)
def proceed():
if step_index < step_limit:
return num_dones < env.batch_size or many_rollouts_from_each_env
else:
return False
while proceed():
act_kwargs = {}
if agent.needs_env_state:
act_kwargs["env_state"] = env.state
actions = agent.act(observations, **act_kwargs)
(observations, rewards, dones) = env.step(actions)
observations = list(observations)
now_done_indices = []
for (i, done) in enumerate(dones):
if done and (not first_dones[i] or many_rollouts_from_each_env):
now_done_indices.append(i)
first_dones[i] = True
num_dones += 1
if now_done_indices:
# Unless many_rollouts_from_each_env, reset only envs done the first time
# in this timestep to ensure that we collect exactly 1 rollout from each
# env.
reset_observations = env.reset(now_done_indices)
for (i, observation) in zip(now_done_indices, reset_observations):
observations[i] = observation
observations = np.array(observations)
cum_rewards[~first_dones] = (
cum_rewards[~first_dones] * discount_factor + rewards[~first_dones]
)
step_index += 1
for (video_writer, obs_stack, reward, cum_reward, done) in zip(
video_writers, observations, rewards, cum_rewards, first_dones
):
if done:
continue
ob = obs_stack[-1]
debug_frame = augment_observation(
ob, reward=reward, cum_reward=cum_reward,
frame_index=step_index, bar_color=((255, 0, 0) if color_bar else None)
)
video_writer.write(debug_frame)
# TODO(afrozm): Clean this up with tf.logging.log_every_n
if log_every_steps is not None and step_index % log_every_steps == 0:
tf.logging.info("Step %d, mean_score: %f", step_index, cum_rewards.mean())
return (observations, cum_rewards)
|
python
|
def run_rollouts(
env, agent, initial_observations, step_limit=None, discount_factor=1.0,
log_every_steps=None, video_writers=(), color_bar=False,
many_rollouts_from_each_env=False
):
"""Runs a batch of rollouts from given initial observations."""
assert step_limit is not None or not many_rollouts_from_each_env, (
"When collecting many rollouts from each environment, time limit must "
"be set."
)
num_dones = 0
first_dones = np.array([False] * env.batch_size)
observations = initial_observations
step_index = 0
cum_rewards = np.zeros(env.batch_size)
for (video_writer, obs_stack) in zip(video_writers, initial_observations):
for (i, ob) in enumerate(obs_stack):
debug_frame = augment_observation(
ob, reward=0, cum_reward=0, frame_index=(-len(obs_stack) + i + 1),
bar_color=((0, 255, 0) if color_bar else None)
)
video_writer.write(debug_frame)
def proceed():
if step_index < step_limit:
return num_dones < env.batch_size or many_rollouts_from_each_env
else:
return False
while proceed():
act_kwargs = {}
if agent.needs_env_state:
act_kwargs["env_state"] = env.state
actions = agent.act(observations, **act_kwargs)
(observations, rewards, dones) = env.step(actions)
observations = list(observations)
now_done_indices = []
for (i, done) in enumerate(dones):
if done and (not first_dones[i] or many_rollouts_from_each_env):
now_done_indices.append(i)
first_dones[i] = True
num_dones += 1
if now_done_indices:
# Unless many_rollouts_from_each_env, reset only envs done the first time
# in this timestep to ensure that we collect exactly 1 rollout from each
# env.
reset_observations = env.reset(now_done_indices)
for (i, observation) in zip(now_done_indices, reset_observations):
observations[i] = observation
observations = np.array(observations)
cum_rewards[~first_dones] = (
cum_rewards[~first_dones] * discount_factor + rewards[~first_dones]
)
step_index += 1
for (video_writer, obs_stack, reward, cum_reward, done) in zip(
video_writers, observations, rewards, cum_rewards, first_dones
):
if done:
continue
ob = obs_stack[-1]
debug_frame = augment_observation(
ob, reward=reward, cum_reward=cum_reward,
frame_index=step_index, bar_color=((255, 0, 0) if color_bar else None)
)
video_writer.write(debug_frame)
# TODO(afrozm): Clean this up with tf.logging.log_every_n
if log_every_steps is not None and step_index % log_every_steps == 0:
tf.logging.info("Step %d, mean_score: %f", step_index, cum_rewards.mean())
return (observations, cum_rewards)
|
[
"def",
"run_rollouts",
"(",
"env",
",",
"agent",
",",
"initial_observations",
",",
"step_limit",
"=",
"None",
",",
"discount_factor",
"=",
"1.0",
",",
"log_every_steps",
"=",
"None",
",",
"video_writers",
"=",
"(",
")",
",",
"color_bar",
"=",
"False",
",",
"many_rollouts_from_each_env",
"=",
"False",
")",
":",
"assert",
"step_limit",
"is",
"not",
"None",
"or",
"not",
"many_rollouts_from_each_env",
",",
"(",
"\"When collecting many rollouts from each environment, time limit must \"",
"\"be set.\"",
")",
"num_dones",
"=",
"0",
"first_dones",
"=",
"np",
".",
"array",
"(",
"[",
"False",
"]",
"*",
"env",
".",
"batch_size",
")",
"observations",
"=",
"initial_observations",
"step_index",
"=",
"0",
"cum_rewards",
"=",
"np",
".",
"zeros",
"(",
"env",
".",
"batch_size",
")",
"for",
"(",
"video_writer",
",",
"obs_stack",
")",
"in",
"zip",
"(",
"video_writers",
",",
"initial_observations",
")",
":",
"for",
"(",
"i",
",",
"ob",
")",
"in",
"enumerate",
"(",
"obs_stack",
")",
":",
"debug_frame",
"=",
"augment_observation",
"(",
"ob",
",",
"reward",
"=",
"0",
",",
"cum_reward",
"=",
"0",
",",
"frame_index",
"=",
"(",
"-",
"len",
"(",
"obs_stack",
")",
"+",
"i",
"+",
"1",
")",
",",
"bar_color",
"=",
"(",
"(",
"0",
",",
"255",
",",
"0",
")",
"if",
"color_bar",
"else",
"None",
")",
")",
"video_writer",
".",
"write",
"(",
"debug_frame",
")",
"def",
"proceed",
"(",
")",
":",
"if",
"step_index",
"<",
"step_limit",
":",
"return",
"num_dones",
"<",
"env",
".",
"batch_size",
"or",
"many_rollouts_from_each_env",
"else",
":",
"return",
"False",
"while",
"proceed",
"(",
")",
":",
"act_kwargs",
"=",
"{",
"}",
"if",
"agent",
".",
"needs_env_state",
":",
"act_kwargs",
"[",
"\"env_state\"",
"]",
"=",
"env",
".",
"state",
"actions",
"=",
"agent",
".",
"act",
"(",
"observations",
",",
"*",
"*",
"act_kwargs",
")",
"(",
"observations",
",",
"rewards",
",",
"dones",
")",
"=",
"env",
".",
"step",
"(",
"actions",
")",
"observations",
"=",
"list",
"(",
"observations",
")",
"now_done_indices",
"=",
"[",
"]",
"for",
"(",
"i",
",",
"done",
")",
"in",
"enumerate",
"(",
"dones",
")",
":",
"if",
"done",
"and",
"(",
"not",
"first_dones",
"[",
"i",
"]",
"or",
"many_rollouts_from_each_env",
")",
":",
"now_done_indices",
".",
"append",
"(",
"i",
")",
"first_dones",
"[",
"i",
"]",
"=",
"True",
"num_dones",
"+=",
"1",
"if",
"now_done_indices",
":",
"# Unless many_rollouts_from_each_env, reset only envs done the first time",
"# in this timestep to ensure that we collect exactly 1 rollout from each",
"# env.",
"reset_observations",
"=",
"env",
".",
"reset",
"(",
"now_done_indices",
")",
"for",
"(",
"i",
",",
"observation",
")",
"in",
"zip",
"(",
"now_done_indices",
",",
"reset_observations",
")",
":",
"observations",
"[",
"i",
"]",
"=",
"observation",
"observations",
"=",
"np",
".",
"array",
"(",
"observations",
")",
"cum_rewards",
"[",
"~",
"first_dones",
"]",
"=",
"(",
"cum_rewards",
"[",
"~",
"first_dones",
"]",
"*",
"discount_factor",
"+",
"rewards",
"[",
"~",
"first_dones",
"]",
")",
"step_index",
"+=",
"1",
"for",
"(",
"video_writer",
",",
"obs_stack",
",",
"reward",
",",
"cum_reward",
",",
"done",
")",
"in",
"zip",
"(",
"video_writers",
",",
"observations",
",",
"rewards",
",",
"cum_rewards",
",",
"first_dones",
")",
":",
"if",
"done",
":",
"continue",
"ob",
"=",
"obs_stack",
"[",
"-",
"1",
"]",
"debug_frame",
"=",
"augment_observation",
"(",
"ob",
",",
"reward",
"=",
"reward",
",",
"cum_reward",
"=",
"cum_reward",
",",
"frame_index",
"=",
"step_index",
",",
"bar_color",
"=",
"(",
"(",
"255",
",",
"0",
",",
"0",
")",
"if",
"color_bar",
"else",
"None",
")",
")",
"video_writer",
".",
"write",
"(",
"debug_frame",
")",
"# TODO(afrozm): Clean this up with tf.logging.log_every_n",
"if",
"log_every_steps",
"is",
"not",
"None",
"and",
"step_index",
"%",
"log_every_steps",
"==",
"0",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Step %d, mean_score: %f\"",
",",
"step_index",
",",
"cum_rewards",
".",
"mean",
"(",
")",
")",
"return",
"(",
"observations",
",",
"cum_rewards",
")"
] |
Runs a batch of rollouts from given initial observations.
|
[
"Runs",
"a",
"batch",
"of",
"rollouts",
"from",
"given",
"initial",
"observations",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L426-L499
|
22,233
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/cnn_dailymail.py
|
_maybe_download_corpora
|
def _maybe_download_corpora(tmp_dir, dataset_split):
"""Download corpora if necessary and unzip them.
Args:
tmp_dir: directory containing dataset.
dataset_split: whether we're in train/dev/test mode.
Returns:
List of all files generated and path to file containing
train/dev/test split info.
"""
cnn_filename = "cnn_stories.tgz"
cnn_finalpath = os.path.join(tmp_dir, "cnn/stories/")
dailymail_filename = "dailymail_stories.tgz"
dailymail_finalpath = os.path.join(tmp_dir, "dailymail/stories/")
if not tf.gfile.Exists(cnn_finalpath):
cnn_file = generator_utils.maybe_download_from_drive(
tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)
with tarfile.open(cnn_file, "r:gz") as cnn_tar:
cnn_tar.extractall(tmp_dir)
if not tf.gfile.Exists(dailymail_finalpath):
dailymail_file = generator_utils.maybe_download_from_drive(
tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)
with tarfile.open(dailymail_file, "r:gz") as dailymail_tar:
dailymail_tar.extractall(tmp_dir)
cnn_files = tf.gfile.Glob(cnn_finalpath + "*")
dailymail_files = tf.gfile.Glob(dailymail_finalpath + "*")
all_files = cnn_files + dailymail_files
if dataset_split == problem.DatasetSplit.TRAIN:
urls_path = generator_utils.maybe_download(tmp_dir, "all_train.txt",
_TRAIN_URLS)
elif dataset_split == problem.DatasetSplit.EVAL:
urls_path = generator_utils.maybe_download(tmp_dir, "all_val.txt",
_DEV_URLS)
else:
urls_path = generator_utils.maybe_download(tmp_dir, "all_test.txt",
_TEST_URLS)
return all_files, urls_path
|
python
|
def _maybe_download_corpora(tmp_dir, dataset_split):
"""Download corpora if necessary and unzip them.
Args:
tmp_dir: directory containing dataset.
dataset_split: whether we're in train/dev/test mode.
Returns:
List of all files generated and path to file containing
train/dev/test split info.
"""
cnn_filename = "cnn_stories.tgz"
cnn_finalpath = os.path.join(tmp_dir, "cnn/stories/")
dailymail_filename = "dailymail_stories.tgz"
dailymail_finalpath = os.path.join(tmp_dir, "dailymail/stories/")
if not tf.gfile.Exists(cnn_finalpath):
cnn_file = generator_utils.maybe_download_from_drive(
tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)
with tarfile.open(cnn_file, "r:gz") as cnn_tar:
cnn_tar.extractall(tmp_dir)
if not tf.gfile.Exists(dailymail_finalpath):
dailymail_file = generator_utils.maybe_download_from_drive(
tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)
with tarfile.open(dailymail_file, "r:gz") as dailymail_tar:
dailymail_tar.extractall(tmp_dir)
cnn_files = tf.gfile.Glob(cnn_finalpath + "*")
dailymail_files = tf.gfile.Glob(dailymail_finalpath + "*")
all_files = cnn_files + dailymail_files
if dataset_split == problem.DatasetSplit.TRAIN:
urls_path = generator_utils.maybe_download(tmp_dir, "all_train.txt",
_TRAIN_URLS)
elif dataset_split == problem.DatasetSplit.EVAL:
urls_path = generator_utils.maybe_download(tmp_dir, "all_val.txt",
_DEV_URLS)
else:
urls_path = generator_utils.maybe_download(tmp_dir, "all_test.txt",
_TEST_URLS)
return all_files, urls_path
|
[
"def",
"_maybe_download_corpora",
"(",
"tmp_dir",
",",
"dataset_split",
")",
":",
"cnn_filename",
"=",
"\"cnn_stories.tgz\"",
"cnn_finalpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"\"cnn/stories/\"",
")",
"dailymail_filename",
"=",
"\"dailymail_stories.tgz\"",
"dailymail_finalpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"\"dailymail/stories/\"",
")",
"if",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"cnn_finalpath",
")",
":",
"cnn_file",
"=",
"generator_utils",
".",
"maybe_download_from_drive",
"(",
"tmp_dir",
",",
"cnn_filename",
",",
"_CNN_STORIES_DRIVE_URL",
")",
"with",
"tarfile",
".",
"open",
"(",
"cnn_file",
",",
"\"r:gz\"",
")",
"as",
"cnn_tar",
":",
"cnn_tar",
".",
"extractall",
"(",
"tmp_dir",
")",
"if",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"dailymail_finalpath",
")",
":",
"dailymail_file",
"=",
"generator_utils",
".",
"maybe_download_from_drive",
"(",
"tmp_dir",
",",
"dailymail_filename",
",",
"_DAILYMAIL_STORIES_DRIVE_URL",
")",
"with",
"tarfile",
".",
"open",
"(",
"dailymail_file",
",",
"\"r:gz\"",
")",
"as",
"dailymail_tar",
":",
"dailymail_tar",
".",
"extractall",
"(",
"tmp_dir",
")",
"cnn_files",
"=",
"tf",
".",
"gfile",
".",
"Glob",
"(",
"cnn_finalpath",
"+",
"\"*\"",
")",
"dailymail_files",
"=",
"tf",
".",
"gfile",
".",
"Glob",
"(",
"dailymail_finalpath",
"+",
"\"*\"",
")",
"all_files",
"=",
"cnn_files",
"+",
"dailymail_files",
"if",
"dataset_split",
"==",
"problem",
".",
"DatasetSplit",
".",
"TRAIN",
":",
"urls_path",
"=",
"generator_utils",
".",
"maybe_download",
"(",
"tmp_dir",
",",
"\"all_train.txt\"",
",",
"_TRAIN_URLS",
")",
"elif",
"dataset_split",
"==",
"problem",
".",
"DatasetSplit",
".",
"EVAL",
":",
"urls_path",
"=",
"generator_utils",
".",
"maybe_download",
"(",
"tmp_dir",
",",
"\"all_val.txt\"",
",",
"_DEV_URLS",
")",
"else",
":",
"urls_path",
"=",
"generator_utils",
".",
"maybe_download",
"(",
"tmp_dir",
",",
"\"all_test.txt\"",
",",
"_TEST_URLS",
")",
"return",
"all_files",
",",
"urls_path"
] |
Download corpora if necessary and unzip them.
Args:
tmp_dir: directory containing dataset.
dataset_split: whether we're in train/dev/test mode.
Returns:
List of all files generated and path to file containing
train/dev/test split info.
|
[
"Download",
"corpora",
"if",
"necessary",
"and",
"unzip",
"them",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/cnn_dailymail.py#L67-L107
|
22,234
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/cnn_dailymail.py
|
example_splits
|
def example_splits(url_file, all_files):
"""Generate splits of the data."""
def generate_hash(inp):
"""Generate a sha1 hash to match the raw url to the filename extracted."""
h = hashlib.sha1()
h.update(inp)
return h.hexdigest()
all_files_map = {f.split("/")[-1]: f for f in all_files}
urls = [line.strip().encode("utf-8") for line in tf.gfile.Open(url_file)]
filelist = []
for url in urls:
url_hash = generate_hash(url)
filename = url_hash + ".story"
if filename not in all_files_map:
tf.logging.info("Missing file: %s" % url)
continue
filelist.append(all_files_map[filename])
tf.logging.info("Found %d examples" % len(filelist))
return filelist
|
python
|
def example_splits(url_file, all_files):
"""Generate splits of the data."""
def generate_hash(inp):
"""Generate a sha1 hash to match the raw url to the filename extracted."""
h = hashlib.sha1()
h.update(inp)
return h.hexdigest()
all_files_map = {f.split("/")[-1]: f for f in all_files}
urls = [line.strip().encode("utf-8") for line in tf.gfile.Open(url_file)]
filelist = []
for url in urls:
url_hash = generate_hash(url)
filename = url_hash + ".story"
if filename not in all_files_map:
tf.logging.info("Missing file: %s" % url)
continue
filelist.append(all_files_map[filename])
tf.logging.info("Found %d examples" % len(filelist))
return filelist
|
[
"def",
"example_splits",
"(",
"url_file",
",",
"all_files",
")",
":",
"def",
"generate_hash",
"(",
"inp",
")",
":",
"\"\"\"Generate a sha1 hash to match the raw url to the filename extracted.\"\"\"",
"h",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"h",
".",
"update",
"(",
"inp",
")",
"return",
"h",
".",
"hexdigest",
"(",
")",
"all_files_map",
"=",
"{",
"f",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
":",
"f",
"for",
"f",
"in",
"all_files",
"}",
"urls",
"=",
"[",
"line",
".",
"strip",
"(",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
"for",
"line",
"in",
"tf",
".",
"gfile",
".",
"Open",
"(",
"url_file",
")",
"]",
"filelist",
"=",
"[",
"]",
"for",
"url",
"in",
"urls",
":",
"url_hash",
"=",
"generate_hash",
"(",
"url",
")",
"filename",
"=",
"url_hash",
"+",
"\".story\"",
"if",
"filename",
"not",
"in",
"all_files_map",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Missing file: %s\"",
"%",
"url",
")",
"continue",
"filelist",
".",
"append",
"(",
"all_files_map",
"[",
"filename",
"]",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Found %d examples\"",
"%",
"len",
"(",
"filelist",
")",
")",
"return",
"filelist"
] |
Generate splits of the data.
|
[
"Generate",
"splits",
"of",
"the",
"data",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/cnn_dailymail.py#L110-L134
|
22,235
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/cnn_dailymail.py
|
example_generator
|
def example_generator(all_files, urls_path, sum_token):
"""Generate examples."""
def fix_run_on_sents(line):
if u"@highlight" in line:
return line
if not line:
return line
if line[-1] in END_TOKENS:
return line
return line + u"."
filelist = example_splits(urls_path, all_files)
story_summary_split_token = u" <summary> " if sum_token else " "
for story_file in filelist:
story = []
summary = []
reading_highlights = False
for line in tf.gfile.Open(story_file, "rb"):
line = text_encoder.to_unicode_utf8(line.strip())
line = fix_run_on_sents(line)
if not line:
continue
elif line.startswith(u"@highlight"):
if not story:
break # No article text.
reading_highlights = True
elif reading_highlights:
summary.append(line)
else:
story.append(line)
if (not story) or not summary:
continue
yield " ".join(story) + story_summary_split_token + " ".join(summary)
|
python
|
def example_generator(all_files, urls_path, sum_token):
"""Generate examples."""
def fix_run_on_sents(line):
if u"@highlight" in line:
return line
if not line:
return line
if line[-1] in END_TOKENS:
return line
return line + u"."
filelist = example_splits(urls_path, all_files)
story_summary_split_token = u" <summary> " if sum_token else " "
for story_file in filelist:
story = []
summary = []
reading_highlights = False
for line in tf.gfile.Open(story_file, "rb"):
line = text_encoder.to_unicode_utf8(line.strip())
line = fix_run_on_sents(line)
if not line:
continue
elif line.startswith(u"@highlight"):
if not story:
break # No article text.
reading_highlights = True
elif reading_highlights:
summary.append(line)
else:
story.append(line)
if (not story) or not summary:
continue
yield " ".join(story) + story_summary_split_token + " ".join(summary)
|
[
"def",
"example_generator",
"(",
"all_files",
",",
"urls_path",
",",
"sum_token",
")",
":",
"def",
"fix_run_on_sents",
"(",
"line",
")",
":",
"if",
"u\"@highlight\"",
"in",
"line",
":",
"return",
"line",
"if",
"not",
"line",
":",
"return",
"line",
"if",
"line",
"[",
"-",
"1",
"]",
"in",
"END_TOKENS",
":",
"return",
"line",
"return",
"line",
"+",
"u\".\"",
"filelist",
"=",
"example_splits",
"(",
"urls_path",
",",
"all_files",
")",
"story_summary_split_token",
"=",
"u\" <summary> \"",
"if",
"sum_token",
"else",
"\" \"",
"for",
"story_file",
"in",
"filelist",
":",
"story",
"=",
"[",
"]",
"summary",
"=",
"[",
"]",
"reading_highlights",
"=",
"False",
"for",
"line",
"in",
"tf",
".",
"gfile",
".",
"Open",
"(",
"story_file",
",",
"\"rb\"",
")",
":",
"line",
"=",
"text_encoder",
".",
"to_unicode_utf8",
"(",
"line",
".",
"strip",
"(",
")",
")",
"line",
"=",
"fix_run_on_sents",
"(",
"line",
")",
"if",
"not",
"line",
":",
"continue",
"elif",
"line",
".",
"startswith",
"(",
"u\"@highlight\"",
")",
":",
"if",
"not",
"story",
":",
"break",
"# No article text.",
"reading_highlights",
"=",
"True",
"elif",
"reading_highlights",
":",
"summary",
".",
"append",
"(",
"line",
")",
"else",
":",
"story",
".",
"append",
"(",
"line",
")",
"if",
"(",
"not",
"story",
")",
"or",
"not",
"summary",
":",
"continue",
"yield",
"\" \"",
".",
"join",
"(",
"story",
")",
"+",
"story_summary_split_token",
"+",
"\" \"",
".",
"join",
"(",
"summary",
")"
] |
Generate examples.
|
[
"Generate",
"examples",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/cnn_dailymail.py#L137-L173
|
22,236
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/cnn_dailymail.py
|
write_raw_text_to_files
|
def write_raw_text_to_files(all_files, urls_path, dataset_split, tmp_dir):
"""Write text to files."""
def write_to_file(all_files, urls_path, tmp_dir, filename):
"""Write text to files."""
with io.open(
os.path.join(tmp_dir, filename + ".source"), "w",
encoding="utf-8") as fstory:
with io.open(
os.path.join(tmp_dir, filename + ".target"), "w",
encoding="utf-8") as fsummary:
for example in example_generator(all_files, urls_path, sum_token=True):
story, summary = _story_summary_split(example)
fstory.write(story + "\n")
fsummary.write(summary + "\n")
if dataset_split == problem.DatasetSplit.TRAIN:
filename = "cnndm.train"
elif dataset_split == problem.DatasetSplit.EVAL:
filename = "cnndm.dev"
else:
filename = "cnndm.test"
tf.logging.info("Writing %s" % filename)
write_to_file(all_files, urls_path, tmp_dir, filename)
|
python
|
def write_raw_text_to_files(all_files, urls_path, dataset_split, tmp_dir):
"""Write text to files."""
def write_to_file(all_files, urls_path, tmp_dir, filename):
"""Write text to files."""
with io.open(
os.path.join(tmp_dir, filename + ".source"), "w",
encoding="utf-8") as fstory:
with io.open(
os.path.join(tmp_dir, filename + ".target"), "w",
encoding="utf-8") as fsummary:
for example in example_generator(all_files, urls_path, sum_token=True):
story, summary = _story_summary_split(example)
fstory.write(story + "\n")
fsummary.write(summary + "\n")
if dataset_split == problem.DatasetSplit.TRAIN:
filename = "cnndm.train"
elif dataset_split == problem.DatasetSplit.EVAL:
filename = "cnndm.dev"
else:
filename = "cnndm.test"
tf.logging.info("Writing %s" % filename)
write_to_file(all_files, urls_path, tmp_dir, filename)
|
[
"def",
"write_raw_text_to_files",
"(",
"all_files",
",",
"urls_path",
",",
"dataset_split",
",",
"tmp_dir",
")",
":",
"def",
"write_to_file",
"(",
"all_files",
",",
"urls_path",
",",
"tmp_dir",
",",
"filename",
")",
":",
"\"\"\"Write text to files.\"\"\"",
"with",
"io",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"filename",
"+",
"\".source\"",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"fstory",
":",
"with",
"io",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"filename",
"+",
"\".target\"",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"fsummary",
":",
"for",
"example",
"in",
"example_generator",
"(",
"all_files",
",",
"urls_path",
",",
"sum_token",
"=",
"True",
")",
":",
"story",
",",
"summary",
"=",
"_story_summary_split",
"(",
"example",
")",
"fstory",
".",
"write",
"(",
"story",
"+",
"\"\\n\"",
")",
"fsummary",
".",
"write",
"(",
"summary",
"+",
"\"\\n\"",
")",
"if",
"dataset_split",
"==",
"problem",
".",
"DatasetSplit",
".",
"TRAIN",
":",
"filename",
"=",
"\"cnndm.train\"",
"elif",
"dataset_split",
"==",
"problem",
".",
"DatasetSplit",
".",
"EVAL",
":",
"filename",
"=",
"\"cnndm.dev\"",
"else",
":",
"filename",
"=",
"\"cnndm.test\"",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Writing %s\"",
"%",
"filename",
")",
"write_to_file",
"(",
"all_files",
",",
"urls_path",
",",
"tmp_dir",
",",
"filename",
")"
] |
Write text to files.
|
[
"Write",
"text",
"to",
"files",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/cnn_dailymail.py#L183-L207
|
22,237
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/player_utils.py
|
infer_last_epoch_num
|
def infer_last_epoch_num(data_dir):
"""Infer highest epoch number from file names in data_dir."""
names = os.listdir(data_dir)
epochs_str = [re.findall(pattern=r".*\.(-?\d+)$", string=name)
for name in names]
epochs_str = sum(epochs_str, [])
return max([int(epoch_str) for epoch_str in epochs_str])
|
python
|
def infer_last_epoch_num(data_dir):
"""Infer highest epoch number from file names in data_dir."""
names = os.listdir(data_dir)
epochs_str = [re.findall(pattern=r".*\.(-?\d+)$", string=name)
for name in names]
epochs_str = sum(epochs_str, [])
return max([int(epoch_str) for epoch_str in epochs_str])
|
[
"def",
"infer_last_epoch_num",
"(",
"data_dir",
")",
":",
"names",
"=",
"os",
".",
"listdir",
"(",
"data_dir",
")",
"epochs_str",
"=",
"[",
"re",
".",
"findall",
"(",
"pattern",
"=",
"r\".*\\.(-?\\d+)$\"",
",",
"string",
"=",
"name",
")",
"for",
"name",
"in",
"names",
"]",
"epochs_str",
"=",
"sum",
"(",
"epochs_str",
",",
"[",
"]",
")",
"return",
"max",
"(",
"[",
"int",
"(",
"epoch_str",
")",
"for",
"epoch_str",
"in",
"epochs_str",
"]",
")"
] |
Infer highest epoch number from file names in data_dir.
|
[
"Infer",
"highest",
"epoch",
"number",
"from",
"file",
"names",
"in",
"data_dir",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player_utils.py#L123-L129
|
22,238
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/player_utils.py
|
setup_and_load_epoch
|
def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None):
"""Load T2TGymEnv with data from one epoch.
Args:
hparams: hparams.
data_dir: data directory.
which_epoch_data: data from which epoch to load.
Returns:
env.
"""
t2t_env = rl_utils.setup_env(
hparams, batch_size=hparams.real_batch_size,
max_num_noops=hparams.max_num_noops
)
# Load data.
if which_epoch_data is not None:
if which_epoch_data == "last":
which_epoch_data = infer_last_epoch_num(data_dir)
assert isinstance(which_epoch_data, int), \
"{}".format(type(which_epoch_data))
t2t_env.start_new_epoch(which_epoch_data, data_dir)
else:
t2t_env.start_new_epoch(-999)
return t2t_env
|
python
|
def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None):
"""Load T2TGymEnv with data from one epoch.
Args:
hparams: hparams.
data_dir: data directory.
which_epoch_data: data from which epoch to load.
Returns:
env.
"""
t2t_env = rl_utils.setup_env(
hparams, batch_size=hparams.real_batch_size,
max_num_noops=hparams.max_num_noops
)
# Load data.
if which_epoch_data is not None:
if which_epoch_data == "last":
which_epoch_data = infer_last_epoch_num(data_dir)
assert isinstance(which_epoch_data, int), \
"{}".format(type(which_epoch_data))
t2t_env.start_new_epoch(which_epoch_data, data_dir)
else:
t2t_env.start_new_epoch(-999)
return t2t_env
|
[
"def",
"setup_and_load_epoch",
"(",
"hparams",
",",
"data_dir",
",",
"which_epoch_data",
"=",
"None",
")",
":",
"t2t_env",
"=",
"rl_utils",
".",
"setup_env",
"(",
"hparams",
",",
"batch_size",
"=",
"hparams",
".",
"real_batch_size",
",",
"max_num_noops",
"=",
"hparams",
".",
"max_num_noops",
")",
"# Load data.",
"if",
"which_epoch_data",
"is",
"not",
"None",
":",
"if",
"which_epoch_data",
"==",
"\"last\"",
":",
"which_epoch_data",
"=",
"infer_last_epoch_num",
"(",
"data_dir",
")",
"assert",
"isinstance",
"(",
"which_epoch_data",
",",
"int",
")",
",",
"\"{}\"",
".",
"format",
"(",
"type",
"(",
"which_epoch_data",
")",
")",
"t2t_env",
".",
"start_new_epoch",
"(",
"which_epoch_data",
",",
"data_dir",
")",
"else",
":",
"t2t_env",
".",
"start_new_epoch",
"(",
"-",
"999",
")",
"return",
"t2t_env"
] |
Load T2TGymEnv with data from one epoch.
Args:
hparams: hparams.
data_dir: data directory.
which_epoch_data: data from which epoch to load.
Returns:
env.
|
[
"Load",
"T2TGymEnv",
"with",
"data",
"from",
"one",
"epoch",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player_utils.py#L132-L156
|
22,239
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/player_utils.py
|
infer_game_name_from_filenames
|
def infer_game_name_from_filenames(data_dir, snake_case=True):
"""Infer name from filenames."""
names = os.listdir(data_dir)
game_names = [re.findall(pattern=r"^Gym(.*)NoFrameskip", string=name)
for name in names]
assert game_names, "No data files found in {}".format(data_dir)
game_names = sum(game_names, [])
game_name = game_names[0]
assert all(game_name == other for other in game_names), \
"There are multiple different game names in {}".format(data_dir)
if snake_case:
game_name = camelcase_to_snakecase(game_name)
return game_name
|
python
|
def infer_game_name_from_filenames(data_dir, snake_case=True):
"""Infer name from filenames."""
names = os.listdir(data_dir)
game_names = [re.findall(pattern=r"^Gym(.*)NoFrameskip", string=name)
for name in names]
assert game_names, "No data files found in {}".format(data_dir)
game_names = sum(game_names, [])
game_name = game_names[0]
assert all(game_name == other for other in game_names), \
"There are multiple different game names in {}".format(data_dir)
if snake_case:
game_name = camelcase_to_snakecase(game_name)
return game_name
|
[
"def",
"infer_game_name_from_filenames",
"(",
"data_dir",
",",
"snake_case",
"=",
"True",
")",
":",
"names",
"=",
"os",
".",
"listdir",
"(",
"data_dir",
")",
"game_names",
"=",
"[",
"re",
".",
"findall",
"(",
"pattern",
"=",
"r\"^Gym(.*)NoFrameskip\"",
",",
"string",
"=",
"name",
")",
"for",
"name",
"in",
"names",
"]",
"assert",
"game_names",
",",
"\"No data files found in {}\"",
".",
"format",
"(",
"data_dir",
")",
"game_names",
"=",
"sum",
"(",
"game_names",
",",
"[",
"]",
")",
"game_name",
"=",
"game_names",
"[",
"0",
"]",
"assert",
"all",
"(",
"game_name",
"==",
"other",
"for",
"other",
"in",
"game_names",
")",
",",
"\"There are multiple different game names in {}\"",
".",
"format",
"(",
"data_dir",
")",
"if",
"snake_case",
":",
"game_name",
"=",
"camelcase_to_snakecase",
"(",
"game_name",
")",
"return",
"game_name"
] |
Infer name from filenames.
|
[
"Infer",
"name",
"from",
"filenames",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player_utils.py#L159-L171
|
22,240
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/player_utils.py
|
wrap_with_monitor
|
def wrap_with_monitor(env, video_dir):
"""Wrap environment with gym.Monitor.
Video recording provided by Monitor requires
1) both height and width of observation to be even numbers.
2) rendering of environment
Args:
env: environment.
video_dir: video directory.
Returns:
wrapped environment.
"""
env = ExtendToEvenDimentions(env)
env = RenderObservations(env) # pylint: disable=redefined-variable-type
env = gym.wrappers.Monitor(env, video_dir, force=True,
video_callable=lambda idx: True,
write_upon_reset=True)
return env
|
python
|
def wrap_with_monitor(env, video_dir):
"""Wrap environment with gym.Monitor.
Video recording provided by Monitor requires
1) both height and width of observation to be even numbers.
2) rendering of environment
Args:
env: environment.
video_dir: video directory.
Returns:
wrapped environment.
"""
env = ExtendToEvenDimentions(env)
env = RenderObservations(env) # pylint: disable=redefined-variable-type
env = gym.wrappers.Monitor(env, video_dir, force=True,
video_callable=lambda idx: True,
write_upon_reset=True)
return env
|
[
"def",
"wrap_with_monitor",
"(",
"env",
",",
"video_dir",
")",
":",
"env",
"=",
"ExtendToEvenDimentions",
"(",
"env",
")",
"env",
"=",
"RenderObservations",
"(",
"env",
")",
"# pylint: disable=redefined-variable-type",
"env",
"=",
"gym",
".",
"wrappers",
".",
"Monitor",
"(",
"env",
",",
"video_dir",
",",
"force",
"=",
"True",
",",
"video_callable",
"=",
"lambda",
"idx",
":",
"True",
",",
"write_upon_reset",
"=",
"True",
")",
"return",
"env"
] |
Wrap environment with gym.Monitor.
Video recording provided by Monitor requires
1) both height and width of observation to be even numbers.
2) rendering of environment
Args:
env: environment.
video_dir: video directory.
Returns:
wrapped environment.
|
[
"Wrap",
"environment",
"with",
"gym",
".",
"Monitor",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player_utils.py#L245-L264
|
22,241
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/player_utils.py
|
create_simulated_env
|
def create_simulated_env(
output_dir, grayscale, resize_width_factor, resize_height_factor,
frame_stack_size, generative_model, generative_model_params,
random_starts=True, which_epoch_data="last", **other_hparams
):
""""Create SimulatedEnv with minimal subset of hparams."""
# We need these, to initialize T2TGymEnv, but these values (hopefully) have
# no effect on player.
a_bit_risky_defaults = {
"game": "pong", # assumes that T2TGymEnv has always reward_range (-1,1)
"real_batch_size": 1,
"rl_env_max_episode_steps": -1,
"max_num_noops": 0
}
for key in a_bit_risky_defaults:
if key not in other_hparams:
other_hparams[key] = a_bit_risky_defaults[key]
hparams = hparam.HParams(
grayscale=grayscale,
resize_width_factor=resize_width_factor,
resize_height_factor=resize_height_factor,
frame_stack_size=frame_stack_size,
generative_model=generative_model,
generative_model_params=generative_model_params,
**other_hparams
)
return load_data_and_make_simulated_env(
output_dir, wm_dir=None, hparams=hparams,
which_epoch_data=which_epoch_data,
random_starts=random_starts)
|
python
|
def create_simulated_env(
output_dir, grayscale, resize_width_factor, resize_height_factor,
frame_stack_size, generative_model, generative_model_params,
random_starts=True, which_epoch_data="last", **other_hparams
):
""""Create SimulatedEnv with minimal subset of hparams."""
# We need these, to initialize T2TGymEnv, but these values (hopefully) have
# no effect on player.
a_bit_risky_defaults = {
"game": "pong", # assumes that T2TGymEnv has always reward_range (-1,1)
"real_batch_size": 1,
"rl_env_max_episode_steps": -1,
"max_num_noops": 0
}
for key in a_bit_risky_defaults:
if key not in other_hparams:
other_hparams[key] = a_bit_risky_defaults[key]
hparams = hparam.HParams(
grayscale=grayscale,
resize_width_factor=resize_width_factor,
resize_height_factor=resize_height_factor,
frame_stack_size=frame_stack_size,
generative_model=generative_model,
generative_model_params=generative_model_params,
**other_hparams
)
return load_data_and_make_simulated_env(
output_dir, wm_dir=None, hparams=hparams,
which_epoch_data=which_epoch_data,
random_starts=random_starts)
|
[
"def",
"create_simulated_env",
"(",
"output_dir",
",",
"grayscale",
",",
"resize_width_factor",
",",
"resize_height_factor",
",",
"frame_stack_size",
",",
"generative_model",
",",
"generative_model_params",
",",
"random_starts",
"=",
"True",
",",
"which_epoch_data",
"=",
"\"last\"",
",",
"*",
"*",
"other_hparams",
")",
":",
"# We need these, to initialize T2TGymEnv, but these values (hopefully) have",
"# no effect on player.",
"a_bit_risky_defaults",
"=",
"{",
"\"game\"",
":",
"\"pong\"",
",",
"# assumes that T2TGymEnv has always reward_range (-1,1)",
"\"real_batch_size\"",
":",
"1",
",",
"\"rl_env_max_episode_steps\"",
":",
"-",
"1",
",",
"\"max_num_noops\"",
":",
"0",
"}",
"for",
"key",
"in",
"a_bit_risky_defaults",
":",
"if",
"key",
"not",
"in",
"other_hparams",
":",
"other_hparams",
"[",
"key",
"]",
"=",
"a_bit_risky_defaults",
"[",
"key",
"]",
"hparams",
"=",
"hparam",
".",
"HParams",
"(",
"grayscale",
"=",
"grayscale",
",",
"resize_width_factor",
"=",
"resize_width_factor",
",",
"resize_height_factor",
"=",
"resize_height_factor",
",",
"frame_stack_size",
"=",
"frame_stack_size",
",",
"generative_model",
"=",
"generative_model",
",",
"generative_model_params",
"=",
"generative_model_params",
",",
"*",
"*",
"other_hparams",
")",
"return",
"load_data_and_make_simulated_env",
"(",
"output_dir",
",",
"wm_dir",
"=",
"None",
",",
"hparams",
"=",
"hparams",
",",
"which_epoch_data",
"=",
"which_epoch_data",
",",
"random_starts",
"=",
"random_starts",
")"
] |
Create SimulatedEnv with minimal subset of hparams.
|
[
"Create",
"SimulatedEnv",
"with",
"minimal",
"subset",
"of",
"hparams",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player_utils.py#L267-L298
|
22,242
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/player_utils.py
|
infer_paths
|
def infer_paths(output_dir, **subdirs):
"""Infers standard paths to policy and model directories.
Example:
>>> infer_paths("/some/output/dir/", policy="", model="custom/path")
{"policy": "/some/output/dir/policy", "model": "custom/path",
"output_dir":"/some/output/dir/"}
Args:
output_dir: output directory.
**subdirs: sub-directories.
Returns:
a dictionary with the directories.
"""
directories = {}
for name, path in six.iteritems(subdirs):
directories[name] = path if path else os.path.join(output_dir, name)
directories["output_dir"] = output_dir
return directories
|
python
|
def infer_paths(output_dir, **subdirs):
"""Infers standard paths to policy and model directories.
Example:
>>> infer_paths("/some/output/dir/", policy="", model="custom/path")
{"policy": "/some/output/dir/policy", "model": "custom/path",
"output_dir":"/some/output/dir/"}
Args:
output_dir: output directory.
**subdirs: sub-directories.
Returns:
a dictionary with the directories.
"""
directories = {}
for name, path in six.iteritems(subdirs):
directories[name] = path if path else os.path.join(output_dir, name)
directories["output_dir"] = output_dir
return directories
|
[
"def",
"infer_paths",
"(",
"output_dir",
",",
"*",
"*",
"subdirs",
")",
":",
"directories",
"=",
"{",
"}",
"for",
"name",
",",
"path",
"in",
"six",
".",
"iteritems",
"(",
"subdirs",
")",
":",
"directories",
"[",
"name",
"]",
"=",
"path",
"if",
"path",
"else",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"name",
")",
"directories",
"[",
"\"output_dir\"",
"]",
"=",
"output_dir",
"return",
"directories"
] |
Infers standard paths to policy and model directories.
Example:
>>> infer_paths("/some/output/dir/", policy="", model="custom/path")
{"policy": "/some/output/dir/policy", "model": "custom/path",
"output_dir":"/some/output/dir/"}
Args:
output_dir: output directory.
**subdirs: sub-directories.
Returns:
a dictionary with the directories.
|
[
"Infers",
"standard",
"paths",
"to",
"policy",
"and",
"model",
"directories",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player_utils.py#L377-L396
|
22,243
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/player_utils.py
|
PPOPolicyInferencer.infer
|
def infer(self, ob):
"""Add new observation to frame stack and infer policy.
Args:
ob: array of shape (height, width, channels)
Returns:
logits and vf.
"""
self._add_to_stack(ob)
logits, vf = self.infer_from_frame_stack(self._frame_stack)
return logits, vf
|
python
|
def infer(self, ob):
"""Add new observation to frame stack and infer policy.
Args:
ob: array of shape (height, width, channels)
Returns:
logits and vf.
"""
self._add_to_stack(ob)
logits, vf = self.infer_from_frame_stack(self._frame_stack)
return logits, vf
|
[
"def",
"infer",
"(",
"self",
",",
"ob",
")",
":",
"self",
".",
"_add_to_stack",
"(",
"ob",
")",
"logits",
",",
"vf",
"=",
"self",
".",
"infer_from_frame_stack",
"(",
"self",
".",
"_frame_stack",
")",
"return",
"logits",
",",
"vf"
] |
Add new observation to frame stack and infer policy.
Args:
ob: array of shape (height, width, channels)
Returns:
logits and vf.
|
[
"Add",
"new",
"observation",
"to",
"frame",
"stack",
"and",
"infer",
"policy",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player_utils.py#L350-L361
|
22,244
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/player_utils.py
|
PPOPolicyInferencer.infer_from_frame_stack
|
def infer_from_frame_stack(self, ob_stack):
"""Infer policy from stack of observations.
Args:
ob_stack: array of shape (1, frame_stack_size, height, width, channels)
Returns:
logits and vf.
"""
logits, vf = self.sess.run([self.logits_t, self.value_function_t],
feed_dict={self.obs_t: ob_stack})
return logits, vf
|
python
|
def infer_from_frame_stack(self, ob_stack):
"""Infer policy from stack of observations.
Args:
ob_stack: array of shape (1, frame_stack_size, height, width, channels)
Returns:
logits and vf.
"""
logits, vf = self.sess.run([self.logits_t, self.value_function_t],
feed_dict={self.obs_t: ob_stack})
return logits, vf
|
[
"def",
"infer_from_frame_stack",
"(",
"self",
",",
"ob_stack",
")",
":",
"logits",
",",
"vf",
"=",
"self",
".",
"sess",
".",
"run",
"(",
"[",
"self",
".",
"logits_t",
",",
"self",
".",
"value_function_t",
"]",
",",
"feed_dict",
"=",
"{",
"self",
".",
"obs_t",
":",
"ob_stack",
"}",
")",
"return",
"logits",
",",
"vf"
] |
Infer policy from stack of observations.
Args:
ob_stack: array of shape (1, frame_stack_size, height, width, channels)
Returns:
logits and vf.
|
[
"Infer",
"policy",
"from",
"stack",
"of",
"observations",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player_utils.py#L363-L374
|
22,245
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/babi_qa.py
|
_normalize_string
|
def _normalize_string(raw_str):
"""Normalizes the string using tokenizer.encode.
Args:
raw_str: the input string
Returns:
A string which is ready to be tokenized using split()
"""
return " ".join(
token.strip()
for token in tokenizer.encode(text_encoder.native_to_unicode(raw_str)))
|
python
|
def _normalize_string(raw_str):
"""Normalizes the string using tokenizer.encode.
Args:
raw_str: the input string
Returns:
A string which is ready to be tokenized using split()
"""
return " ".join(
token.strip()
for token in tokenizer.encode(text_encoder.native_to_unicode(raw_str)))
|
[
"def",
"_normalize_string",
"(",
"raw_str",
")",
":",
"return",
"\" \"",
".",
"join",
"(",
"token",
".",
"strip",
"(",
")",
"for",
"token",
"in",
"tokenizer",
".",
"encode",
"(",
"text_encoder",
".",
"native_to_unicode",
"(",
"raw_str",
")",
")",
")"
] |
Normalizes the string using tokenizer.encode.
Args:
raw_str: the input string
Returns:
A string which is ready to be tokenized using split()
|
[
"Normalizes",
"the",
"string",
"using",
"tokenizer",
".",
"encode",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/babi_qa.py#L84-L95
|
22,246
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/babi_qa.py
|
_register_babi_problems
|
def _register_babi_problems():
"""It dynamically instantiates a class for each babi subsets-tasks.
@registry.register_problem
class BabiQaConcatAllTasks_10k(EditSequenceRegexProblem):
@property
def babi_task_id(self):
return "qa0"
@property
def babi_subset(self):
return "en-10k"
It does not put the classes into the global namespace, so to access the class
we rely on the registry or this module"s REGISTERED_PROBLEMS list.
It will be available as
registry.problem("babi_qa_concat_all_tasks_10k")
i.e., change camel case to snake case. Numbers are considered lower case
characters for these purposes.
"""
for (subset, subset_suffix) in [("en", "_1k"), ("en-10k", "_10k")]:
for problem_name, babi_task_id in six.iteritems(_problems_to_register()):
problem_class = type("BabiQaConcat" + problem_name + subset_suffix,
(BabiQaConcat,), {
"babi_task_id": babi_task_id,
"babi_subset": subset
})
registry.register_problem(problem_class)
REGISTERED_PROBLEMS.append(problem_class.name)
|
python
|
def _register_babi_problems():
"""It dynamically instantiates a class for each babi subsets-tasks.
@registry.register_problem
class BabiQaConcatAllTasks_10k(EditSequenceRegexProblem):
@property
def babi_task_id(self):
return "qa0"
@property
def babi_subset(self):
return "en-10k"
It does not put the classes into the global namespace, so to access the class
we rely on the registry or this module"s REGISTERED_PROBLEMS list.
It will be available as
registry.problem("babi_qa_concat_all_tasks_10k")
i.e., change camel case to snake case. Numbers are considered lower case
characters for these purposes.
"""
for (subset, subset_suffix) in [("en", "_1k"), ("en-10k", "_10k")]:
for problem_name, babi_task_id in six.iteritems(_problems_to_register()):
problem_class = type("BabiQaConcat" + problem_name + subset_suffix,
(BabiQaConcat,), {
"babi_task_id": babi_task_id,
"babi_subset": subset
})
registry.register_problem(problem_class)
REGISTERED_PROBLEMS.append(problem_class.name)
|
[
"def",
"_register_babi_problems",
"(",
")",
":",
"for",
"(",
"subset",
",",
"subset_suffix",
")",
"in",
"[",
"(",
"\"en\"",
",",
"\"_1k\"",
")",
",",
"(",
"\"en-10k\"",
",",
"\"_10k\"",
")",
"]",
":",
"for",
"problem_name",
",",
"babi_task_id",
"in",
"six",
".",
"iteritems",
"(",
"_problems_to_register",
"(",
")",
")",
":",
"problem_class",
"=",
"type",
"(",
"\"BabiQaConcat\"",
"+",
"problem_name",
"+",
"subset_suffix",
",",
"(",
"BabiQaConcat",
",",
")",
",",
"{",
"\"babi_task_id\"",
":",
"babi_task_id",
",",
"\"babi_subset\"",
":",
"subset",
"}",
")",
"registry",
".",
"register_problem",
"(",
"problem_class",
")",
"REGISTERED_PROBLEMS",
".",
"append",
"(",
"problem_class",
".",
"name",
")"
] |
It dynamically instantiates a class for each babi subsets-tasks.
@registry.register_problem
class BabiQaConcatAllTasks_10k(EditSequenceRegexProblem):
@property
def babi_task_id(self):
return "qa0"
@property
def babi_subset(self):
return "en-10k"
It does not put the classes into the global namespace, so to access the class
we rely on the registry or this module"s REGISTERED_PROBLEMS list.
It will be available as
registry.problem("babi_qa_concat_all_tasks_10k")
i.e., change camel case to snake case. Numbers are considered lower case
characters for these purposes.
|
[
"It",
"dynamically",
"instantiates",
"a",
"class",
"for",
"each",
"babi",
"subsets",
"-",
"tasks",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/babi_qa.py#L510-L539
|
22,247
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/babi_qa.py
|
BabiQa.get_labels_encoder
|
def get_labels_encoder(self, data_dir):
"""Builds encoder for the given class labels.
Args:
data_dir: data directory
Returns:
An encoder for class labels.
"""
label_filepath = os.path.join(data_dir, self.vocab_filename)
return text_encoder.TokenTextEncoder(label_filepath)
|
python
|
def get_labels_encoder(self, data_dir):
"""Builds encoder for the given class labels.
Args:
data_dir: data directory
Returns:
An encoder for class labels.
"""
label_filepath = os.path.join(data_dir, self.vocab_filename)
return text_encoder.TokenTextEncoder(label_filepath)
|
[
"def",
"get_labels_encoder",
"(",
"self",
",",
"data_dir",
")",
":",
"label_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"self",
".",
"vocab_filename",
")",
"return",
"text_encoder",
".",
"TokenTextEncoder",
"(",
"label_filepath",
")"
] |
Builds encoder for the given class labels.
Args:
data_dir: data directory
Returns:
An encoder for class labels.
|
[
"Builds",
"encoder",
"for",
"the",
"given",
"class",
"labels",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/babi_qa.py#L326-L336
|
22,248
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/babi_qa.py
|
BabiQa.generate_encoded_samples
|
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
"""A generator that generates samples that are encoded.
Args:
data_dir: data directory
tmp_dir: temp directory
dataset_split: dataset split
Yields:
A dict.
"""
generator = self.generate_samples(data_dir, tmp_dir, dataset_split)
encoder = self.get_or_create_vocab(data_dir, tmp_dir)
label_encoder = self.get_labels_encoder(data_dir)
for sample in generator:
inputs = encoder.encode(sample["inputs"])
inputs.append(text_encoder.EOS_ID)
context = encoder.encode(sample["context"])
context.append(text_encoder.EOS_ID)
targets = label_encoder.encode(sample["targets"])
sample["targets"] = targets
yield {"inputs": inputs, "context": context, "targets": targets}
|
python
|
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
"""A generator that generates samples that are encoded.
Args:
data_dir: data directory
tmp_dir: temp directory
dataset_split: dataset split
Yields:
A dict.
"""
generator = self.generate_samples(data_dir, tmp_dir, dataset_split)
encoder = self.get_or_create_vocab(data_dir, tmp_dir)
label_encoder = self.get_labels_encoder(data_dir)
for sample in generator:
inputs = encoder.encode(sample["inputs"])
inputs.append(text_encoder.EOS_ID)
context = encoder.encode(sample["context"])
context.append(text_encoder.EOS_ID)
targets = label_encoder.encode(sample["targets"])
sample["targets"] = targets
yield {"inputs": inputs, "context": context, "targets": targets}
|
[
"def",
"generate_encoded_samples",
"(",
"self",
",",
"data_dir",
",",
"tmp_dir",
",",
"dataset_split",
")",
":",
"generator",
"=",
"self",
".",
"generate_samples",
"(",
"data_dir",
",",
"tmp_dir",
",",
"dataset_split",
")",
"encoder",
"=",
"self",
".",
"get_or_create_vocab",
"(",
"data_dir",
",",
"tmp_dir",
")",
"label_encoder",
"=",
"self",
".",
"get_labels_encoder",
"(",
"data_dir",
")",
"for",
"sample",
"in",
"generator",
":",
"inputs",
"=",
"encoder",
".",
"encode",
"(",
"sample",
"[",
"\"inputs\"",
"]",
")",
"inputs",
".",
"append",
"(",
"text_encoder",
".",
"EOS_ID",
")",
"context",
"=",
"encoder",
".",
"encode",
"(",
"sample",
"[",
"\"context\"",
"]",
")",
"context",
".",
"append",
"(",
"text_encoder",
".",
"EOS_ID",
")",
"targets",
"=",
"label_encoder",
".",
"encode",
"(",
"sample",
"[",
"\"targets\"",
"]",
")",
"sample",
"[",
"\"targets\"",
"]",
"=",
"targets",
"yield",
"{",
"\"inputs\"",
":",
"inputs",
",",
"\"context\"",
":",
"context",
",",
"\"targets\"",
":",
"targets",
"}"
] |
A generator that generates samples that are encoded.
Args:
data_dir: data directory
tmp_dir: temp directory
dataset_split: dataset split
Yields:
A dict.
|
[
"A",
"generator",
"that",
"generates",
"samples",
"that",
"are",
"encoded",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/babi_qa.py#L364-L386
|
22,249
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/timeseries.py
|
TimeseriesProblem.dataset_splits
|
def dataset_splits(self):
"""Splits of data to produce and number the output shards for each."""
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": self.num_train_shards,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": self.num_eval_shards,
}, {
"split": problem.DatasetSplit.TEST,
"shards": self.num_test_shards,
}]
|
python
|
def dataset_splits(self):
"""Splits of data to produce and number the output shards for each."""
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": self.num_train_shards,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": self.num_eval_shards,
}, {
"split": problem.DatasetSplit.TEST,
"shards": self.num_test_shards,
}]
|
[
"def",
"dataset_splits",
"(",
"self",
")",
":",
"return",
"[",
"{",
"\"split\"",
":",
"problem",
".",
"DatasetSplit",
".",
"TRAIN",
",",
"\"shards\"",
":",
"self",
".",
"num_train_shards",
",",
"}",
",",
"{",
"\"split\"",
":",
"problem",
".",
"DatasetSplit",
".",
"EVAL",
",",
"\"shards\"",
":",
"self",
".",
"num_eval_shards",
",",
"}",
",",
"{",
"\"split\"",
":",
"problem",
".",
"DatasetSplit",
".",
"TEST",
",",
"\"shards\"",
":",
"self",
".",
"num_test_shards",
",",
"}",
"]"
] |
Splits of data to produce and number the output shards for each.
|
[
"Splits",
"of",
"data",
"to",
"produce",
"and",
"number",
"the",
"output",
"shards",
"for",
"each",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/timeseries.py#L49-L60
|
22,250
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/librispeech.py
|
add_librispeech_hparams
|
def add_librispeech_hparams(hparams):
"""Adding to base hparams the attributes for for librispeech."""
hparams.batch_size = 36
hparams.audio_compression = 8
hparams.hidden_size = 2048
hparams.max_input_seq_length = 600000
hparams.max_target_seq_length = 350
hparams.max_length = hparams.max_input_seq_length
hparams.min_length_bucket = hparams.max_input_seq_length // 2
hparams.learning_rate = 0.05
hparams.train_steps = 5000000
hparams.num_hidden_layers = 4
return hparams
|
python
|
def add_librispeech_hparams(hparams):
"""Adding to base hparams the attributes for for librispeech."""
hparams.batch_size = 36
hparams.audio_compression = 8
hparams.hidden_size = 2048
hparams.max_input_seq_length = 600000
hparams.max_target_seq_length = 350
hparams.max_length = hparams.max_input_seq_length
hparams.min_length_bucket = hparams.max_input_seq_length // 2
hparams.learning_rate = 0.05
hparams.train_steps = 5000000
hparams.num_hidden_layers = 4
return hparams
|
[
"def",
"add_librispeech_hparams",
"(",
"hparams",
")",
":",
"hparams",
".",
"batch_size",
"=",
"36",
"hparams",
".",
"audio_compression",
"=",
"8",
"hparams",
".",
"hidden_size",
"=",
"2048",
"hparams",
".",
"max_input_seq_length",
"=",
"600000",
"hparams",
".",
"max_target_seq_length",
"=",
"350",
"hparams",
".",
"max_length",
"=",
"hparams",
".",
"max_input_seq_length",
"hparams",
".",
"min_length_bucket",
"=",
"hparams",
".",
"max_input_seq_length",
"//",
"2",
"hparams",
".",
"learning_rate",
"=",
"0.05",
"hparams",
".",
"train_steps",
"=",
"5000000",
"hparams",
".",
"num_hidden_layers",
"=",
"4",
"return",
"hparams"
] |
Adding to base hparams the attributes for for librispeech.
|
[
"Adding",
"to",
"base",
"hparams",
"the",
"attributes",
"for",
"for",
"librispeech",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/librispeech.py#L261-L273
|
22,251
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/wsj_parsing.py
|
words_and_tags_from_wsj_tree
|
def words_and_tags_from_wsj_tree(tree_string):
"""Generates linearized trees and tokens from the wsj tree format.
It uses the linearized algorithm described in https://arxiv.org/abs/1412.7449.
Args:
tree_string: tree in wsj format
Returns:
tuple: (words, linearized tree)
"""
stack, tags, words = [], [], []
for tok in tree_string.strip().split():
if tok[0] == "(":
symbol = tok[1:]
tags.append(symbol)
stack.append(symbol)
else:
assert tok[-1] == ")"
stack.pop() # Pop the POS-tag.
while tok[-2] == ")":
tags.append("/" + stack.pop())
tok = tok[:-1]
words.append(tok[:-1])
return str.join(" ", words), str.join(" ", tags[1:-1])
|
python
|
def words_and_tags_from_wsj_tree(tree_string):
"""Generates linearized trees and tokens from the wsj tree format.
It uses the linearized algorithm described in https://arxiv.org/abs/1412.7449.
Args:
tree_string: tree in wsj format
Returns:
tuple: (words, linearized tree)
"""
stack, tags, words = [], [], []
for tok in tree_string.strip().split():
if tok[0] == "(":
symbol = tok[1:]
tags.append(symbol)
stack.append(symbol)
else:
assert tok[-1] == ")"
stack.pop() # Pop the POS-tag.
while tok[-2] == ")":
tags.append("/" + stack.pop())
tok = tok[:-1]
words.append(tok[:-1])
return str.join(" ", words), str.join(" ", tags[1:-1])
|
[
"def",
"words_and_tags_from_wsj_tree",
"(",
"tree_string",
")",
":",
"stack",
",",
"tags",
",",
"words",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"tok",
"in",
"tree_string",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
":",
"if",
"tok",
"[",
"0",
"]",
"==",
"\"(\"",
":",
"symbol",
"=",
"tok",
"[",
"1",
":",
"]",
"tags",
".",
"append",
"(",
"symbol",
")",
"stack",
".",
"append",
"(",
"symbol",
")",
"else",
":",
"assert",
"tok",
"[",
"-",
"1",
"]",
"==",
"\")\"",
"stack",
".",
"pop",
"(",
")",
"# Pop the POS-tag.",
"while",
"tok",
"[",
"-",
"2",
"]",
"==",
"\")\"",
":",
"tags",
".",
"append",
"(",
"\"/\"",
"+",
"stack",
".",
"pop",
"(",
")",
")",
"tok",
"=",
"tok",
"[",
":",
"-",
"1",
"]",
"words",
".",
"append",
"(",
"tok",
"[",
":",
"-",
"1",
"]",
")",
"return",
"str",
".",
"join",
"(",
"\" \"",
",",
"words",
")",
",",
"str",
".",
"join",
"(",
"\" \"",
",",
"tags",
"[",
"1",
":",
"-",
"1",
"]",
")"
] |
Generates linearized trees and tokens from the wsj tree format.
It uses the linearized algorithm described in https://arxiv.org/abs/1412.7449.
Args:
tree_string: tree in wsj format
Returns:
tuple: (words, linearized tree)
|
[
"Generates",
"linearized",
"trees",
"and",
"tokens",
"from",
"the",
"wsj",
"tree",
"format",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wsj_parsing.py#L79-L103
|
22,252
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/wikisum/validate_data.py
|
aggregate_stats
|
def aggregate_stats(stats_files):
"""Aggregate stats in per-shard stats files."""
all_stats = {}
for fname in stats_files:
with tf.gfile.Open(fname) as f:
stats = json.loads(f.read())
for k, v in stats.iteritems():
if k not in all_stats:
if isinstance(v, list):
all_stats[k] = []
else:
all_stats[k] = 0
if isinstance(v, list):
all_stats[k].extend(v)
else:
all_stats[k] += v
stats = all_stats
ref_coverage = float(stats["total_found_refs"]) / stats["total_original_refs"]
len_bounds = [0, 2, 10, 100, 1000, 5000, 10000, 20000, 50000, 100000, 1000000]
len_counts, len_bounds = np.histogram(stats["ref_lengths"], len_bounds)
len_dist = len_counts.astype(np.float32) / len_counts.sum()
wiki_coverage = (float(stats["num_wikis_written"]) /
stats["total_original_wikis"])
wikis_skipped_no_ref = (float(stats["wikis_skipped_no_refs"]) /
stats["total_original_wikis"])
wikis_skipped_no_lead = (float(stats["wikis_skipped_short_lead"]) /
stats["total_original_wikis"])
wiki_ref_coverage = [
float(found) / orig for found, orig
in zip(stats["wiki_found_refs"], stats["wiki_original_refs"]) if found
]
coverage_bounds = np.arange(21).astype(np.float32) / 20
coverage_counts, coverage_bounds = np.histogram(wiki_ref_coverage,
coverage_bounds)
coverage_dist = coverage_counts.astype(np.float32) / coverage_counts.sum()
agg_stats = dict(
total_original_wikis=stats["total_original_wikis"],
total_original_refs=stats["total_original_refs"],
wiki_coverage=wiki_coverage,
wikis_skipped_no_ref=wikis_skipped_no_ref,
wikis_skipped_no_lead=wikis_skipped_no_lead,
overall_ref_coverage=ref_coverage,
per_wiki_ref_coverage_dist=list((coverage_dist * 100).astype(int)),
per_wiki_ref_coverage_bounds=list((coverage_bounds * 100).astype(int)),
ref_len_dist=list((len_dist * 100).astype(int)),
ref_len_bounds=list(len_bounds),
)
return agg_stats
|
python
|
def aggregate_stats(stats_files):
"""Aggregate stats in per-shard stats files."""
all_stats = {}
for fname in stats_files:
with tf.gfile.Open(fname) as f:
stats = json.loads(f.read())
for k, v in stats.iteritems():
if k not in all_stats:
if isinstance(v, list):
all_stats[k] = []
else:
all_stats[k] = 0
if isinstance(v, list):
all_stats[k].extend(v)
else:
all_stats[k] += v
stats = all_stats
ref_coverage = float(stats["total_found_refs"]) / stats["total_original_refs"]
len_bounds = [0, 2, 10, 100, 1000, 5000, 10000, 20000, 50000, 100000, 1000000]
len_counts, len_bounds = np.histogram(stats["ref_lengths"], len_bounds)
len_dist = len_counts.astype(np.float32) / len_counts.sum()
wiki_coverage = (float(stats["num_wikis_written"]) /
stats["total_original_wikis"])
wikis_skipped_no_ref = (float(stats["wikis_skipped_no_refs"]) /
stats["total_original_wikis"])
wikis_skipped_no_lead = (float(stats["wikis_skipped_short_lead"]) /
stats["total_original_wikis"])
wiki_ref_coverage = [
float(found) / orig for found, orig
in zip(stats["wiki_found_refs"], stats["wiki_original_refs"]) if found
]
coverage_bounds = np.arange(21).astype(np.float32) / 20
coverage_counts, coverage_bounds = np.histogram(wiki_ref_coverage,
coverage_bounds)
coverage_dist = coverage_counts.astype(np.float32) / coverage_counts.sum()
agg_stats = dict(
total_original_wikis=stats["total_original_wikis"],
total_original_refs=stats["total_original_refs"],
wiki_coverage=wiki_coverage,
wikis_skipped_no_ref=wikis_skipped_no_ref,
wikis_skipped_no_lead=wikis_skipped_no_lead,
overall_ref_coverage=ref_coverage,
per_wiki_ref_coverage_dist=list((coverage_dist * 100).astype(int)),
per_wiki_ref_coverage_bounds=list((coverage_bounds * 100).astype(int)),
ref_len_dist=list((len_dist * 100).astype(int)),
ref_len_bounds=list(len_bounds),
)
return agg_stats
|
[
"def",
"aggregate_stats",
"(",
"stats_files",
")",
":",
"all_stats",
"=",
"{",
"}",
"for",
"fname",
"in",
"stats_files",
":",
"with",
"tf",
".",
"gfile",
".",
"Open",
"(",
"fname",
")",
"as",
"f",
":",
"stats",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"stats",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"not",
"in",
"all_stats",
":",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"all_stats",
"[",
"k",
"]",
"=",
"[",
"]",
"else",
":",
"all_stats",
"[",
"k",
"]",
"=",
"0",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"all_stats",
"[",
"k",
"]",
".",
"extend",
"(",
"v",
")",
"else",
":",
"all_stats",
"[",
"k",
"]",
"+=",
"v",
"stats",
"=",
"all_stats",
"ref_coverage",
"=",
"float",
"(",
"stats",
"[",
"\"total_found_refs\"",
"]",
")",
"/",
"stats",
"[",
"\"total_original_refs\"",
"]",
"len_bounds",
"=",
"[",
"0",
",",
"2",
",",
"10",
",",
"100",
",",
"1000",
",",
"5000",
",",
"10000",
",",
"20000",
",",
"50000",
",",
"100000",
",",
"1000000",
"]",
"len_counts",
",",
"len_bounds",
"=",
"np",
".",
"histogram",
"(",
"stats",
"[",
"\"ref_lengths\"",
"]",
",",
"len_bounds",
")",
"len_dist",
"=",
"len_counts",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"/",
"len_counts",
".",
"sum",
"(",
")",
"wiki_coverage",
"=",
"(",
"float",
"(",
"stats",
"[",
"\"num_wikis_written\"",
"]",
")",
"/",
"stats",
"[",
"\"total_original_wikis\"",
"]",
")",
"wikis_skipped_no_ref",
"=",
"(",
"float",
"(",
"stats",
"[",
"\"wikis_skipped_no_refs\"",
"]",
")",
"/",
"stats",
"[",
"\"total_original_wikis\"",
"]",
")",
"wikis_skipped_no_lead",
"=",
"(",
"float",
"(",
"stats",
"[",
"\"wikis_skipped_short_lead\"",
"]",
")",
"/",
"stats",
"[",
"\"total_original_wikis\"",
"]",
")",
"wiki_ref_coverage",
"=",
"[",
"float",
"(",
"found",
")",
"/",
"orig",
"for",
"found",
",",
"orig",
"in",
"zip",
"(",
"stats",
"[",
"\"wiki_found_refs\"",
"]",
",",
"stats",
"[",
"\"wiki_original_refs\"",
"]",
")",
"if",
"found",
"]",
"coverage_bounds",
"=",
"np",
".",
"arange",
"(",
"21",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"/",
"20",
"coverage_counts",
",",
"coverage_bounds",
"=",
"np",
".",
"histogram",
"(",
"wiki_ref_coverage",
",",
"coverage_bounds",
")",
"coverage_dist",
"=",
"coverage_counts",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"/",
"coverage_counts",
".",
"sum",
"(",
")",
"agg_stats",
"=",
"dict",
"(",
"total_original_wikis",
"=",
"stats",
"[",
"\"total_original_wikis\"",
"]",
",",
"total_original_refs",
"=",
"stats",
"[",
"\"total_original_refs\"",
"]",
",",
"wiki_coverage",
"=",
"wiki_coverage",
",",
"wikis_skipped_no_ref",
"=",
"wikis_skipped_no_ref",
",",
"wikis_skipped_no_lead",
"=",
"wikis_skipped_no_lead",
",",
"overall_ref_coverage",
"=",
"ref_coverage",
",",
"per_wiki_ref_coverage_dist",
"=",
"list",
"(",
"(",
"coverage_dist",
"*",
"100",
")",
".",
"astype",
"(",
"int",
")",
")",
",",
"per_wiki_ref_coverage_bounds",
"=",
"list",
"(",
"(",
"coverage_bounds",
"*",
"100",
")",
".",
"astype",
"(",
"int",
")",
")",
",",
"ref_len_dist",
"=",
"list",
"(",
"(",
"len_dist",
"*",
"100",
")",
".",
"astype",
"(",
"int",
")",
")",
",",
"ref_len_bounds",
"=",
"list",
"(",
"len_bounds",
")",
",",
")",
"return",
"agg_stats"
] |
Aggregate stats in per-shard stats files.
|
[
"Aggregate",
"stats",
"in",
"per",
"-",
"shard",
"stats",
"files",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/validate_data.py#L41-L91
|
22,253
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/wikisum/validate_data.py
|
filename_to_task_id
|
def filename_to_task_id(fname):
"""Map filename to the task id that created it assuming 1k tasks."""
# This matches the order and size in WikisumBase.out_filepaths
fname = os.path.basename(fname)
shard_id_increment = {
"train": 0,
"dev": 800,
"test": 900,
}
parts = fname.split("-")
split = parts[1]
shard_id = parts[2]
task_id = int(shard_id) + shard_id_increment[split]
return task_id
|
python
|
def filename_to_task_id(fname):
"""Map filename to the task id that created it assuming 1k tasks."""
# This matches the order and size in WikisumBase.out_filepaths
fname = os.path.basename(fname)
shard_id_increment = {
"train": 0,
"dev": 800,
"test": 900,
}
parts = fname.split("-")
split = parts[1]
shard_id = parts[2]
task_id = int(shard_id) + shard_id_increment[split]
return task_id
|
[
"def",
"filename_to_task_id",
"(",
"fname",
")",
":",
"# This matches the order and size in WikisumBase.out_filepaths",
"fname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
"shard_id_increment",
"=",
"{",
"\"train\"",
":",
"0",
",",
"\"dev\"",
":",
"800",
",",
"\"test\"",
":",
"900",
",",
"}",
"parts",
"=",
"fname",
".",
"split",
"(",
"\"-\"",
")",
"split",
"=",
"parts",
"[",
"1",
"]",
"shard_id",
"=",
"parts",
"[",
"2",
"]",
"task_id",
"=",
"int",
"(",
"shard_id",
")",
"+",
"shard_id_increment",
"[",
"split",
"]",
"return",
"task_id"
] |
Map filename to the task id that created it assuming 1k tasks.
|
[
"Map",
"filename",
"to",
"the",
"task",
"id",
"that",
"created",
"it",
"assuming",
"1k",
"tasks",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/validate_data.py#L94-L107
|
22,254
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/wikisum/validate_data.py
|
validate_data_files
|
def validate_data_files(problem, data_files, min_size):
"""Validate presence and minimum size of files."""
# Check that all files are present
data_dir = os.path.split(data_files[0])[0]
out_filepaths = problem.out_filepaths(data_dir)
missing_filepaths = set(out_filepaths) - set(data_files)
if missing_filepaths:
tf.logging.error("Missing %d data files", len(missing_filepaths))
# Check that each file is at least 100M
too_small = []
for data_file in data_files:
length = get_length(data_file)
if length < min_size:
too_small.append(data_file)
if too_small:
tf.logging.error("%d files too small", len(too_small))
bad_files = too_small + list(missing_filepaths)
return bad_files
|
python
|
def validate_data_files(problem, data_files, min_size):
"""Validate presence and minimum size of files."""
# Check that all files are present
data_dir = os.path.split(data_files[0])[0]
out_filepaths = problem.out_filepaths(data_dir)
missing_filepaths = set(out_filepaths) - set(data_files)
if missing_filepaths:
tf.logging.error("Missing %d data files", len(missing_filepaths))
# Check that each file is at least 100M
too_small = []
for data_file in data_files:
length = get_length(data_file)
if length < min_size:
too_small.append(data_file)
if too_small:
tf.logging.error("%d files too small", len(too_small))
bad_files = too_small + list(missing_filepaths)
return bad_files
|
[
"def",
"validate_data_files",
"(",
"problem",
",",
"data_files",
",",
"min_size",
")",
":",
"# Check that all files are present",
"data_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"data_files",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"out_filepaths",
"=",
"problem",
".",
"out_filepaths",
"(",
"data_dir",
")",
"missing_filepaths",
"=",
"set",
"(",
"out_filepaths",
")",
"-",
"set",
"(",
"data_files",
")",
"if",
"missing_filepaths",
":",
"tf",
".",
"logging",
".",
"error",
"(",
"\"Missing %d data files\"",
",",
"len",
"(",
"missing_filepaths",
")",
")",
"# Check that each file is at least 100M",
"too_small",
"=",
"[",
"]",
"for",
"data_file",
"in",
"data_files",
":",
"length",
"=",
"get_length",
"(",
"data_file",
")",
"if",
"length",
"<",
"min_size",
":",
"too_small",
".",
"append",
"(",
"data_file",
")",
"if",
"too_small",
":",
"tf",
".",
"logging",
".",
"error",
"(",
"\"%d files too small\"",
",",
"len",
"(",
"too_small",
")",
")",
"bad_files",
"=",
"too_small",
"+",
"list",
"(",
"missing_filepaths",
")",
"return",
"bad_files"
] |
Validate presence and minimum size of files.
|
[
"Validate",
"presence",
"and",
"minimum",
"size",
"of",
"files",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/validate_data.py#L114-L133
|
22,255
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/lambada.py
|
_prepare_lambada_data
|
def _prepare_lambada_data(tmp_dir, data_dir, vocab_size, vocab_filename):
"""Downloading and preparing the dataset.
Args:
tmp_dir: tem directory
data_dir: data directory
vocab_size: size of vocabulary
vocab_filename: name of vocab file
"""
if not tf.gfile.Exists(data_dir):
tf.gfile.MakeDirs(data_dir)
file_path = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
tar_all = tarfile.open(file_path)
tar_all.extractall(tmp_dir)
tar_all.close()
tar_train = tarfile.open(os.path.join(tmp_dir, "train-novels.tar"))
tar_train.extractall(tmp_dir)
tar_train.close()
vocab_path = os.path.join(data_dir, vocab_filename)
if not tf.gfile.Exists(vocab_path):
with tf.gfile.GFile(os.path.join(tmp_dir, _VOCAB), "r") as infile:
reader = csv.reader(infile, delimiter="\t")
words = [row[0] for row in reader]
words = [_UNK] + words[:vocab_size]
with tf.gfile.GFile(vocab_path, "w") as outfile:
outfile.write("\n".join(words))
|
python
|
def _prepare_lambada_data(tmp_dir, data_dir, vocab_size, vocab_filename):
"""Downloading and preparing the dataset.
Args:
tmp_dir: tem directory
data_dir: data directory
vocab_size: size of vocabulary
vocab_filename: name of vocab file
"""
if not tf.gfile.Exists(data_dir):
tf.gfile.MakeDirs(data_dir)
file_path = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
tar_all = tarfile.open(file_path)
tar_all.extractall(tmp_dir)
tar_all.close()
tar_train = tarfile.open(os.path.join(tmp_dir, "train-novels.tar"))
tar_train.extractall(tmp_dir)
tar_train.close()
vocab_path = os.path.join(data_dir, vocab_filename)
if not tf.gfile.Exists(vocab_path):
with tf.gfile.GFile(os.path.join(tmp_dir, _VOCAB), "r") as infile:
reader = csv.reader(infile, delimiter="\t")
words = [row[0] for row in reader]
words = [_UNK] + words[:vocab_size]
with tf.gfile.GFile(vocab_path, "w") as outfile:
outfile.write("\n".join(words))
|
[
"def",
"_prepare_lambada_data",
"(",
"tmp_dir",
",",
"data_dir",
",",
"vocab_size",
",",
"vocab_filename",
")",
":",
"if",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"data_dir",
")",
":",
"tf",
".",
"gfile",
".",
"MakeDirs",
"(",
"data_dir",
")",
"file_path",
"=",
"generator_utils",
".",
"maybe_download",
"(",
"tmp_dir",
",",
"_TAR",
",",
"_URL",
")",
"tar_all",
"=",
"tarfile",
".",
"open",
"(",
"file_path",
")",
"tar_all",
".",
"extractall",
"(",
"tmp_dir",
")",
"tar_all",
".",
"close",
"(",
")",
"tar_train",
"=",
"tarfile",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"\"train-novels.tar\"",
")",
")",
"tar_train",
".",
"extractall",
"(",
"tmp_dir",
")",
"tar_train",
".",
"close",
"(",
")",
"vocab_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"vocab_filename",
")",
"if",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"vocab_path",
")",
":",
"with",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"_VOCAB",
")",
",",
"\"r\"",
")",
"as",
"infile",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"infile",
",",
"delimiter",
"=",
"\"\\t\"",
")",
"words",
"=",
"[",
"row",
"[",
"0",
"]",
"for",
"row",
"in",
"reader",
"]",
"words",
"=",
"[",
"_UNK",
"]",
"+",
"words",
"[",
":",
"vocab_size",
"]",
"with",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"vocab_path",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"words",
")",
")"
] |
Downloading and preparing the dataset.
Args:
tmp_dir: tem directory
data_dir: data directory
vocab_size: size of vocabulary
vocab_filename: name of vocab file
|
[
"Downloading",
"and",
"preparing",
"the",
"dataset",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/lambada.py#L57-L86
|
22,256
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/lambada.py
|
get_dataset_split
|
def get_dataset_split(tmp_dir, split, use_control_set):
"""Gives the file paths with regards to the given split.
Args:
tmp_dir: temp directory
split: dataset split
use_control_set: uses control dataset if true.
Returns:
list of file paths.
"""
if not use_control_set:
dataset_split = {
problem.DatasetSplit.TRAIN: [
f for f in tf.gfile.Glob(
os.path.join(tmp_dir, "train-novels/*/*.txt"))
],
problem.DatasetSplit.EVAL: [
os.path.join(tmp_dir, "lambada_development_plain_text.txt")
],
problem.DatasetSplit.TEST: [
os.path.join(tmp_dir, "lambada_test_plain_text.txt")
]
}
else:
dataset_split = {
problem.DatasetSplit.TRAIN: [
f for f in tf.gfile.Glob(
os.path.join(tmp_dir, "train-novels/*/*.txt"))
],
problem.DatasetSplit.EVAL: [
os.path.join(tmp_dir, "lambada_control_test_data_plain_text.txt")
],
}
return dataset_split[split]
|
python
|
def get_dataset_split(tmp_dir, split, use_control_set):
"""Gives the file paths with regards to the given split.
Args:
tmp_dir: temp directory
split: dataset split
use_control_set: uses control dataset if true.
Returns:
list of file paths.
"""
if not use_control_set:
dataset_split = {
problem.DatasetSplit.TRAIN: [
f for f in tf.gfile.Glob(
os.path.join(tmp_dir, "train-novels/*/*.txt"))
],
problem.DatasetSplit.EVAL: [
os.path.join(tmp_dir, "lambada_development_plain_text.txt")
],
problem.DatasetSplit.TEST: [
os.path.join(tmp_dir, "lambada_test_plain_text.txt")
]
}
else:
dataset_split = {
problem.DatasetSplit.TRAIN: [
f for f in tf.gfile.Glob(
os.path.join(tmp_dir, "train-novels/*/*.txt"))
],
problem.DatasetSplit.EVAL: [
os.path.join(tmp_dir, "lambada_control_test_data_plain_text.txt")
],
}
return dataset_split[split]
|
[
"def",
"get_dataset_split",
"(",
"tmp_dir",
",",
"split",
",",
"use_control_set",
")",
":",
"if",
"not",
"use_control_set",
":",
"dataset_split",
"=",
"{",
"problem",
".",
"DatasetSplit",
".",
"TRAIN",
":",
"[",
"f",
"for",
"f",
"in",
"tf",
".",
"gfile",
".",
"Glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"\"train-novels/*/*.txt\"",
")",
")",
"]",
",",
"problem",
".",
"DatasetSplit",
".",
"EVAL",
":",
"[",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"\"lambada_development_plain_text.txt\"",
")",
"]",
",",
"problem",
".",
"DatasetSplit",
".",
"TEST",
":",
"[",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"\"lambada_test_plain_text.txt\"",
")",
"]",
"}",
"else",
":",
"dataset_split",
"=",
"{",
"problem",
".",
"DatasetSplit",
".",
"TRAIN",
":",
"[",
"f",
"for",
"f",
"in",
"tf",
".",
"gfile",
".",
"Glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"\"train-novels/*/*.txt\"",
")",
")",
"]",
",",
"problem",
".",
"DatasetSplit",
".",
"EVAL",
":",
"[",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"\"lambada_control_test_data_plain_text.txt\"",
")",
"]",
",",
"}",
"return",
"dataset_split",
"[",
"split",
"]"
] |
Gives the file paths with regards to the given split.
Args:
tmp_dir: temp directory
split: dataset split
use_control_set: uses control dataset if true.
Returns:
list of file paths.
|
[
"Gives",
"the",
"file",
"paths",
"with",
"regards",
"to",
"the",
"given",
"split",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/lambada.py#L89-L126
|
22,257
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/transduction_problems.py
|
TransductionProblem.min_sequence_length
|
def min_sequence_length(self, dataset_split):
"""Determine the minimum sequence length given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The minimum length that a sequence can be for this dataset_split.
"""
return {
problem.DatasetSplit.TRAIN: 8,
problem.DatasetSplit.EVAL: 65,
problem.DatasetSplit.TEST: 65
}[dataset_split]
|
python
|
def min_sequence_length(self, dataset_split):
"""Determine the minimum sequence length given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The minimum length that a sequence can be for this dataset_split.
"""
return {
problem.DatasetSplit.TRAIN: 8,
problem.DatasetSplit.EVAL: 65,
problem.DatasetSplit.TEST: 65
}[dataset_split]
|
[
"def",
"min_sequence_length",
"(",
"self",
",",
"dataset_split",
")",
":",
"return",
"{",
"problem",
".",
"DatasetSplit",
".",
"TRAIN",
":",
"8",
",",
"problem",
".",
"DatasetSplit",
".",
"EVAL",
":",
"65",
",",
"problem",
".",
"DatasetSplit",
".",
"TEST",
":",
"65",
"}",
"[",
"dataset_split",
"]"
] |
Determine the minimum sequence length given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The minimum length that a sequence can be for this dataset_split.
|
[
"Determine",
"the",
"minimum",
"sequence",
"length",
"given",
"a",
"dataset_split",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/transduction_problems.py#L63-L76
|
22,258
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/transduction_problems.py
|
TransductionProblem.max_sequence_length
|
def max_sequence_length(self, dataset_split):
"""Determine the maximum sequence length given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The maximum length that a sequence can be for this dataset_split.
"""
return {
problem.DatasetSplit.TRAIN: 64,
problem.DatasetSplit.EVAL: 128,
problem.DatasetSplit.TEST: 128
}[dataset_split]
|
python
|
def max_sequence_length(self, dataset_split):
"""Determine the maximum sequence length given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The maximum length that a sequence can be for this dataset_split.
"""
return {
problem.DatasetSplit.TRAIN: 64,
problem.DatasetSplit.EVAL: 128,
problem.DatasetSplit.TEST: 128
}[dataset_split]
|
[
"def",
"max_sequence_length",
"(",
"self",
",",
"dataset_split",
")",
":",
"return",
"{",
"problem",
".",
"DatasetSplit",
".",
"TRAIN",
":",
"64",
",",
"problem",
".",
"DatasetSplit",
".",
"EVAL",
":",
"128",
",",
"problem",
".",
"DatasetSplit",
".",
"TEST",
":",
"128",
"}",
"[",
"dataset_split",
"]"
] |
Determine the maximum sequence length given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The maximum length that a sequence can be for this dataset_split.
|
[
"Determine",
"the",
"maximum",
"sequence",
"length",
"given",
"a",
"dataset_split",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/transduction_problems.py#L78-L91
|
22,259
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/transduction_problems.py
|
TransductionProblem.num_samples
|
def num_samples(self, dataset_split):
"""Determine the dataset sized given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The desired number of samples for this dataset_split.
"""
return {
problem.DatasetSplit.TRAIN: 1000000,
problem.DatasetSplit.EVAL: 10000,
problem.DatasetSplit.TEST: 10000
}[dataset_split]
|
python
|
def num_samples(self, dataset_split):
"""Determine the dataset sized given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The desired number of samples for this dataset_split.
"""
return {
problem.DatasetSplit.TRAIN: 1000000,
problem.DatasetSplit.EVAL: 10000,
problem.DatasetSplit.TEST: 10000
}[dataset_split]
|
[
"def",
"num_samples",
"(",
"self",
",",
"dataset_split",
")",
":",
"return",
"{",
"problem",
".",
"DatasetSplit",
".",
"TRAIN",
":",
"1000000",
",",
"problem",
".",
"DatasetSplit",
".",
"EVAL",
":",
"10000",
",",
"problem",
".",
"DatasetSplit",
".",
"TEST",
":",
"10000",
"}",
"[",
"dataset_split",
"]"
] |
Determine the dataset sized given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The desired number of samples for this dataset_split.
|
[
"Determine",
"the",
"dataset",
"sized",
"given",
"a",
"dataset_split",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/transduction_problems.py#L93-L106
|
22,260
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/trainer_lib.py
|
create_session_config
|
def create_session_config(log_device_placement=False,
enable_graph_rewriter=False,
gpu_mem_fraction=0.95,
use_tpu=False,
xla_jit_level=tf.OptimizerOptions.OFF,
inter_op_parallelism_threads=0,
intra_op_parallelism_threads=0):
"""The TensorFlow Session config to use."""
if use_tpu:
graph_options = tf.GraphOptions()
else:
if enable_graph_rewriter:
rewrite_options = rewriter_config_pb2.RewriterConfig()
rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
graph_options = tf.GraphOptions(rewrite_options=rewrite_options)
else:
graph_options = tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L1,
do_function_inlining=False,
global_jit_level=xla_jit_level))
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction)
config = tf.ConfigProto(
allow_soft_placement=True,
graph_options=graph_options,
gpu_options=gpu_options,
log_device_placement=log_device_placement,
inter_op_parallelism_threads=inter_op_parallelism_threads,
intra_op_parallelism_threads=intra_op_parallelism_threads,
isolate_session_state=True)
return config
|
python
|
def create_session_config(log_device_placement=False,
enable_graph_rewriter=False,
gpu_mem_fraction=0.95,
use_tpu=False,
xla_jit_level=tf.OptimizerOptions.OFF,
inter_op_parallelism_threads=0,
intra_op_parallelism_threads=0):
"""The TensorFlow Session config to use."""
if use_tpu:
graph_options = tf.GraphOptions()
else:
if enable_graph_rewriter:
rewrite_options = rewriter_config_pb2.RewriterConfig()
rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
graph_options = tf.GraphOptions(rewrite_options=rewrite_options)
else:
graph_options = tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L1,
do_function_inlining=False,
global_jit_level=xla_jit_level))
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction)
config = tf.ConfigProto(
allow_soft_placement=True,
graph_options=graph_options,
gpu_options=gpu_options,
log_device_placement=log_device_placement,
inter_op_parallelism_threads=inter_op_parallelism_threads,
intra_op_parallelism_threads=intra_op_parallelism_threads,
isolate_session_state=True)
return config
|
[
"def",
"create_session_config",
"(",
"log_device_placement",
"=",
"False",
",",
"enable_graph_rewriter",
"=",
"False",
",",
"gpu_mem_fraction",
"=",
"0.95",
",",
"use_tpu",
"=",
"False",
",",
"xla_jit_level",
"=",
"tf",
".",
"OptimizerOptions",
".",
"OFF",
",",
"inter_op_parallelism_threads",
"=",
"0",
",",
"intra_op_parallelism_threads",
"=",
"0",
")",
":",
"if",
"use_tpu",
":",
"graph_options",
"=",
"tf",
".",
"GraphOptions",
"(",
")",
"else",
":",
"if",
"enable_graph_rewriter",
":",
"rewrite_options",
"=",
"rewriter_config_pb2",
".",
"RewriterConfig",
"(",
")",
"rewrite_options",
".",
"layout_optimizer",
"=",
"rewriter_config_pb2",
".",
"RewriterConfig",
".",
"ON",
"graph_options",
"=",
"tf",
".",
"GraphOptions",
"(",
"rewrite_options",
"=",
"rewrite_options",
")",
"else",
":",
"graph_options",
"=",
"tf",
".",
"GraphOptions",
"(",
"optimizer_options",
"=",
"tf",
".",
"OptimizerOptions",
"(",
"opt_level",
"=",
"tf",
".",
"OptimizerOptions",
".",
"L1",
",",
"do_function_inlining",
"=",
"False",
",",
"global_jit_level",
"=",
"xla_jit_level",
")",
")",
"gpu_options",
"=",
"tf",
".",
"GPUOptions",
"(",
"per_process_gpu_memory_fraction",
"=",
"gpu_mem_fraction",
")",
"config",
"=",
"tf",
".",
"ConfigProto",
"(",
"allow_soft_placement",
"=",
"True",
",",
"graph_options",
"=",
"graph_options",
",",
"gpu_options",
"=",
"gpu_options",
",",
"log_device_placement",
"=",
"log_device_placement",
",",
"inter_op_parallelism_threads",
"=",
"inter_op_parallelism_threads",
",",
"intra_op_parallelism_threads",
"=",
"intra_op_parallelism_threads",
",",
"isolate_session_state",
"=",
"True",
")",
"return",
"config"
] |
The TensorFlow Session config to use.
|
[
"The",
"TensorFlow",
"Session",
"config",
"to",
"use",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/trainer_lib.py#L105-L137
|
22,261
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/trainer_lib.py
|
create_estimator
|
def create_estimator(model_name,
hparams,
run_config,
schedule="train_and_evaluate",
decode_hparams=None,
use_tpu=False,
use_tpu_estimator=False,
use_xla=False):
"""Create a T2T Estimator."""
model_fn = t2t_model.T2TModel.make_estimator_model_fn(
model_name, hparams, decode_hparams=decode_hparams, use_tpu=use_tpu)
del use_xla
if use_tpu or use_tpu_estimator:
problem = hparams.problem
batch_size = (
problem.tpu_batch_size_per_shard(hparams) *
run_config.tpu_config.num_shards)
mlperf_log.transformer_print(
key=mlperf_log.INPUT_BATCH_SIZE, value=batch_size)
if getattr(hparams, "mtf_mode", False):
batch_size = problem.tpu_batch_size_per_shard(hparams)
predict_batch_size = batch_size
if decode_hparams and decode_hparams.batch_size:
predict_batch_size = decode_hparams.batch_size
if decode_hparams and run_config.tpu_config:
decode_hparams.add_hparam("iterations_per_loop",
run_config.tpu_config.iterations_per_loop)
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
model_dir=run_config.model_dir,
config=run_config,
use_tpu=use_tpu,
train_batch_size=batch_size,
eval_batch_size=batch_size if "eval" in schedule else None,
predict_batch_size=predict_batch_size,
experimental_export_device_assignment=True)
else:
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=run_config.model_dir,
config=run_config,
)
return estimator
|
python
|
def create_estimator(model_name,
hparams,
run_config,
schedule="train_and_evaluate",
decode_hparams=None,
use_tpu=False,
use_tpu_estimator=False,
use_xla=False):
"""Create a T2T Estimator."""
model_fn = t2t_model.T2TModel.make_estimator_model_fn(
model_name, hparams, decode_hparams=decode_hparams, use_tpu=use_tpu)
del use_xla
if use_tpu or use_tpu_estimator:
problem = hparams.problem
batch_size = (
problem.tpu_batch_size_per_shard(hparams) *
run_config.tpu_config.num_shards)
mlperf_log.transformer_print(
key=mlperf_log.INPUT_BATCH_SIZE, value=batch_size)
if getattr(hparams, "mtf_mode", False):
batch_size = problem.tpu_batch_size_per_shard(hparams)
predict_batch_size = batch_size
if decode_hparams and decode_hparams.batch_size:
predict_batch_size = decode_hparams.batch_size
if decode_hparams and run_config.tpu_config:
decode_hparams.add_hparam("iterations_per_loop",
run_config.tpu_config.iterations_per_loop)
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
model_dir=run_config.model_dir,
config=run_config,
use_tpu=use_tpu,
train_batch_size=batch_size,
eval_batch_size=batch_size if "eval" in schedule else None,
predict_batch_size=predict_batch_size,
experimental_export_device_assignment=True)
else:
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=run_config.model_dir,
config=run_config,
)
return estimator
|
[
"def",
"create_estimator",
"(",
"model_name",
",",
"hparams",
",",
"run_config",
",",
"schedule",
"=",
"\"train_and_evaluate\"",
",",
"decode_hparams",
"=",
"None",
",",
"use_tpu",
"=",
"False",
",",
"use_tpu_estimator",
"=",
"False",
",",
"use_xla",
"=",
"False",
")",
":",
"model_fn",
"=",
"t2t_model",
".",
"T2TModel",
".",
"make_estimator_model_fn",
"(",
"model_name",
",",
"hparams",
",",
"decode_hparams",
"=",
"decode_hparams",
",",
"use_tpu",
"=",
"use_tpu",
")",
"del",
"use_xla",
"if",
"use_tpu",
"or",
"use_tpu_estimator",
":",
"problem",
"=",
"hparams",
".",
"problem",
"batch_size",
"=",
"(",
"problem",
".",
"tpu_batch_size_per_shard",
"(",
"hparams",
")",
"*",
"run_config",
".",
"tpu_config",
".",
"num_shards",
")",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"INPUT_BATCH_SIZE",
",",
"value",
"=",
"batch_size",
")",
"if",
"getattr",
"(",
"hparams",
",",
"\"mtf_mode\"",
",",
"False",
")",
":",
"batch_size",
"=",
"problem",
".",
"tpu_batch_size_per_shard",
"(",
"hparams",
")",
"predict_batch_size",
"=",
"batch_size",
"if",
"decode_hparams",
"and",
"decode_hparams",
".",
"batch_size",
":",
"predict_batch_size",
"=",
"decode_hparams",
".",
"batch_size",
"if",
"decode_hparams",
"and",
"run_config",
".",
"tpu_config",
":",
"decode_hparams",
".",
"add_hparam",
"(",
"\"iterations_per_loop\"",
",",
"run_config",
".",
"tpu_config",
".",
"iterations_per_loop",
")",
"estimator",
"=",
"tf",
".",
"contrib",
".",
"tpu",
".",
"TPUEstimator",
"(",
"model_fn",
"=",
"model_fn",
",",
"model_dir",
"=",
"run_config",
".",
"model_dir",
",",
"config",
"=",
"run_config",
",",
"use_tpu",
"=",
"use_tpu",
",",
"train_batch_size",
"=",
"batch_size",
",",
"eval_batch_size",
"=",
"batch_size",
"if",
"\"eval\"",
"in",
"schedule",
"else",
"None",
",",
"predict_batch_size",
"=",
"predict_batch_size",
",",
"experimental_export_device_assignment",
"=",
"True",
")",
"else",
":",
"estimator",
"=",
"tf",
".",
"estimator",
".",
"Estimator",
"(",
"model_fn",
"=",
"model_fn",
",",
"model_dir",
"=",
"run_config",
".",
"model_dir",
",",
"config",
"=",
"run_config",
",",
")",
"return",
"estimator"
] |
Create a T2T Estimator.
|
[
"Create",
"a",
"T2T",
"Estimator",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/trainer_lib.py#L281-L325
|
22,262
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/trainer_lib.py
|
create_hooks
|
def create_hooks(use_tfdbg=False,
use_dbgprofile=False,
dbgprofile_kwargs=None,
use_validation_monitor=False,
validation_monitor_kwargs=None,
use_early_stopping=False,
early_stopping_kwargs=None):
"""Create train and eval hooks for Experiment."""
train_hooks = []
eval_hooks = []
if use_tfdbg:
hook = debug.LocalCLIDebugHook()
train_hooks.append(hook)
eval_hooks.append(hook)
if use_dbgprofile:
# Recorded traces can be visualized with chrome://tracing/
# The memory/tensor lifetime is also profiled
tf.logging.info("Using ProfilerHook")
defaults = dict(save_steps=10, show_dataflow=True, show_memory=True)
defaults.update(dbgprofile_kwargs)
train_hooks.append(tf.train.ProfilerHook(**defaults))
if use_validation_monitor:
tf.logging.info("Using ValidationMonitor")
train_hooks.append(
tf.contrib.learn.monitors.ValidationMonitor(
hooks=eval_hooks, **validation_monitor_kwargs))
if use_early_stopping:
tf.logging.info("Using EarlyStoppingHook")
hook = metrics_hook.EarlyStoppingHook(**early_stopping_kwargs)
# Adding to both training and eval so that eval aborts as well
train_hooks.append(hook)
eval_hooks.append(hook)
return train_hooks, eval_hooks
|
python
|
def create_hooks(use_tfdbg=False,
use_dbgprofile=False,
dbgprofile_kwargs=None,
use_validation_monitor=False,
validation_monitor_kwargs=None,
use_early_stopping=False,
early_stopping_kwargs=None):
"""Create train and eval hooks for Experiment."""
train_hooks = []
eval_hooks = []
if use_tfdbg:
hook = debug.LocalCLIDebugHook()
train_hooks.append(hook)
eval_hooks.append(hook)
if use_dbgprofile:
# Recorded traces can be visualized with chrome://tracing/
# The memory/tensor lifetime is also profiled
tf.logging.info("Using ProfilerHook")
defaults = dict(save_steps=10, show_dataflow=True, show_memory=True)
defaults.update(dbgprofile_kwargs)
train_hooks.append(tf.train.ProfilerHook(**defaults))
if use_validation_monitor:
tf.logging.info("Using ValidationMonitor")
train_hooks.append(
tf.contrib.learn.monitors.ValidationMonitor(
hooks=eval_hooks, **validation_monitor_kwargs))
if use_early_stopping:
tf.logging.info("Using EarlyStoppingHook")
hook = metrics_hook.EarlyStoppingHook(**early_stopping_kwargs)
# Adding to both training and eval so that eval aborts as well
train_hooks.append(hook)
eval_hooks.append(hook)
return train_hooks, eval_hooks
|
[
"def",
"create_hooks",
"(",
"use_tfdbg",
"=",
"False",
",",
"use_dbgprofile",
"=",
"False",
",",
"dbgprofile_kwargs",
"=",
"None",
",",
"use_validation_monitor",
"=",
"False",
",",
"validation_monitor_kwargs",
"=",
"None",
",",
"use_early_stopping",
"=",
"False",
",",
"early_stopping_kwargs",
"=",
"None",
")",
":",
"train_hooks",
"=",
"[",
"]",
"eval_hooks",
"=",
"[",
"]",
"if",
"use_tfdbg",
":",
"hook",
"=",
"debug",
".",
"LocalCLIDebugHook",
"(",
")",
"train_hooks",
".",
"append",
"(",
"hook",
")",
"eval_hooks",
".",
"append",
"(",
"hook",
")",
"if",
"use_dbgprofile",
":",
"# Recorded traces can be visualized with chrome://tracing/",
"# The memory/tensor lifetime is also profiled",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Using ProfilerHook\"",
")",
"defaults",
"=",
"dict",
"(",
"save_steps",
"=",
"10",
",",
"show_dataflow",
"=",
"True",
",",
"show_memory",
"=",
"True",
")",
"defaults",
".",
"update",
"(",
"dbgprofile_kwargs",
")",
"train_hooks",
".",
"append",
"(",
"tf",
".",
"train",
".",
"ProfilerHook",
"(",
"*",
"*",
"defaults",
")",
")",
"if",
"use_validation_monitor",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Using ValidationMonitor\"",
")",
"train_hooks",
".",
"append",
"(",
"tf",
".",
"contrib",
".",
"learn",
".",
"monitors",
".",
"ValidationMonitor",
"(",
"hooks",
"=",
"eval_hooks",
",",
"*",
"*",
"validation_monitor_kwargs",
")",
")",
"if",
"use_early_stopping",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Using EarlyStoppingHook\"",
")",
"hook",
"=",
"metrics_hook",
".",
"EarlyStoppingHook",
"(",
"*",
"*",
"early_stopping_kwargs",
")",
"# Adding to both training and eval so that eval aborts as well",
"train_hooks",
".",
"append",
"(",
"hook",
")",
"eval_hooks",
".",
"append",
"(",
"hook",
")",
"return",
"train_hooks",
",",
"eval_hooks"
] |
Create train and eval hooks for Experiment.
|
[
"Create",
"train",
"and",
"eval",
"hooks",
"for",
"Experiment",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/trainer_lib.py#L328-L365
|
22,263
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/trainer_lib.py
|
create_experiment_fn
|
def create_experiment_fn(*args, **kwargs):
"""Wrapper for canonical experiment_fn. See create_experiment."""
def experiment_fn(run_config, hparams):
return create_experiment(run_config, hparams, *args, **kwargs)
return experiment_fn
|
python
|
def create_experiment_fn(*args, **kwargs):
"""Wrapper for canonical experiment_fn. See create_experiment."""
def experiment_fn(run_config, hparams):
return create_experiment(run_config, hparams, *args, **kwargs)
return experiment_fn
|
[
"def",
"create_experiment_fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"experiment_fn",
"(",
"run_config",
",",
"hparams",
")",
":",
"return",
"create_experiment",
"(",
"run_config",
",",
"hparams",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"experiment_fn"
] |
Wrapper for canonical experiment_fn. See create_experiment.
|
[
"Wrapper",
"for",
"canonical",
"experiment_fn",
".",
"See",
"create_experiment",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/trainer_lib.py#L770-L776
|
22,264
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/trainer_lib.py
|
restore_checkpoint
|
def restore_checkpoint(ckpt_dir, saver, sess, must_restore=False):
"""Restore from a checkpoint."""
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
if must_restore and not ckpt:
raise ValueError("No checkpoint found in %s" % ckpt_dir)
if not ckpt:
return 0
path = ckpt.model_checkpoint_path
tf.logging.info("Restoring checkpoint %s", path)
saver.restore(sess, path)
step = int(path.split("-")[-1])
return step
|
python
|
def restore_checkpoint(ckpt_dir, saver, sess, must_restore=False):
"""Restore from a checkpoint."""
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
if must_restore and not ckpt:
raise ValueError("No checkpoint found in %s" % ckpt_dir)
if not ckpt:
return 0
path = ckpt.model_checkpoint_path
tf.logging.info("Restoring checkpoint %s", path)
saver.restore(sess, path)
step = int(path.split("-")[-1])
return step
|
[
"def",
"restore_checkpoint",
"(",
"ckpt_dir",
",",
"saver",
",",
"sess",
",",
"must_restore",
"=",
"False",
")",
":",
"ckpt",
"=",
"tf",
".",
"train",
".",
"get_checkpoint_state",
"(",
"ckpt_dir",
")",
"if",
"must_restore",
"and",
"not",
"ckpt",
":",
"raise",
"ValueError",
"(",
"\"No checkpoint found in %s\"",
"%",
"ckpt_dir",
")",
"if",
"not",
"ckpt",
":",
"return",
"0",
"path",
"=",
"ckpt",
".",
"model_checkpoint_path",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Restoring checkpoint %s\"",
",",
"path",
")",
"saver",
".",
"restore",
"(",
"sess",
",",
"path",
")",
"step",
"=",
"int",
"(",
"path",
".",
"split",
"(",
"\"-\"",
")",
"[",
"-",
"1",
"]",
")",
"return",
"step"
] |
Restore from a checkpoint.
|
[
"Restore",
"from",
"a",
"checkpoint",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/trainer_lib.py#L785-L797
|
22,265
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/trainer_lib.py
|
T2TExperiment.train_eval_and_decode
|
def train_eval_and_decode(self):
"""Does eval and decode after training every eval_freq_in_steps."""
eval_steps = self._hparams.eval_freq_in_steps
packed_dataset = "_packed" in self._hparams.problem.name
mlperf_log.transformer_print(key=mlperf_log.TRAIN_LOOP)
for i in range(0, self._train_spec.max_steps, eval_steps):
mlperf_log.transformer_print(
key=mlperf_log.TRAIN_EPOCH, value=i // eval_steps)
if packed_dataset and i > 0:
problem = registry.problem(self._hparams.problem.name + "_packed")
p_hparams = problem.get_hparams(self._hparams)
self._hparams.problem = problem
self._hparams.problem_hparams = p_hparams
self._estimator.train(
self._train_spec.input_fn,
steps=eval_steps,
hooks=self._train_spec.hooks)
self._set_eval_dir_name("eval")
self._estimator.evaluate(
self._eval_spec.input_fn,
steps=self._eval_spec.steps,
hooks=self._eval_spec.hooks,
name="eval")
if packed_dataset:
problem = registry.problem(
self._hparams.problem.name.replace("_packed", ""))
p_hparams = problem.get_hparams(self._hparams)
self._hparams.problem = problem
self._hparams.problem_hparams = p_hparams
mlperf_log.transformer_print(key=mlperf_log.EVAL_START)
if self._hparams.mlperf_mode:
self._decode_hparams.mlperf_decode_step = i + eval_steps
self.decode(dataset_split=tf.estimator.ModeKeys.EVAL)
d_hparams = self._decode_hparams
if self._hparams.mlperf_mode and d_hparams.mlperf_success:
mlperf_log.transformer_print(
key=mlperf_log.RUN_STOP, value={"success": "true"})
break
d_hparams = self._decode_hparams
if self._hparams.mlperf_mode and not d_hparams.mlperf_success:
mlperf_log.transformer_print(
key=mlperf_log.RUN_STOP, value={"success": "false"})
|
python
|
def train_eval_and_decode(self):
"""Does eval and decode after training every eval_freq_in_steps."""
eval_steps = self._hparams.eval_freq_in_steps
packed_dataset = "_packed" in self._hparams.problem.name
mlperf_log.transformer_print(key=mlperf_log.TRAIN_LOOP)
for i in range(0, self._train_spec.max_steps, eval_steps):
mlperf_log.transformer_print(
key=mlperf_log.TRAIN_EPOCH, value=i // eval_steps)
if packed_dataset and i > 0:
problem = registry.problem(self._hparams.problem.name + "_packed")
p_hparams = problem.get_hparams(self._hparams)
self._hparams.problem = problem
self._hparams.problem_hparams = p_hparams
self._estimator.train(
self._train_spec.input_fn,
steps=eval_steps,
hooks=self._train_spec.hooks)
self._set_eval_dir_name("eval")
self._estimator.evaluate(
self._eval_spec.input_fn,
steps=self._eval_spec.steps,
hooks=self._eval_spec.hooks,
name="eval")
if packed_dataset:
problem = registry.problem(
self._hparams.problem.name.replace("_packed", ""))
p_hparams = problem.get_hparams(self._hparams)
self._hparams.problem = problem
self._hparams.problem_hparams = p_hparams
mlperf_log.transformer_print(key=mlperf_log.EVAL_START)
if self._hparams.mlperf_mode:
self._decode_hparams.mlperf_decode_step = i + eval_steps
self.decode(dataset_split=tf.estimator.ModeKeys.EVAL)
d_hparams = self._decode_hparams
if self._hparams.mlperf_mode and d_hparams.mlperf_success:
mlperf_log.transformer_print(
key=mlperf_log.RUN_STOP, value={"success": "true"})
break
d_hparams = self._decode_hparams
if self._hparams.mlperf_mode and not d_hparams.mlperf_success:
mlperf_log.transformer_print(
key=mlperf_log.RUN_STOP, value={"success": "false"})
|
[
"def",
"train_eval_and_decode",
"(",
"self",
")",
":",
"eval_steps",
"=",
"self",
".",
"_hparams",
".",
"eval_freq_in_steps",
"packed_dataset",
"=",
"\"_packed\"",
"in",
"self",
".",
"_hparams",
".",
"problem",
".",
"name",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"TRAIN_LOOP",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"_train_spec",
".",
"max_steps",
",",
"eval_steps",
")",
":",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"TRAIN_EPOCH",
",",
"value",
"=",
"i",
"//",
"eval_steps",
")",
"if",
"packed_dataset",
"and",
"i",
">",
"0",
":",
"problem",
"=",
"registry",
".",
"problem",
"(",
"self",
".",
"_hparams",
".",
"problem",
".",
"name",
"+",
"\"_packed\"",
")",
"p_hparams",
"=",
"problem",
".",
"get_hparams",
"(",
"self",
".",
"_hparams",
")",
"self",
".",
"_hparams",
".",
"problem",
"=",
"problem",
"self",
".",
"_hparams",
".",
"problem_hparams",
"=",
"p_hparams",
"self",
".",
"_estimator",
".",
"train",
"(",
"self",
".",
"_train_spec",
".",
"input_fn",
",",
"steps",
"=",
"eval_steps",
",",
"hooks",
"=",
"self",
".",
"_train_spec",
".",
"hooks",
")",
"self",
".",
"_set_eval_dir_name",
"(",
"\"eval\"",
")",
"self",
".",
"_estimator",
".",
"evaluate",
"(",
"self",
".",
"_eval_spec",
".",
"input_fn",
",",
"steps",
"=",
"self",
".",
"_eval_spec",
".",
"steps",
",",
"hooks",
"=",
"self",
".",
"_eval_spec",
".",
"hooks",
",",
"name",
"=",
"\"eval\"",
")",
"if",
"packed_dataset",
":",
"problem",
"=",
"registry",
".",
"problem",
"(",
"self",
".",
"_hparams",
".",
"problem",
".",
"name",
".",
"replace",
"(",
"\"_packed\"",
",",
"\"\"",
")",
")",
"p_hparams",
"=",
"problem",
".",
"get_hparams",
"(",
"self",
".",
"_hparams",
")",
"self",
".",
"_hparams",
".",
"problem",
"=",
"problem",
"self",
".",
"_hparams",
".",
"problem_hparams",
"=",
"p_hparams",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"EVAL_START",
")",
"if",
"self",
".",
"_hparams",
".",
"mlperf_mode",
":",
"self",
".",
"_decode_hparams",
".",
"mlperf_decode_step",
"=",
"i",
"+",
"eval_steps",
"self",
".",
"decode",
"(",
"dataset_split",
"=",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
")",
"d_hparams",
"=",
"self",
".",
"_decode_hparams",
"if",
"self",
".",
"_hparams",
".",
"mlperf_mode",
"and",
"d_hparams",
".",
"mlperf_success",
":",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"RUN_STOP",
",",
"value",
"=",
"{",
"\"success\"",
":",
"\"true\"",
"}",
")",
"break",
"d_hparams",
"=",
"self",
".",
"_decode_hparams",
"if",
"self",
".",
"_hparams",
".",
"mlperf_mode",
"and",
"not",
"d_hparams",
".",
"mlperf_success",
":",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"RUN_STOP",
",",
"value",
"=",
"{",
"\"success\"",
":",
"\"false\"",
"}",
")"
] |
Does eval and decode after training every eval_freq_in_steps.
|
[
"Does",
"eval",
"and",
"decode",
"after",
"training",
"every",
"eval_freq_in_steps",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/trainer_lib.py#L419-L461
|
22,266
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/trainer_lib.py
|
T2TExperiment.continuous_eval
|
def continuous_eval(self):
"""Evaluate until checkpoints stop being produced."""
for ckpt_path in next_checkpoint(self._hparams.model_dir,
self._hparams.eval_timeout_mins):
# Skip zero'th step.
train_step = decoding.get_step_from_ckpt_path(ckpt_path)
if train_step == 0:
tf.logging.info("Skipping evaluation at step 0")
continue
self.evaluate()
|
python
|
def continuous_eval(self):
"""Evaluate until checkpoints stop being produced."""
for ckpt_path in next_checkpoint(self._hparams.model_dir,
self._hparams.eval_timeout_mins):
# Skip zero'th step.
train_step = decoding.get_step_from_ckpt_path(ckpt_path)
if train_step == 0:
tf.logging.info("Skipping evaluation at step 0")
continue
self.evaluate()
|
[
"def",
"continuous_eval",
"(",
"self",
")",
":",
"for",
"ckpt_path",
"in",
"next_checkpoint",
"(",
"self",
".",
"_hparams",
".",
"model_dir",
",",
"self",
".",
"_hparams",
".",
"eval_timeout_mins",
")",
":",
"# Skip zero'th step.",
"train_step",
"=",
"decoding",
".",
"get_step_from_ckpt_path",
"(",
"ckpt_path",
")",
"if",
"train_step",
"==",
"0",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Skipping evaluation at step 0\"",
")",
"continue",
"self",
".",
"evaluate",
"(",
")"
] |
Evaluate until checkpoints stop being produced.
|
[
"Evaluate",
"until",
"checkpoints",
"stop",
"being",
"produced",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/trainer_lib.py#L488-L497
|
22,267
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/trainer_lib.py
|
T2TExperiment.continuous_eval_on_train_data
|
def continuous_eval_on_train_data(self):
"""Evaluate on train data until checkpoints stop being produced."""
for ckpt_path in next_checkpoint(self._hparams.model_dir,
self._hparams.eval_timeout_mins):
# Skip zero'th step.
train_step = decoding.get_step_from_ckpt_path(ckpt_path)
if train_step == 0:
tf.logging.info("Skipping evaluation at step 0")
continue
self.evaluate_on_train_data()
|
python
|
def continuous_eval_on_train_data(self):
"""Evaluate on train data until checkpoints stop being produced."""
for ckpt_path in next_checkpoint(self._hparams.model_dir,
self._hparams.eval_timeout_mins):
# Skip zero'th step.
train_step = decoding.get_step_from_ckpt_path(ckpt_path)
if train_step == 0:
tf.logging.info("Skipping evaluation at step 0")
continue
self.evaluate_on_train_data()
|
[
"def",
"continuous_eval_on_train_data",
"(",
"self",
")",
":",
"for",
"ckpt_path",
"in",
"next_checkpoint",
"(",
"self",
".",
"_hparams",
".",
"model_dir",
",",
"self",
".",
"_hparams",
".",
"eval_timeout_mins",
")",
":",
"# Skip zero'th step.",
"train_step",
"=",
"decoding",
".",
"get_step_from_ckpt_path",
"(",
"ckpt_path",
")",
"if",
"train_step",
"==",
"0",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Skipping evaluation at step 0\"",
")",
"continue",
"self",
".",
"evaluate_on_train_data",
"(",
")"
] |
Evaluate on train data until checkpoints stop being produced.
|
[
"Evaluate",
"on",
"train",
"data",
"until",
"checkpoints",
"stop",
"being",
"produced",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/trainer_lib.py#L499-L508
|
22,268
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/trainer_lib.py
|
T2TExperiment.run_std_server
|
def run_std_server(self):
"""Starts a TensorFlow server and joins the serving thread.
Typically used for parameter servers.
Raises:
ValueError: if not enough information is available in the estimator's
config to create a server.
"""
config = tf.estimator.RunConfig()
server = tf.train.Server(
config.cluster_spec,
job_name=config.task_type,
task_index=config.task_id,
protocol=config.protocol)
server.join()
|
python
|
def run_std_server(self):
"""Starts a TensorFlow server and joins the serving thread.
Typically used for parameter servers.
Raises:
ValueError: if not enough information is available in the estimator's
config to create a server.
"""
config = tf.estimator.RunConfig()
server = tf.train.Server(
config.cluster_spec,
job_name=config.task_type,
task_index=config.task_id,
protocol=config.protocol)
server.join()
|
[
"def",
"run_std_server",
"(",
"self",
")",
":",
"config",
"=",
"tf",
".",
"estimator",
".",
"RunConfig",
"(",
")",
"server",
"=",
"tf",
".",
"train",
".",
"Server",
"(",
"config",
".",
"cluster_spec",
",",
"job_name",
"=",
"config",
".",
"task_type",
",",
"task_index",
"=",
"config",
".",
"task_id",
",",
"protocol",
"=",
"config",
".",
"protocol",
")",
"server",
".",
"join",
"(",
")"
] |
Starts a TensorFlow server and joins the serving thread.
Typically used for parameter servers.
Raises:
ValueError: if not enough information is available in the estimator's
config to create a server.
|
[
"Starts",
"a",
"TensorFlow",
"server",
"and",
"joins",
"the",
"serving",
"thread",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/trainer_lib.py#L521-L536
|
22,269
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/trainer_lib.py
|
T2TExperiment.decode
|
def decode(self,
dataset_split=None,
decode_from_file=False,
checkpoint_path=None):
"""Decodes from dataset or file."""
if decode_from_file:
decoding.decode_from_file(self._estimator,
self._decode_hparams.decode_from_file,
self._hparams,
self._decode_hparams,
self._decode_hparams.decode_to_file)
else:
decoding.decode_from_dataset(
self._estimator,
self._hparams.problem.name,
self._hparams,
self._decode_hparams,
dataset_split=dataset_split,
checkpoint_path=checkpoint_path)
|
python
|
def decode(self,
dataset_split=None,
decode_from_file=False,
checkpoint_path=None):
"""Decodes from dataset or file."""
if decode_from_file:
decoding.decode_from_file(self._estimator,
self._decode_hparams.decode_from_file,
self._hparams,
self._decode_hparams,
self._decode_hparams.decode_to_file)
else:
decoding.decode_from_dataset(
self._estimator,
self._hparams.problem.name,
self._hparams,
self._decode_hparams,
dataset_split=dataset_split,
checkpoint_path=checkpoint_path)
|
[
"def",
"decode",
"(",
"self",
",",
"dataset_split",
"=",
"None",
",",
"decode_from_file",
"=",
"False",
",",
"checkpoint_path",
"=",
"None",
")",
":",
"if",
"decode_from_file",
":",
"decoding",
".",
"decode_from_file",
"(",
"self",
".",
"_estimator",
",",
"self",
".",
"_decode_hparams",
".",
"decode_from_file",
",",
"self",
".",
"_hparams",
",",
"self",
".",
"_decode_hparams",
",",
"self",
".",
"_decode_hparams",
".",
"decode_to_file",
")",
"else",
":",
"decoding",
".",
"decode_from_dataset",
"(",
"self",
".",
"_estimator",
",",
"self",
".",
"_hparams",
".",
"problem",
".",
"name",
",",
"self",
".",
"_hparams",
",",
"self",
".",
"_decode_hparams",
",",
"dataset_split",
"=",
"dataset_split",
",",
"checkpoint_path",
"=",
"checkpoint_path",
")"
] |
Decodes from dataset or file.
|
[
"Decodes",
"from",
"dataset",
"or",
"file",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/trainer_lib.py#L538-L556
|
22,270
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/trainer_lib.py
|
T2TExperiment.continuous_decode_from_file
|
def continuous_decode_from_file(self):
"""Decode from file on new checkpoint."""
for _ in next_checkpoint(self._hparams.model_dir,
self._decode_hparams.decode_timeout_mins):
self.decode(decode_from_file=True)
|
python
|
def continuous_decode_from_file(self):
"""Decode from file on new checkpoint."""
for _ in next_checkpoint(self._hparams.model_dir,
self._decode_hparams.decode_timeout_mins):
self.decode(decode_from_file=True)
|
[
"def",
"continuous_decode_from_file",
"(",
"self",
")",
":",
"for",
"_",
"in",
"next_checkpoint",
"(",
"self",
".",
"_hparams",
".",
"model_dir",
",",
"self",
".",
"_decode_hparams",
".",
"decode_timeout_mins",
")",
":",
"self",
".",
"decode",
"(",
"decode_from_file",
"=",
"True",
")"
] |
Decode from file on new checkpoint.
|
[
"Decode",
"from",
"file",
"on",
"new",
"checkpoint",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/trainer_lib.py#L606-L610
|
22,271
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
_flatten_dict
|
def _flatten_dict(original_dict):
"""Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting.
"""
flat_dict = {}
for key, value in original_dict.items():
if isinstance(value, dict):
for name, tensor in value.items():
if isinstance(tensor, dict):
raise ValueError("flatten_dict only handles 2 levels of nesting.")
flat_key = "__" + key + "_" + name
flat_dict[flat_key] = tensor
else:
flat_dict[key] = value
return flat_dict
|
python
|
def _flatten_dict(original_dict):
"""Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting.
"""
flat_dict = {}
for key, value in original_dict.items():
if isinstance(value, dict):
for name, tensor in value.items():
if isinstance(tensor, dict):
raise ValueError("flatten_dict only handles 2 levels of nesting.")
flat_key = "__" + key + "_" + name
flat_dict[flat_key] = tensor
else:
flat_dict[key] = value
return flat_dict
|
[
"def",
"_flatten_dict",
"(",
"original_dict",
")",
":",
"flat_dict",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"original_dict",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"for",
"name",
",",
"tensor",
"in",
"value",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"tensor",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"\"flatten_dict only handles 2 levels of nesting.\"",
")",
"flat_key",
"=",
"\"__\"",
"+",
"key",
"+",
"\"_\"",
"+",
"name",
"flat_dict",
"[",
"flat_key",
"]",
"=",
"tensor",
"else",
":",
"flat_dict",
"[",
"key",
"]",
"=",
"value",
"return",
"flat_dict"
] |
Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting.
|
[
"Flatten",
"dict",
"of",
"dicts",
"into",
"a",
"single",
"dict",
"with",
"appropriate",
"prefixes",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L63-L87
|
22,272
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
_unflatten_dict
|
def _unflatten_dict(flat_dict, prefixes):
"""Returns a dict of dicts if any prefixes match keys in the flat dict.
The function handles the case where the prefix may not be a dict.
Args:
flat_dict: A dict without any nesting.
prefixes: A list of strings which may have been dicts in the
original structure.
"""
original_dict = {}
for key, value in flat_dict.items():
prefix_found = False
for prefix in prefixes:
full_prefix = "__" + prefix + "_"
if key.startswith(full_prefix):
# Add a dict to the original dict with key=prefix
if prefix not in original_dict:
original_dict[prefix] = {}
original_dict[prefix][key[len(full_prefix):]] = value
prefix_found = True
break
if not prefix_found:
# No key matched a prefix in the for loop.
original_dict[key] = value
return original_dict
|
python
|
def _unflatten_dict(flat_dict, prefixes):
"""Returns a dict of dicts if any prefixes match keys in the flat dict.
The function handles the case where the prefix may not be a dict.
Args:
flat_dict: A dict without any nesting.
prefixes: A list of strings which may have been dicts in the
original structure.
"""
original_dict = {}
for key, value in flat_dict.items():
prefix_found = False
for prefix in prefixes:
full_prefix = "__" + prefix + "_"
if key.startswith(full_prefix):
# Add a dict to the original dict with key=prefix
if prefix not in original_dict:
original_dict[prefix] = {}
original_dict[prefix][key[len(full_prefix):]] = value
prefix_found = True
break
if not prefix_found:
# No key matched a prefix in the for loop.
original_dict[key] = value
return original_dict
|
[
"def",
"_unflatten_dict",
"(",
"flat_dict",
",",
"prefixes",
")",
":",
"original_dict",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"flat_dict",
".",
"items",
"(",
")",
":",
"prefix_found",
"=",
"False",
"for",
"prefix",
"in",
"prefixes",
":",
"full_prefix",
"=",
"\"__\"",
"+",
"prefix",
"+",
"\"_\"",
"if",
"key",
".",
"startswith",
"(",
"full_prefix",
")",
":",
"# Add a dict to the original dict with key=prefix",
"if",
"prefix",
"not",
"in",
"original_dict",
":",
"original_dict",
"[",
"prefix",
"]",
"=",
"{",
"}",
"original_dict",
"[",
"prefix",
"]",
"[",
"key",
"[",
"len",
"(",
"full_prefix",
")",
":",
"]",
"]",
"=",
"value",
"prefix_found",
"=",
"True",
"break",
"if",
"not",
"prefix_found",
":",
"# No key matched a prefix in the for loop.",
"original_dict",
"[",
"key",
"]",
"=",
"value",
"return",
"original_dict"
] |
Returns a dict of dicts if any prefixes match keys in the flat dict.
The function handles the case where the prefix may not be a dict.
Args:
flat_dict: A dict without any nesting.
prefixes: A list of strings which may have been dicts in the
original structure.
|
[
"Returns",
"a",
"dict",
"of",
"dicts",
"if",
"any",
"prefixes",
"match",
"keys",
"in",
"the",
"flat",
"dict",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L90-L117
|
22,273
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
create_dummy_vars
|
def create_dummy_vars():
"""Dummy vars for restore to work when not using TPU codepath."""
var_names = set([v.name for v in tf.global_variables()])
if "losses_avg/problem_0/total_loss:0" in var_names:
return
with tf.variable_scope("losses_avg"):
with tf.variable_scope("problem_0"):
for var_name in ["total", "extra", "training"]:
tf.get_variable(
"%s_loss" % var_name, initializer=100.0, trainable=False)
with tf.variable_scope("train_stats"):
tf.get_variable("problem_0_steps", initializer=0, trainable=False)
|
python
|
def create_dummy_vars():
"""Dummy vars for restore to work when not using TPU codepath."""
var_names = set([v.name for v in tf.global_variables()])
if "losses_avg/problem_0/total_loss:0" in var_names:
return
with tf.variable_scope("losses_avg"):
with tf.variable_scope("problem_0"):
for var_name in ["total", "extra", "training"]:
tf.get_variable(
"%s_loss" % var_name, initializer=100.0, trainable=False)
with tf.variable_scope("train_stats"):
tf.get_variable("problem_0_steps", initializer=0, trainable=False)
|
[
"def",
"create_dummy_vars",
"(",
")",
":",
"var_names",
"=",
"set",
"(",
"[",
"v",
".",
"name",
"for",
"v",
"in",
"tf",
".",
"global_variables",
"(",
")",
"]",
")",
"if",
"\"losses_avg/problem_0/total_loss:0\"",
"in",
"var_names",
":",
"return",
"with",
"tf",
".",
"variable_scope",
"(",
"\"losses_avg\"",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"problem_0\"",
")",
":",
"for",
"var_name",
"in",
"[",
"\"total\"",
",",
"\"extra\"",
",",
"\"training\"",
"]",
":",
"tf",
".",
"get_variable",
"(",
"\"%s_loss\"",
"%",
"var_name",
",",
"initializer",
"=",
"100.0",
",",
"trainable",
"=",
"False",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"train_stats\"",
")",
":",
"tf",
".",
"get_variable",
"(",
"\"problem_0_steps\"",
",",
"initializer",
"=",
"0",
",",
"trainable",
"=",
"False",
")"
] |
Dummy vars for restore to work when not using TPU codepath.
|
[
"Dummy",
"vars",
"for",
"restore",
"to",
"work",
"when",
"not",
"using",
"TPU",
"codepath",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L1916-L1927
|
22,274
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
remove_summaries
|
def remove_summaries():
"""Remove summaries from the default graph."""
g = tf.get_default_graph()
key = tf.GraphKeys.SUMMARIES
log_debug("Remove summaries %s" % str(g.get_collection(key)))
del g.get_collection_ref(key)[:]
assert not g.get_collection(key)
|
python
|
def remove_summaries():
"""Remove summaries from the default graph."""
g = tf.get_default_graph()
key = tf.GraphKeys.SUMMARIES
log_debug("Remove summaries %s" % str(g.get_collection(key)))
del g.get_collection_ref(key)[:]
assert not g.get_collection(key)
|
[
"def",
"remove_summaries",
"(",
")",
":",
"g",
"=",
"tf",
".",
"get_default_graph",
"(",
")",
"key",
"=",
"tf",
".",
"GraphKeys",
".",
"SUMMARIES",
"log_debug",
"(",
"\"Remove summaries %s\"",
"%",
"str",
"(",
"g",
".",
"get_collection",
"(",
"key",
")",
")",
")",
"del",
"g",
".",
"get_collection_ref",
"(",
"key",
")",
"[",
":",
"]",
"assert",
"not",
"g",
".",
"get_collection",
"(",
"key",
")"
] |
Remove summaries from the default graph.
|
[
"Remove",
"summaries",
"from",
"the",
"default",
"graph",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L2018-L2024
|
22,275
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
create_host_call
|
def create_host_call(model_dir):
"""Construct a host_call writing scalar summaries.
Args:
model_dir: String containing path to train
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
"""
graph = tf.get_default_graph()
summaries = graph.get_collection(tf.GraphKeys.SUMMARIES)
gs_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])
summary_kwargs = collections.OrderedDict()
for t in summaries:
# TODO(aidangomez): enable ImageSummary support when we have a faster method
# see @shibow's comment in cl/202344570
if t.op.type not in ["ScalarSummary"]:
tf.logging.warn("Ignoring unsupported tf.Summary type %s" % t.op.type)
continue
name = t.op.name
tensor = t.op.inputs[1]
if t.op.type == "ScalarSummary":
assert tensor.shape.is_compatible_with([])
if tensor.dtype == tf.int64:
tensor = tf.to_int32(tensor)
summary_kwargs["ScalarSummary" + name] = tf.reshape(tensor, [1])
elif t.op.type == "ImageSummary":
# TODO(aidangomez): as we move to support more types, update
# common_layers.tpu_safe_image_summary
if tensor.dtype != tf.float32:
tf.logging.warn(
"Currently T2T on TPU only supports ImageSummary of "
"tf.float32-type Tensors. Skipping Tensor "
"%s with dtype %s..." % (tensor.name, tensor.dtype))
continue
# tensor = tf.to_float(tensor)
summary_kwargs["ImageSummary" + name] = tensor
# When no supported summaries are found, don't create host_call. Otherwise,
# TPU outfeed queue would enqueue global_step while host_call doesn't dequeue
# it, eventually causing hang.
if not summary_kwargs:
return None
summary_kwargs["global_step"] = gs_t
log_info("summary_kwargs %s" % str(summary_kwargs))
def host_call_fn(**kwargs):
"""Training host call. Creates summaries for training metrics.
Args:
**kwargs: Dict of {str: Tensor} , with `Tensor` of shape `[batch]`. Must
contain key "global_step" with value of current global_step Tensor.
Returns:
List of summary ops to run on the CPU host.
"""
gs = tf.to_int64(kwargs.pop("global_step")[0])
with tf.contrib.summary.create_file_writer(model_dir).as_default():
with tf.contrib.summary.always_record_summaries():
# We need to use tf.contrib.summary in order to feed the `step`.
for name, value in sorted(six.iteritems(kwargs)):
if name.startswith("ScalarSummary"):
name = name[len("ScalarSummary"):]
tf.contrib.summary.scalar(
name, tf.reduce_mean(tf.to_float(value)), step=gs)
elif name.startswith("ImageSummary"):
name = name[len("ImageSummary"):]
tf.contrib.summary.image(name, value, step=gs)
return tf.contrib.summary.all_summary_ops()
return (host_call_fn, summary_kwargs)
|
python
|
def create_host_call(model_dir):
"""Construct a host_call writing scalar summaries.
Args:
model_dir: String containing path to train
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
"""
graph = tf.get_default_graph()
summaries = graph.get_collection(tf.GraphKeys.SUMMARIES)
gs_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])
summary_kwargs = collections.OrderedDict()
for t in summaries:
# TODO(aidangomez): enable ImageSummary support when we have a faster method
# see @shibow's comment in cl/202344570
if t.op.type not in ["ScalarSummary"]:
tf.logging.warn("Ignoring unsupported tf.Summary type %s" % t.op.type)
continue
name = t.op.name
tensor = t.op.inputs[1]
if t.op.type == "ScalarSummary":
assert tensor.shape.is_compatible_with([])
if tensor.dtype == tf.int64:
tensor = tf.to_int32(tensor)
summary_kwargs["ScalarSummary" + name] = tf.reshape(tensor, [1])
elif t.op.type == "ImageSummary":
# TODO(aidangomez): as we move to support more types, update
# common_layers.tpu_safe_image_summary
if tensor.dtype != tf.float32:
tf.logging.warn(
"Currently T2T on TPU only supports ImageSummary of "
"tf.float32-type Tensors. Skipping Tensor "
"%s with dtype %s..." % (tensor.name, tensor.dtype))
continue
# tensor = tf.to_float(tensor)
summary_kwargs["ImageSummary" + name] = tensor
# When no supported summaries are found, don't create host_call. Otherwise,
# TPU outfeed queue would enqueue global_step while host_call doesn't dequeue
# it, eventually causing hang.
if not summary_kwargs:
return None
summary_kwargs["global_step"] = gs_t
log_info("summary_kwargs %s" % str(summary_kwargs))
def host_call_fn(**kwargs):
"""Training host call. Creates summaries for training metrics.
Args:
**kwargs: Dict of {str: Tensor} , with `Tensor` of shape `[batch]`. Must
contain key "global_step" with value of current global_step Tensor.
Returns:
List of summary ops to run on the CPU host.
"""
gs = tf.to_int64(kwargs.pop("global_step")[0])
with tf.contrib.summary.create_file_writer(model_dir).as_default():
with tf.contrib.summary.always_record_summaries():
# We need to use tf.contrib.summary in order to feed the `step`.
for name, value in sorted(six.iteritems(kwargs)):
if name.startswith("ScalarSummary"):
name = name[len("ScalarSummary"):]
tf.contrib.summary.scalar(
name, tf.reduce_mean(tf.to_float(value)), step=gs)
elif name.startswith("ImageSummary"):
name = name[len("ImageSummary"):]
tf.contrib.summary.image(name, value, step=gs)
return tf.contrib.summary.all_summary_ops()
return (host_call_fn, summary_kwargs)
|
[
"def",
"create_host_call",
"(",
"model_dir",
")",
":",
"graph",
"=",
"tf",
".",
"get_default_graph",
"(",
")",
"summaries",
"=",
"graph",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"SUMMARIES",
")",
"gs_t",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"to_int32",
"(",
"tf",
".",
"train",
".",
"get_global_step",
"(",
")",
")",
",",
"[",
"1",
"]",
")",
"summary_kwargs",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"t",
"in",
"summaries",
":",
"# TODO(aidangomez): enable ImageSummary support when we have a faster method",
"# see @shibow's comment in cl/202344570",
"if",
"t",
".",
"op",
".",
"type",
"not",
"in",
"[",
"\"ScalarSummary\"",
"]",
":",
"tf",
".",
"logging",
".",
"warn",
"(",
"\"Ignoring unsupported tf.Summary type %s\"",
"%",
"t",
".",
"op",
".",
"type",
")",
"continue",
"name",
"=",
"t",
".",
"op",
".",
"name",
"tensor",
"=",
"t",
".",
"op",
".",
"inputs",
"[",
"1",
"]",
"if",
"t",
".",
"op",
".",
"type",
"==",
"\"ScalarSummary\"",
":",
"assert",
"tensor",
".",
"shape",
".",
"is_compatible_with",
"(",
"[",
"]",
")",
"if",
"tensor",
".",
"dtype",
"==",
"tf",
".",
"int64",
":",
"tensor",
"=",
"tf",
".",
"to_int32",
"(",
"tensor",
")",
"summary_kwargs",
"[",
"\"ScalarSummary\"",
"+",
"name",
"]",
"=",
"tf",
".",
"reshape",
"(",
"tensor",
",",
"[",
"1",
"]",
")",
"elif",
"t",
".",
"op",
".",
"type",
"==",
"\"ImageSummary\"",
":",
"# TODO(aidangomez): as we move to support more types, update",
"# common_layers.tpu_safe_image_summary",
"if",
"tensor",
".",
"dtype",
"!=",
"tf",
".",
"float32",
":",
"tf",
".",
"logging",
".",
"warn",
"(",
"\"Currently T2T on TPU only supports ImageSummary of \"",
"\"tf.float32-type Tensors. Skipping Tensor \"",
"\"%s with dtype %s...\"",
"%",
"(",
"tensor",
".",
"name",
",",
"tensor",
".",
"dtype",
")",
")",
"continue",
"# tensor = tf.to_float(tensor)",
"summary_kwargs",
"[",
"\"ImageSummary\"",
"+",
"name",
"]",
"=",
"tensor",
"# When no supported summaries are found, don't create host_call. Otherwise,",
"# TPU outfeed queue would enqueue global_step while host_call doesn't dequeue",
"# it, eventually causing hang.",
"if",
"not",
"summary_kwargs",
":",
"return",
"None",
"summary_kwargs",
"[",
"\"global_step\"",
"]",
"=",
"gs_t",
"log_info",
"(",
"\"summary_kwargs %s\"",
"%",
"str",
"(",
"summary_kwargs",
")",
")",
"def",
"host_call_fn",
"(",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Training host call. Creates summaries for training metrics.\n\n Args:\n **kwargs: Dict of {str: Tensor} , with `Tensor` of shape `[batch]`. Must\n contain key \"global_step\" with value of current global_step Tensor.\n\n Returns:\n List of summary ops to run on the CPU host.\n \"\"\"",
"gs",
"=",
"tf",
".",
"to_int64",
"(",
"kwargs",
".",
"pop",
"(",
"\"global_step\"",
")",
"[",
"0",
"]",
")",
"with",
"tf",
".",
"contrib",
".",
"summary",
".",
"create_file_writer",
"(",
"model_dir",
")",
".",
"as_default",
"(",
")",
":",
"with",
"tf",
".",
"contrib",
".",
"summary",
".",
"always_record_summaries",
"(",
")",
":",
"# We need to use tf.contrib.summary in order to feed the `step`.",
"for",
"name",
",",
"value",
"in",
"sorted",
"(",
"six",
".",
"iteritems",
"(",
"kwargs",
")",
")",
":",
"if",
"name",
".",
"startswith",
"(",
"\"ScalarSummary\"",
")",
":",
"name",
"=",
"name",
"[",
"len",
"(",
"\"ScalarSummary\"",
")",
":",
"]",
"tf",
".",
"contrib",
".",
"summary",
".",
"scalar",
"(",
"name",
",",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"to_float",
"(",
"value",
")",
")",
",",
"step",
"=",
"gs",
")",
"elif",
"name",
".",
"startswith",
"(",
"\"ImageSummary\"",
")",
":",
"name",
"=",
"name",
"[",
"len",
"(",
"\"ImageSummary\"",
")",
":",
"]",
"tf",
".",
"contrib",
".",
"summary",
".",
"image",
"(",
"name",
",",
"value",
",",
"step",
"=",
"gs",
")",
"return",
"tf",
".",
"contrib",
".",
"summary",
".",
"all_summary_ops",
"(",
")",
"return",
"(",
"host_call_fn",
",",
"summary_kwargs",
")"
] |
Construct a host_call writing scalar summaries.
Args:
model_dir: String containing path to train
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
|
[
"Construct",
"a",
"host_call",
"writing",
"scalar",
"summaries",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L2027-L2098
|
22,276
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
average_sharded_losses
|
def average_sharded_losses(sharded_losses):
"""Average losses across datashards.
Args:
sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
can be a single Tensor or a 2-tuple (numerator and denominator).
Returns:
losses: dict<str loss_name, Tensor avg_loss>
"""
losses = {}
for loss_name in sorted(sharded_losses[0]):
all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
if isinstance(all_shards[0], tuple):
sharded_num, sharded_den = zip(*all_shards)
mean_loss = (
tf.add_n(sharded_num) / tf.maximum(
tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
else:
mean_loss = tf.reduce_mean(all_shards)
losses[loss_name] = mean_loss
return losses
|
python
|
def average_sharded_losses(sharded_losses):
"""Average losses across datashards.
Args:
sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
can be a single Tensor or a 2-tuple (numerator and denominator).
Returns:
losses: dict<str loss_name, Tensor avg_loss>
"""
losses = {}
for loss_name in sorted(sharded_losses[0]):
all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
if isinstance(all_shards[0], tuple):
sharded_num, sharded_den = zip(*all_shards)
mean_loss = (
tf.add_n(sharded_num) / tf.maximum(
tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
else:
mean_loss = tf.reduce_mean(all_shards)
losses[loss_name] = mean_loss
return losses
|
[
"def",
"average_sharded_losses",
"(",
"sharded_losses",
")",
":",
"losses",
"=",
"{",
"}",
"for",
"loss_name",
"in",
"sorted",
"(",
"sharded_losses",
"[",
"0",
"]",
")",
":",
"all_shards",
"=",
"[",
"shard_losses",
"[",
"loss_name",
"]",
"for",
"shard_losses",
"in",
"sharded_losses",
"]",
"if",
"isinstance",
"(",
"all_shards",
"[",
"0",
"]",
",",
"tuple",
")",
":",
"sharded_num",
",",
"sharded_den",
"=",
"zip",
"(",
"*",
"all_shards",
")",
"mean_loss",
"=",
"(",
"tf",
".",
"add_n",
"(",
"sharded_num",
")",
"/",
"tf",
".",
"maximum",
"(",
"tf",
".",
"cast",
"(",
"1.0",
",",
"sharded_den",
"[",
"0",
"]",
".",
"dtype",
")",
",",
"tf",
".",
"add_n",
"(",
"sharded_den",
")",
")",
")",
"else",
":",
"mean_loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"all_shards",
")",
"losses",
"[",
"loss_name",
"]",
"=",
"mean_loss",
"return",
"losses"
] |
Average losses across datashards.
Args:
sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
can be a single Tensor or a 2-tuple (numerator and denominator).
Returns:
losses: dict<str loss_name, Tensor avg_loss>
|
[
"Average",
"losses",
"across",
"datashards",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L2121-L2143
|
22,277
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
summarize_features
|
def summarize_features(features, num_shards=1):
"""Generate summaries for features."""
if not common_layers.should_generate_summaries():
return
with tf.name_scope("input_stats"):
for (k, v) in sorted(six.iteritems(features)):
if (isinstance(v, tf.Tensor) and (v.get_shape().ndims > 1) and
(v.dtype != tf.string)):
tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards)
tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
nonpadding = tf.to_float(tf.not_equal(v, 0))
nonpadding_tokens = tf.reduce_sum(nonpadding)
tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
tf.summary.scalar("%s_nonpadding_fraction" % k,
tf.reduce_mean(nonpadding))
|
python
|
def summarize_features(features, num_shards=1):
  """Add TensorBoard scalar summaries describing the input feature batch.

  For every summarizable feature (a non-string Tensor with rank > 1), emits
  per-shard batch size, sequence length, and non-padding token statistics.

  Args:
    features: dict of str to Tensor, the input feature batch.
    num_shards: int, number of datashards the batch dimension is split over.
  """
  if not common_layers.should_generate_summaries():
    return
  with tf.name_scope("input_stats"):
    for name, tensor in sorted(six.iteritems(features)):
      summarizable = (
          isinstance(tensor, tf.Tensor) and tensor.get_shape().ndims > 1 and
          tensor.dtype != tf.string)
      if not summarizable:
        continue
      tf.summary.scalar("%s_batch" % name, tf.shape(tensor)[0] // num_shards)
      tf.summary.scalar("%s_length" % name, tf.shape(tensor)[1])
      # Padding positions are assumed to hold id 0.
      nonpadding = tf.to_float(tf.not_equal(tensor, 0))
      tf.summary.scalar("%s_nonpadding_tokens" % name,
                        tf.reduce_sum(nonpadding))
      tf.summary.scalar("%s_nonpadding_fraction" % name,
                        tf.reduce_mean(nonpadding))
|
[
"def",
"summarize_features",
"(",
"features",
",",
"num_shards",
"=",
"1",
")",
":",
"if",
"not",
"common_layers",
".",
"should_generate_summaries",
"(",
")",
":",
"return",
"with",
"tf",
".",
"name_scope",
"(",
"\"input_stats\"",
")",
":",
"for",
"(",
"k",
",",
"v",
")",
"in",
"sorted",
"(",
"six",
".",
"iteritems",
"(",
"features",
")",
")",
":",
"if",
"(",
"isinstance",
"(",
"v",
",",
"tf",
".",
"Tensor",
")",
"and",
"(",
"v",
".",
"get_shape",
"(",
")",
".",
"ndims",
">",
"1",
")",
"and",
"(",
"v",
".",
"dtype",
"!=",
"tf",
".",
"string",
")",
")",
":",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"%s_batch\"",
"%",
"k",
",",
"tf",
".",
"shape",
"(",
"v",
")",
"[",
"0",
"]",
"//",
"num_shards",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"%s_length\"",
"%",
"k",
",",
"tf",
".",
"shape",
"(",
"v",
")",
"[",
"1",
"]",
")",
"nonpadding",
"=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"not_equal",
"(",
"v",
",",
"0",
")",
")",
"nonpadding_tokens",
"=",
"tf",
".",
"reduce_sum",
"(",
"nonpadding",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"%s_nonpadding_tokens\"",
"%",
"k",
",",
"nonpadding_tokens",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"%s_nonpadding_fraction\"",
"%",
"k",
",",
"tf",
".",
"reduce_mean",
"(",
"nonpadding",
")",
")"
] |
Generate summaries for features.
|
[
"Generate",
"summaries",
"for",
"features",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L2146-L2161
|
22,278
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
_compose_custom_getters
|
def _compose_custom_getters(getter_a, getter_b):
"""Compose two custom getters.
Example use:
tf.get_variable_scope().set_custom_getter(
compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))
This composes getters in the same way as creating a new variable scope with
the new_getter, but it does not actually create a new variable scope.
Args:
getter_a: a custom getter - generally from the existing variable scope.
getter_b: a custom getter
Returns:
a custom getter
"""
if not getter_a:
return getter_b
if not getter_b:
return getter_a
def getter_fn(getter, *args, **kwargs):
return getter_b(functools.partial(getter_a, getter), *args, **kwargs)
return getter_fn
|
python
|
def _compose_custom_getters(getter_a, getter_b):
"""Compose two custom getters.
Example use:
tf.get_variable_scope().set_custom_getter(
compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))
This composes getters in the same way as creating a new variable scope with
the new_getter, but it does not actually create a new variable scope.
Args:
getter_a: a custom getter - generally from the existing variable scope.
getter_b: a custom getter
Returns:
a custom getter
"""
if not getter_a:
return getter_b
if not getter_b:
return getter_a
def getter_fn(getter, *args, **kwargs):
return getter_b(functools.partial(getter_a, getter), *args, **kwargs)
return getter_fn
|
[
"def",
"_compose_custom_getters",
"(",
"getter_a",
",",
"getter_b",
")",
":",
"if",
"not",
"getter_a",
":",
"return",
"getter_b",
"if",
"not",
"getter_b",
":",
"return",
"getter_a",
"def",
"getter_fn",
"(",
"getter",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"getter_b",
"(",
"functools",
".",
"partial",
"(",
"getter_a",
",",
"getter",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"getter_fn"
] |
Compose two custom getters.
Example use:
tf.get_variable_scope().set_custom_getter(
compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))
This composes getters in the same way as creating a new variable scope with
the new_getter, but it does not actually create a new variable scope.
Args:
getter_a: a custom getter - generally from the existing variable scope.
getter_b: a custom getter
Returns:
a custom getter
|
[
"Compose",
"two",
"custom",
"getters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L2186-L2211
|
22,279
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
set_custom_getter_compose
|
def set_custom_getter_compose(custom_getter):
  """Compose `custom_getter` onto the current variable scope's getter.

  The scope's existing custom getter (if any) is preserved; the new getter is
  layered on top of it rather than replacing it.

  Args:
    custom_getter: a custom getter.
  """
  scope = tf.get_variable_scope()
  composed = _compose_custom_getters(scope.custom_getter, custom_getter)
  scope.set_custom_getter(composed)
|
python
|
def set_custom_getter_compose(custom_getter):
  """Compose `custom_getter` onto the current variable scope's getter.

  The scope's existing custom getter (if any) is preserved; the new getter is
  layered on top of it rather than replacing it.

  Args:
    custom_getter: a custom getter.
  """
  scope = tf.get_variable_scope()
  composed = _compose_custom_getters(scope.custom_getter, custom_getter)
  scope.set_custom_getter(composed)
|
[
"def",
"set_custom_getter_compose",
"(",
"custom_getter",
")",
":",
"tf",
".",
"get_variable_scope",
"(",
")",
".",
"set_custom_getter",
"(",
"_compose_custom_getters",
"(",
"tf",
".",
"get_variable_scope",
"(",
")",
".",
"custom_getter",
",",
"custom_getter",
")",
")"
] |
Set a custom getter in the current variable scope.
Do not overwrite the existing custom getter - rather compose with it.
Args:
custom_getter: a custom getter.
|
[
"Set",
"a",
"custom",
"getter",
"in",
"the",
"current",
"variable",
"scope",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L2214-L2224
|
22,280
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
initialize_from_ckpt
|
def initialize_from_ckpt(ckpt_dir, hparams):
  """Initialize trainable variables from a checkpoint directory.

  Skips initialization entirely when the model directory already contains a
  checkpoint, so an in-progress training run is never clobbered. Only
  trainable variables that exist in the source checkpoint are loaded; the
  rest are logged and skipped.

  Args:
    ckpt_dir: str, directory containing the checkpoint to warm-start from.
    hparams: HParams, read for the optional "model_dir" entry.
  """
  model_dir = hparams.get("model_dir", None)
  if model_dir and tf.train.latest_checkpoint(model_dir) is not None:
    # Training already produced its own checkpoint; nothing to do.
    return
  tf.logging.info("Checkpoint dir: %s", ckpt_dir)
  reader = tf.contrib.framework.load_checkpoint(ckpt_dir)
  variable_map = {}
  for var in tf.contrib.framework.get_trainable_variables():
    # Strip the ":0" output suffix to match checkpoint tensor names.
    var_name = var.name.split(":")[0]
    if not reader.has_tensor(var_name):
      tf.logging.info("Cannot find variable in checkpoint, skipping: %s",
                      var_name)
      continue
    tf.logging.info("Loading variable from checkpoint: %s", var_name)
    variable_map[var_name] = var
  tf.train.init_from_checkpoint(ckpt_dir, variable_map)
|
python
|
def initialize_from_ckpt(ckpt_dir, hparams):
  """Initialize trainable variables from a checkpoint directory.

  Skips initialization entirely when the model directory already contains a
  checkpoint, so an in-progress training run is never clobbered. Only
  trainable variables that exist in the source checkpoint are loaded; the
  rest are logged and skipped.

  Args:
    ckpt_dir: str, directory containing the checkpoint to warm-start from.
    hparams: HParams, read for the optional "model_dir" entry.
  """
  model_dir = hparams.get("model_dir", None)
  if model_dir and tf.train.latest_checkpoint(model_dir) is not None:
    # Training already produced its own checkpoint; nothing to do.
    return
  tf.logging.info("Checkpoint dir: %s", ckpt_dir)
  reader = tf.contrib.framework.load_checkpoint(ckpt_dir)
  variable_map = {}
  for var in tf.contrib.framework.get_trainable_variables():
    # Strip the ":0" output suffix to match checkpoint tensor names.
    var_name = var.name.split(":")[0]
    if not reader.has_tensor(var_name):
      tf.logging.info("Cannot find variable in checkpoint, skipping: %s",
                      var_name)
      continue
    tf.logging.info("Loading variable from checkpoint: %s", var_name)
    variable_map[var_name] = var
  tf.train.init_from_checkpoint(ckpt_dir, variable_map)
|
[
"def",
"initialize_from_ckpt",
"(",
"ckpt_dir",
",",
"hparams",
")",
":",
"model_dir",
"=",
"hparams",
".",
"get",
"(",
"\"model_dir\"",
",",
"None",
")",
"already_has_ckpt",
"=",
"(",
"model_dir",
"and",
"tf",
".",
"train",
".",
"latest_checkpoint",
"(",
"model_dir",
")",
"is",
"not",
"None",
")",
"if",
"already_has_ckpt",
":",
"return",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Checkpoint dir: %s\"",
",",
"ckpt_dir",
")",
"reader",
"=",
"tf",
".",
"contrib",
".",
"framework",
".",
"load_checkpoint",
"(",
"ckpt_dir",
")",
"variable_map",
"=",
"{",
"}",
"for",
"var",
"in",
"tf",
".",
"contrib",
".",
"framework",
".",
"get_trainable_variables",
"(",
")",
":",
"var_name",
"=",
"var",
".",
"name",
".",
"split",
"(",
"\":\"",
")",
"[",
"0",
"]",
"if",
"reader",
".",
"has_tensor",
"(",
"var_name",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Loading variable from checkpoint: %s\"",
",",
"var_name",
")",
"variable_map",
"[",
"var_name",
"]",
"=",
"var",
"else",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Cannot find variable in checkpoint, skipping: %s\"",
",",
"var_name",
")",
"tf",
".",
"train",
".",
"init_from_checkpoint",
"(",
"ckpt_dir",
",",
"variable_map",
")"
] |
Initialize variables from given directory.
|
[
"Initialize",
"variables",
"from",
"given",
"directory",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L2236-L2255
|
22,281
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
T2TModel._target_modality_is_real
|
def _target_modality_is_real(self):
  """Whether the target modality is real-valued.

  Returns:
    bool, True iff the resolved target modality name starts with "real".
  """
  vocab_size = self._problem_hparams.vocab_size["targets"]
  if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
    # Round the vocab size up to the next multiple of vocab_divisor.
    vocab_size += (-vocab_size) % self._hparams.vocab_divisor
  modality = self._problem_hparams.modality["targets"]
  # Per-feature name override takes precedence over the modality default.
  name_fn = self._hparams.name.get("targets", modalities.get_name(modality))
  modality_name = name_fn(self._hparams, vocab_size)
  return modality_name.startswith("real")
|
python
|
def _target_modality_is_real(self):
  """Whether the target modality is real-valued.

  Returns:
    bool, True iff the resolved target modality name starts with "real".
  """
  vocab_size = self._problem_hparams.vocab_size["targets"]
  if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
    # Round the vocab size up to the next multiple of vocab_divisor.
    vocab_size += (-vocab_size) % self._hparams.vocab_divisor
  modality = self._problem_hparams.modality["targets"]
  # Per-feature name override takes precedence over the modality default.
  name_fn = self._hparams.name.get("targets", modalities.get_name(modality))
  modality_name = name_fn(self._hparams, vocab_size)
  return modality_name.startswith("real")
|
[
"def",
"_target_modality_is_real",
"(",
"self",
")",
":",
"vocab_size",
"=",
"self",
".",
"_problem_hparams",
".",
"vocab_size",
"[",
"\"targets\"",
"]",
"if",
"vocab_size",
"is",
"not",
"None",
"and",
"hasattr",
"(",
"self",
".",
"_hparams",
",",
"\"vocab_divisor\"",
")",
":",
"vocab_size",
"+=",
"(",
"-",
"vocab_size",
")",
"%",
"self",
".",
"_hparams",
".",
"vocab_divisor",
"modality",
"=",
"self",
".",
"_problem_hparams",
".",
"modality",
"[",
"\"targets\"",
"]",
"modality_name",
"=",
"self",
".",
"_hparams",
".",
"name",
".",
"get",
"(",
"\"targets\"",
",",
"modalities",
".",
"get_name",
"(",
"modality",
")",
")",
"(",
"self",
".",
"_hparams",
",",
"vocab_size",
")",
"return",
"modality_name",
".",
"startswith",
"(",
"\"real\"",
")"
] |
Whether the target modality is real-valued.
|
[
"Whether",
"the",
"target",
"modality",
"is",
"real",
"-",
"valued",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L302-L311
|
22,282
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
T2TModel.model_fn_sharded
|
def model_fn_sharded(self, sharded_features):
  """Estimator model_fn sharded along batch dimension.

  Args:
    sharded_features: {str: [Tensor]}. Features sharded along batch dimension.
      Each list is the same length (== number of shards).

  Returns:
    sharded_logits: [Tensor]. Logits for each shard of examples.
    losses: {str: 0-D Tensor}. Loss averaged across shards.
  """
  dp = self._data_parallelism
  # [{str: Tensor}]. Transpose of 'sharded_features'.
  datashard_to_features = self._to_features_per_datashard(sharded_features)
  if self.use_body_sharded():
    if self.hparams.scheduled_sampling_prob > 0.0:
      raise NotImplementedError(
          "Scheduled sampling for non-sharded body only.")
    # MoE models override body_sharded
    transformed_features = dp(self.bottom, datashard_to_features)
    body_out = self.body_sharded(
        self._to_single_features_dict(transformed_features))
    body_out, losses = self._normalize_body_output(body_out)
    if "training" in losses:
      # The body already produced a training loss; its output is the logits.
      log_info("Skipping T2TModel top and loss because training loss "
               "returned from body")
      sharded_logits = body_out
    else:
      if isinstance(body_out, dict):
        # Multi-target case: run top and loss once per target key.
        sharded_logits = collections.OrderedDict()
        sharded_losses = collections.OrderedDict()
        for k, v in sorted(six.iteritems(body_out)):
          sharded_logits[k] = dp(self.top, v, datashard_to_features)
          sharded_losses[k] = dp(self.loss, sharded_logits[k],
                                 datashard_to_features)
        # NOTE(review): the inner ({...} for l in loss) is a generator, not a
        # list, so average_sharded_losses receives a list of generators here;
        # verify this matches average_sharded_losses' expected input.
        training_loss_dict = average_sharded_losses([({
            "training": l
        } for l in loss) for loss in sharded_losses.values()])
        losses.update(training_loss_dict)
      else:
        sharded_logits = dp(self.top, body_out, datashard_to_features)
        sharded_losses = dp(self.loss, sharded_logits, datashard_to_features)
        if isinstance(sharded_losses, tuple):
          # Loss came back as (numerators, denominators); re-pair per shard.
          nums, dens = sharded_losses
          sharded_losses = zip(nums, dens)
        training_loss_dict = average_sharded_losses([{
            "training": loss
        } for loss in sharded_losses])
        losses.update(training_loss_dict)
  else:
    # Non-sharded body: run the whole model_fn independently per datashard.
    sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features)
    sharded_logits, sharded_losses = dp(
        self.maybe_scheduled_sampling,
        datashard_to_features, sharded_logits, sharded_losses)
    if isinstance(sharded_logits[0], dict):
      # Transpose [dict-per-shard] into {key: [tensor-per-shard]}.
      temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}
      for k, _ in six.iteritems(sharded_logits[0]):
        for l in sharded_logits:
          temp_dict[k].append(l[k])
      sharded_logits = temp_dict
    losses = average_sharded_losses(sharded_losses)
  return sharded_logits, losses
|
python
|
def model_fn_sharded(self, sharded_features):
  """Estimator model_fn sharded along batch dimension.

  Args:
    sharded_features: {str: [Tensor]}. Features sharded along batch dimension.
      Each list is the same length (== number of shards).

  Returns:
    sharded_logits: [Tensor]. Logits for each shard of examples.
    losses: {str: 0-D Tensor}. Loss averaged across shards.
  """
  dp = self._data_parallelism
  # [{str: Tensor}]. Transpose of 'sharded_features'.
  datashard_to_features = self._to_features_per_datashard(sharded_features)
  if self.use_body_sharded():
    if self.hparams.scheduled_sampling_prob > 0.0:
      raise NotImplementedError(
          "Scheduled sampling for non-sharded body only.")
    # MoE models override body_sharded
    transformed_features = dp(self.bottom, datashard_to_features)
    body_out = self.body_sharded(
        self._to_single_features_dict(transformed_features))
    body_out, losses = self._normalize_body_output(body_out)
    if "training" in losses:
      # The body already produced a training loss; its output is the logits.
      log_info("Skipping T2TModel top and loss because training loss "
               "returned from body")
      sharded_logits = body_out
    else:
      if isinstance(body_out, dict):
        # Multi-target case: run top and loss once per target key.
        sharded_logits = collections.OrderedDict()
        sharded_losses = collections.OrderedDict()
        for k, v in sorted(six.iteritems(body_out)):
          sharded_logits[k] = dp(self.top, v, datashard_to_features)
          sharded_losses[k] = dp(self.loss, sharded_logits[k],
                                 datashard_to_features)
        # NOTE(review): the inner ({...} for l in loss) is a generator, not a
        # list, so average_sharded_losses receives a list of generators here;
        # verify this matches average_sharded_losses' expected input.
        training_loss_dict = average_sharded_losses([({
            "training": l
        } for l in loss) for loss in sharded_losses.values()])
        losses.update(training_loss_dict)
      else:
        sharded_logits = dp(self.top, body_out, datashard_to_features)
        sharded_losses = dp(self.loss, sharded_logits, datashard_to_features)
        if isinstance(sharded_losses, tuple):
          # Loss came back as (numerators, denominators); re-pair per shard.
          nums, dens = sharded_losses
          sharded_losses = zip(nums, dens)
        training_loss_dict = average_sharded_losses([{
            "training": loss
        } for loss in sharded_losses])
        losses.update(training_loss_dict)
  else:
    # Non-sharded body: run the whole model_fn independently per datashard.
    sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features)
    sharded_logits, sharded_losses = dp(
        self.maybe_scheduled_sampling,
        datashard_to_features, sharded_logits, sharded_losses)
    if isinstance(sharded_logits[0], dict):
      # Transpose [dict-per-shard] into {key: [tensor-per-shard]}.
      temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}
      for k, _ in six.iteritems(sharded_logits[0]):
        for l in sharded_logits:
          temp_dict[k].append(l[k])
      sharded_logits = temp_dict
    losses = average_sharded_losses(sharded_losses)
  return sharded_logits, losses
|
[
"def",
"model_fn_sharded",
"(",
"self",
",",
"sharded_features",
")",
":",
"dp",
"=",
"self",
".",
"_data_parallelism",
"# [{str: Tensor}]. Transpose of 'sharded_features'.",
"datashard_to_features",
"=",
"self",
".",
"_to_features_per_datashard",
"(",
"sharded_features",
")",
"if",
"self",
".",
"use_body_sharded",
"(",
")",
":",
"if",
"self",
".",
"hparams",
".",
"scheduled_sampling_prob",
">",
"0.0",
":",
"raise",
"NotImplementedError",
"(",
"\"Scheduled sampling for non-sharded body only.\"",
")",
"# MoE models override body_sharded",
"transformed_features",
"=",
"dp",
"(",
"self",
".",
"bottom",
",",
"datashard_to_features",
")",
"body_out",
"=",
"self",
".",
"body_sharded",
"(",
"self",
".",
"_to_single_features_dict",
"(",
"transformed_features",
")",
")",
"body_out",
",",
"losses",
"=",
"self",
".",
"_normalize_body_output",
"(",
"body_out",
")",
"if",
"\"training\"",
"in",
"losses",
":",
"log_info",
"(",
"\"Skipping T2TModel top and loss because training loss \"",
"\"returned from body\"",
")",
"sharded_logits",
"=",
"body_out",
"else",
":",
"if",
"isinstance",
"(",
"body_out",
",",
"dict",
")",
":",
"sharded_logits",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"sharded_losses",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"six",
".",
"iteritems",
"(",
"body_out",
")",
")",
":",
"sharded_logits",
"[",
"k",
"]",
"=",
"dp",
"(",
"self",
".",
"top",
",",
"v",
",",
"datashard_to_features",
")",
"sharded_losses",
"[",
"k",
"]",
"=",
"dp",
"(",
"self",
".",
"loss",
",",
"sharded_logits",
"[",
"k",
"]",
",",
"datashard_to_features",
")",
"training_loss_dict",
"=",
"average_sharded_losses",
"(",
"[",
"(",
"{",
"\"training\"",
":",
"l",
"}",
"for",
"l",
"in",
"loss",
")",
"for",
"loss",
"in",
"sharded_losses",
".",
"values",
"(",
")",
"]",
")",
"losses",
".",
"update",
"(",
"training_loss_dict",
")",
"else",
":",
"sharded_logits",
"=",
"dp",
"(",
"self",
".",
"top",
",",
"body_out",
",",
"datashard_to_features",
")",
"sharded_losses",
"=",
"dp",
"(",
"self",
".",
"loss",
",",
"sharded_logits",
",",
"datashard_to_features",
")",
"if",
"isinstance",
"(",
"sharded_losses",
",",
"tuple",
")",
":",
"nums",
",",
"dens",
"=",
"sharded_losses",
"sharded_losses",
"=",
"zip",
"(",
"nums",
",",
"dens",
")",
"training_loss_dict",
"=",
"average_sharded_losses",
"(",
"[",
"{",
"\"training\"",
":",
"loss",
"}",
"for",
"loss",
"in",
"sharded_losses",
"]",
")",
"losses",
".",
"update",
"(",
"training_loss_dict",
")",
"else",
":",
"sharded_logits",
",",
"sharded_losses",
"=",
"dp",
"(",
"self",
".",
"model_fn",
",",
"datashard_to_features",
")",
"sharded_logits",
",",
"sharded_losses",
"=",
"dp",
"(",
"self",
".",
"maybe_scheduled_sampling",
",",
"datashard_to_features",
",",
"sharded_logits",
",",
"sharded_losses",
")",
"if",
"isinstance",
"(",
"sharded_logits",
"[",
"0",
"]",
",",
"dict",
")",
":",
"temp_dict",
"=",
"{",
"k",
":",
"[",
"]",
"for",
"k",
",",
"_",
"in",
"six",
".",
"iteritems",
"(",
"sharded_logits",
"[",
"0",
"]",
")",
"}",
"for",
"k",
",",
"_",
"in",
"six",
".",
"iteritems",
"(",
"sharded_logits",
"[",
"0",
"]",
")",
":",
"for",
"l",
"in",
"sharded_logits",
":",
"temp_dict",
"[",
"k",
"]",
".",
"append",
"(",
"l",
"[",
"k",
"]",
")",
"sharded_logits",
"=",
"temp_dict",
"losses",
"=",
"average_sharded_losses",
"(",
"sharded_losses",
")",
"return",
"sharded_logits",
",",
"losses"
] |
Estimator model_fn sharded along batch dimension.
Args:
sharded_features: {str: [Tensor]}. Features sharded along batch dimension.
Each list is the same length (== number of shards).
Returns:
sharded_logits: [Tensor]. Logits for each shard of examples.
losses: {str: 0-D Tensor}. Loss averaged across shards.
|
[
"Estimator",
"model_fn",
"sharded",
"along",
"batch",
"dimension",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L348-L412
|
22,283
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
T2TModel.bottom
|
def bottom(self, features):
  """Transforms features to feed into body.

  Args:
    features: dict of str to Tensor. Typically it is the preprocessed data
      batch after Problem's preprocess_example().

  Returns:
    transformed_features: dict of same key-value pairs as features. The value
      Tensors are newly transformed.
  """
  if not self._problem_hparams:
    log_warn("Without a Problem, T2TModel.bottom is a passthrough.")
    return features
  transformed_features = collections.OrderedDict()
  all_previous_modalities = []
  target_modality = _create_target_modality(self._problem_hparams.modality)
  # Transform features via its corresponding modality.
  for feature_name, modality in sorted(
      six.iteritems(self._problem_hparams.modality)):
    if feature_name not in features:
      tf.logging.warning("Missing feature %s - ignoring." % feature_name)
      continue
    vocab_size = self._problem_hparams.vocab_size[feature_name]
    if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
      # Round the vocab size up to the next multiple of vocab_divisor.
      vocab_size += (-vocab_size) % self._hparams.vocab_divisor
    # Per-feature name override takes precedence over the modality default.
    modality_name = self._hparams.name.get(
        feature_name,
        modalities.get_name(modality))(self._hparams, vocab_size)
    # Use if-else clauses to preserve behavior of previous changes: namely,
    # the variable scope name for the targets feature if there is only one
    # target modality; and to reuse variable scopes for only input modalities.
    if feature_name in target_modality:
      if len(target_modality) > 1:
        variable_scope_name = "%s/%s" % (modality_name, feature_name)
      else:
        variable_scope_name = modality_name
      bottom = self._hparams.bottom.get(
          feature_name,
          modalities.get_targets_bottom(modality))
      # TODO(aidangomez): share variables?
      with tf.variable_scope(variable_scope_name) as vs:
        self._add_variable_scope(variable_scope_name, vs)
        log_info("Transforming feature '%s' with %s.targets_bottom",
                 feature_name,
                 modality_name)
        transformed_features[feature_name] = bottom(features[feature_name],
                                                    self._hparams,
                                                    vocab_size)
    else:
      bottom = self._hparams.bottom.get(feature_name,
                                        modalities.get_bottom(modality))
      # Reuse the scope iff an earlier input feature used the same modality.
      do_reuse = modality_name in all_previous_modalities
      with tf.variable_scope(modality_name, reuse=do_reuse) as vs:
        self._add_variable_scope(modality_name, vs)
        log_info("Transforming feature '%s' with %s.bottom",
                 feature_name,
                 modality_name)
        transformed_features[feature_name] = bottom(features[feature_name],
                                                    self._hparams,
                                                    vocab_size)
      all_previous_modalities.append(modality_name)
  for key in features:
    if key not in transformed_features:
      # For features without a modality, we pass them along as is
      transformed_features[key] = features[key]
    else:
      # Other features get passed along with the "raw" suffix
      transformed_features[key + "_raw"] = features[key]
  return transformed_features
|
python
|
def bottom(self, features):
  """Transforms features to feed into body.

  Args:
    features: dict of str to Tensor. Typically it is the preprocessed data
      batch after Problem's preprocess_example().

  Returns:
    transformed_features: dict of same key-value pairs as features. The value
      Tensors are newly transformed.
  """
  if not self._problem_hparams:
    log_warn("Without a Problem, T2TModel.bottom is a passthrough.")
    return features
  transformed_features = collections.OrderedDict()
  all_previous_modalities = []
  target_modality = _create_target_modality(self._problem_hparams.modality)
  # Transform features via its corresponding modality.
  for feature_name, modality in sorted(
      six.iteritems(self._problem_hparams.modality)):
    if feature_name not in features:
      tf.logging.warning("Missing feature %s - ignoring." % feature_name)
      continue
    vocab_size = self._problem_hparams.vocab_size[feature_name]
    if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
      # Round the vocab size up to the next multiple of vocab_divisor.
      vocab_size += (-vocab_size) % self._hparams.vocab_divisor
    # Per-feature name override takes precedence over the modality default.
    modality_name = self._hparams.name.get(
        feature_name,
        modalities.get_name(modality))(self._hparams, vocab_size)
    # Use if-else clauses to preserve behavior of previous changes: namely,
    # the variable scope name for the targets feature if there is only one
    # target modality; and to reuse variable scopes for only input modalities.
    if feature_name in target_modality:
      if len(target_modality) > 1:
        variable_scope_name = "%s/%s" % (modality_name, feature_name)
      else:
        variable_scope_name = modality_name
      bottom = self._hparams.bottom.get(
          feature_name,
          modalities.get_targets_bottom(modality))
      # TODO(aidangomez): share variables?
      with tf.variable_scope(variable_scope_name) as vs:
        self._add_variable_scope(variable_scope_name, vs)
        log_info("Transforming feature '%s' with %s.targets_bottom",
                 feature_name,
                 modality_name)
        transformed_features[feature_name] = bottom(features[feature_name],
                                                    self._hparams,
                                                    vocab_size)
    else:
      bottom = self._hparams.bottom.get(feature_name,
                                        modalities.get_bottom(modality))
      # Reuse the scope iff an earlier input feature used the same modality.
      do_reuse = modality_name in all_previous_modalities
      with tf.variable_scope(modality_name, reuse=do_reuse) as vs:
        self._add_variable_scope(modality_name, vs)
        log_info("Transforming feature '%s' with %s.bottom",
                 feature_name,
                 modality_name)
        transformed_features[feature_name] = bottom(features[feature_name],
                                                    self._hparams,
                                                    vocab_size)
      all_previous_modalities.append(modality_name)
  for key in features:
    if key not in transformed_features:
      # For features without a modality, we pass them along as is
      transformed_features[key] = features[key]
    else:
      # Other features get passed along with the "raw" suffix
      transformed_features[key + "_raw"] = features[key]
  return transformed_features
|
[
"def",
"bottom",
"(",
"self",
",",
"features",
")",
":",
"if",
"not",
"self",
".",
"_problem_hparams",
":",
"log_warn",
"(",
"\"Without a Problem, T2TModel.bottom is a passthrough.\"",
")",
"return",
"features",
"transformed_features",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"all_previous_modalities",
"=",
"[",
"]",
"target_modality",
"=",
"_create_target_modality",
"(",
"self",
".",
"_problem_hparams",
".",
"modality",
")",
"# Transform features via its corresponding modality.",
"for",
"feature_name",
",",
"modality",
"in",
"sorted",
"(",
"six",
".",
"iteritems",
"(",
"self",
".",
"_problem_hparams",
".",
"modality",
")",
")",
":",
"if",
"feature_name",
"not",
"in",
"features",
":",
"tf",
".",
"logging",
".",
"warning",
"(",
"\"Missing feature %s - ignoring.\"",
"%",
"feature_name",
")",
"continue",
"vocab_size",
"=",
"self",
".",
"_problem_hparams",
".",
"vocab_size",
"[",
"feature_name",
"]",
"if",
"vocab_size",
"is",
"not",
"None",
"and",
"hasattr",
"(",
"self",
".",
"_hparams",
",",
"\"vocab_divisor\"",
")",
":",
"vocab_size",
"+=",
"(",
"-",
"vocab_size",
")",
"%",
"self",
".",
"_hparams",
".",
"vocab_divisor",
"modality_name",
"=",
"self",
".",
"_hparams",
".",
"name",
".",
"get",
"(",
"feature_name",
",",
"modalities",
".",
"get_name",
"(",
"modality",
")",
")",
"(",
"self",
".",
"_hparams",
",",
"vocab_size",
")",
"# Use if-else clauses to preserve behavior of previous changes: namely,",
"# the variable scope name for the targets feature if there is only one",
"# target modality; and to reuse variable scopes for only input modalities.",
"if",
"feature_name",
"in",
"target_modality",
":",
"if",
"len",
"(",
"target_modality",
")",
">",
"1",
":",
"variable_scope_name",
"=",
"\"%s/%s\"",
"%",
"(",
"modality_name",
",",
"feature_name",
")",
"else",
":",
"variable_scope_name",
"=",
"modality_name",
"bottom",
"=",
"self",
".",
"_hparams",
".",
"bottom",
".",
"get",
"(",
"feature_name",
",",
"modalities",
".",
"get_targets_bottom",
"(",
"modality",
")",
")",
"# TODO(aidangomez): share variables?",
"with",
"tf",
".",
"variable_scope",
"(",
"variable_scope_name",
")",
"as",
"vs",
":",
"self",
".",
"_add_variable_scope",
"(",
"variable_scope_name",
",",
"vs",
")",
"log_info",
"(",
"\"Transforming feature '%s' with %s.targets_bottom\"",
",",
"feature_name",
",",
"modality_name",
")",
"transformed_features",
"[",
"feature_name",
"]",
"=",
"bottom",
"(",
"features",
"[",
"feature_name",
"]",
",",
"self",
".",
"_hparams",
",",
"vocab_size",
")",
"else",
":",
"bottom",
"=",
"self",
".",
"_hparams",
".",
"bottom",
".",
"get",
"(",
"feature_name",
",",
"modalities",
".",
"get_bottom",
"(",
"modality",
")",
")",
"do_reuse",
"=",
"modality_name",
"in",
"all_previous_modalities",
"with",
"tf",
".",
"variable_scope",
"(",
"modality_name",
",",
"reuse",
"=",
"do_reuse",
")",
"as",
"vs",
":",
"self",
".",
"_add_variable_scope",
"(",
"modality_name",
",",
"vs",
")",
"log_info",
"(",
"\"Transforming feature '%s' with %s.bottom\"",
",",
"feature_name",
",",
"modality_name",
")",
"transformed_features",
"[",
"feature_name",
"]",
"=",
"bottom",
"(",
"features",
"[",
"feature_name",
"]",
",",
"self",
".",
"_hparams",
",",
"vocab_size",
")",
"all_previous_modalities",
".",
"append",
"(",
"modality_name",
")",
"for",
"key",
"in",
"features",
":",
"if",
"key",
"not",
"in",
"transformed_features",
":",
"# For features without a modality, we pass them along as is",
"transformed_features",
"[",
"key",
"]",
"=",
"features",
"[",
"key",
"]",
"else",
":",
"# Other features get passed along with the \"raw\" suffix",
"transformed_features",
"[",
"key",
"+",
"\"_raw\"",
"]",
"=",
"features",
"[",
"key",
"]",
"return",
"transformed_features"
] |
Transforms features to feed into body.
Args:
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
transformed_features: dict of same key-value pairs as features. The value
Tensors are newly transformed.
|
[
"Transforms",
"features",
"to",
"feed",
"into",
"body",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L443-L516
|
22,284
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
T2TModel.top
|
def top(self, body_output, features):
  """Computes logits given body output and features.

  Args:
    body_output: dict of str to Tensor, comprising one key-value pair for each
      target. Each value denotes the target's pre-logit activations.
      Alternatively, it may be a single Tensor denoting the pre-logits for
      that target.
    features: dict of str to Tensor. Typically it is the preprocessed data
      batch after Problem's preprocess_example().

  Returns:
    logits: dict of str to Tensor, denoting each logits for each target; or
      a single Tensor denoting the logits for that target.
      When targets are generated at training time, the result may instead be
      {"self_generated_targets": <generated targets>, "logits": <logits>}.
  """
  if not isinstance(body_output, dict):
    # Single-target case: the body output maps to the "targets" feature.
    return self._top_single(body_output, "targets", features)
  # Multi-target case: compute logits per target under its own scope.
  logits = {}
  for target_key, pre_logits in six.iteritems(body_output):
    # TODO(aidangomez): share variables here?
    with tf.variable_scope(target_key) as top_vs:
      self._add_variable_scope("top_%s" % target_key, top_vs)
      logits[target_key] = self._top_single(pre_logits, target_key, features)
  return logits
|
python
|
def top(self, body_output, features):
"""Computes logits given body output and features.
Args:
body_output: dict of str to Tensor, comprising one key-value pair for each
target. Each value denotes the target's pre-logit activations.
Alternatively, it may be a single Tensor denoting the pre-logits for
that target.
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
logits: dict of str to Tensor, denoting each logits for each target; or
a single Tensor denoting the logits for that target.
When targets are generated at training time:
logits == {
"self_generated_targets": <generated targets tensor>
"logits": <original logits Tensor or dict>
}
"""
if isinstance(body_output, dict):
logits = {}
for k, v in six.iteritems(body_output):
# TODO(aidangomez): share variables here?
with tf.variable_scope(k) as top_vs:
self._add_variable_scope("top_%s" % k, top_vs)
logits[k] = self._top_single(v, k, features)
return logits
else:
return self._top_single(body_output, "targets", features)
|
[
"def",
"top",
"(",
"self",
",",
"body_output",
",",
"features",
")",
":",
"if",
"isinstance",
"(",
"body_output",
",",
"dict",
")",
":",
"logits",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"body_output",
")",
":",
"# TODO(aidangomez): share variables here?",
"with",
"tf",
".",
"variable_scope",
"(",
"k",
")",
"as",
"top_vs",
":",
"self",
".",
"_add_variable_scope",
"(",
"\"top_%s\"",
"%",
"k",
",",
"top_vs",
")",
"logits",
"[",
"k",
"]",
"=",
"self",
".",
"_top_single",
"(",
"v",
",",
"k",
",",
"features",
")",
"return",
"logits",
"else",
":",
"return",
"self",
".",
"_top_single",
"(",
"body_output",
",",
"\"targets\"",
",",
"features",
")"
] |
Computes logits given body output and features.
Args:
body_output: dict of str to Tensor, comprising one key-value pair for each
target. Each value denotes the target's pre-logit activations.
Alternatively, it may be a single Tensor denoting the pre-logits for
that target.
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
logits: dict of str to Tensor, denoting each logits for each target; or
a single Tensor denoting the logits for that target.
When targets are generated at training time:
logits == {
"self_generated_targets": <generated targets tensor>
"logits": <original logits Tensor or dict>
}
|
[
"Computes",
"logits",
"given",
"body",
"output",
"and",
"features",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L583-L612
|
22,285
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
T2TModel.optimize
|
def optimize(self, loss, num_async_replicas=1, use_tpu=False):
"""Return a training op minimizing loss."""
lr = learning_rate.learning_rate_schedule(self.hparams)
if num_async_replicas > 1:
log_info("Dividing learning rate by num_async_replicas: %d",
num_async_replicas)
lr /= math.sqrt(float(num_async_replicas))
train_op = optimize.optimize(loss, lr, self.hparams, use_tpu=use_tpu)
return train_op
|
python
|
def optimize(self, loss, num_async_replicas=1, use_tpu=False):
"""Return a training op minimizing loss."""
lr = learning_rate.learning_rate_schedule(self.hparams)
if num_async_replicas > 1:
log_info("Dividing learning rate by num_async_replicas: %d",
num_async_replicas)
lr /= math.sqrt(float(num_async_replicas))
train_op = optimize.optimize(loss, lr, self.hparams, use_tpu=use_tpu)
return train_op
|
[
"def",
"optimize",
"(",
"self",
",",
"loss",
",",
"num_async_replicas",
"=",
"1",
",",
"use_tpu",
"=",
"False",
")",
":",
"lr",
"=",
"learning_rate",
".",
"learning_rate_schedule",
"(",
"self",
".",
"hparams",
")",
"if",
"num_async_replicas",
">",
"1",
":",
"log_info",
"(",
"\"Dividing learning rate by num_async_replicas: %d\"",
",",
"num_async_replicas",
")",
"lr",
"/=",
"math",
".",
"sqrt",
"(",
"float",
"(",
"num_async_replicas",
")",
")",
"train_op",
"=",
"optimize",
".",
"optimize",
"(",
"loss",
",",
"lr",
",",
"self",
".",
"hparams",
",",
"use_tpu",
"=",
"use_tpu",
")",
"return",
"train_op"
] |
Return a training op minimizing loss.
|
[
"Return",
"a",
"training",
"op",
"minimizing",
"loss",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L710-L718
|
22,286
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
T2TModel.set_mode
|
def set_mode(self, mode):
"""Set hparams with the given mode."""
log_info("Setting T2TModel mode to '%s'", mode)
hparams = hparams_lib.copy_hparams(self._original_hparams)
hparams.add_hparam("mode", mode)
# When not in training mode, set all forms of dropout to zero.
if mode != tf.estimator.ModeKeys.TRAIN:
for key in hparams.values():
if key.endswith("dropout") or key == "label_smoothing":
log_info("Setting hparams.%s to 0.0", key)
setattr(hparams, key, 0.0)
self._hparams = hparams
|
python
|
def set_mode(self, mode):
"""Set hparams with the given mode."""
log_info("Setting T2TModel mode to '%s'", mode)
hparams = hparams_lib.copy_hparams(self._original_hparams)
hparams.add_hparam("mode", mode)
# When not in training mode, set all forms of dropout to zero.
if mode != tf.estimator.ModeKeys.TRAIN:
for key in hparams.values():
if key.endswith("dropout") or key == "label_smoothing":
log_info("Setting hparams.%s to 0.0", key)
setattr(hparams, key, 0.0)
self._hparams = hparams
|
[
"def",
"set_mode",
"(",
"self",
",",
"mode",
")",
":",
"log_info",
"(",
"\"Setting T2TModel mode to '%s'\"",
",",
"mode",
")",
"hparams",
"=",
"hparams_lib",
".",
"copy_hparams",
"(",
"self",
".",
"_original_hparams",
")",
"hparams",
".",
"add_hparam",
"(",
"\"mode\"",
",",
"mode",
")",
"# When not in training mode, set all forms of dropout to zero.",
"if",
"mode",
"!=",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
":",
"for",
"key",
"in",
"hparams",
".",
"values",
"(",
")",
":",
"if",
"key",
".",
"endswith",
"(",
"\"dropout\"",
")",
"or",
"key",
"==",
"\"label_smoothing\"",
":",
"log_info",
"(",
"\"Setting hparams.%s to 0.0\"",
",",
"key",
")",
"setattr",
"(",
"hparams",
",",
"key",
",",
"0.0",
")",
"self",
".",
"_hparams",
"=",
"hparams"
] |
Set hparams with the given mode.
|
[
"Set",
"hparams",
"with",
"the",
"given",
"mode",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L720-L731
|
22,287
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
T2TModel.eval_autoregressive
|
def eval_autoregressive(self, features=None, decode_length=50):
"""Autoregressive eval.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
logits: `Tensor`
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
Contains a single key "training".
"""
results = self._slow_greedy_infer(features, decode_length=decode_length)
return results["logits"], results["losses"]
|
python
|
def eval_autoregressive(self, features=None, decode_length=50):
"""Autoregressive eval.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
logits: `Tensor`
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
Contains a single key "training".
"""
results = self._slow_greedy_infer(features, decode_length=decode_length)
return results["logits"], results["losses"]
|
[
"def",
"eval_autoregressive",
"(",
"self",
",",
"features",
"=",
"None",
",",
"decode_length",
"=",
"50",
")",
":",
"results",
"=",
"self",
".",
"_slow_greedy_infer",
"(",
"features",
",",
"decode_length",
"=",
"decode_length",
")",
"return",
"results",
"[",
"\"logits\"",
"]",
",",
"results",
"[",
"\"losses\"",
"]"
] |
Autoregressive eval.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
logits: `Tensor`
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
Contains a single key "training".
|
[
"Autoregressive",
"eval",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L737-L752
|
22,288
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
T2TModel.infer
|
def infer(self,
features=None,
decode_length=50,
beam_size=1,
top_beams=1,
alpha=0.0,
use_tpu=False):
"""A inference method.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. larger the alpha, stronger
the preference for longer translations.
use_tpu: bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
if slow greedy decoding is used then the dict will also contain {
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`
}
"""
set_custom_getter_compose(self._custom_getter)
with self._eager_var_store.as_default():
# TODO(rsepassi): Make decoding work with real-valued model outputs
# (i.e. if the target modality is RealModality).
self.prepare_features_for_infer(features)
if not self.has_input and beam_size > 1:
log_warn("Beam searching for a model with no inputs.")
if not self.has_input and self.hparams.sampling_method != "random":
log_warn("Non-random sampling for a model with no inputs.")
self._fill_problem_hparams_features(features)
if self._problem_hparams:
target_modality = self._problem_hparams.modality["targets"]
if target_modality == modalities.ModalityType.CLASS_LABEL:
beam_size = 1 # No use to run beam-search for a single class.
if beam_size == 1:
log_info("Greedy Decoding")
results = self._greedy_infer(features, decode_length, use_tpu)
else:
log_info("Beam Decoding with beam size %d" % beam_size)
results = self._beam_decode(features, decode_length, beam_size,
top_beams, alpha, use_tpu)
return results
|
python
|
def infer(self,
features=None,
decode_length=50,
beam_size=1,
top_beams=1,
alpha=0.0,
use_tpu=False):
"""A inference method.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. larger the alpha, stronger
the preference for longer translations.
use_tpu: bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
if slow greedy decoding is used then the dict will also contain {
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`
}
"""
set_custom_getter_compose(self._custom_getter)
with self._eager_var_store.as_default():
# TODO(rsepassi): Make decoding work with real-valued model outputs
# (i.e. if the target modality is RealModality).
self.prepare_features_for_infer(features)
if not self.has_input and beam_size > 1:
log_warn("Beam searching for a model with no inputs.")
if not self.has_input and self.hparams.sampling_method != "random":
log_warn("Non-random sampling for a model with no inputs.")
self._fill_problem_hparams_features(features)
if self._problem_hparams:
target_modality = self._problem_hparams.modality["targets"]
if target_modality == modalities.ModalityType.CLASS_LABEL:
beam_size = 1 # No use to run beam-search for a single class.
if beam_size == 1:
log_info("Greedy Decoding")
results = self._greedy_infer(features, decode_length, use_tpu)
else:
log_info("Beam Decoding with beam size %d" % beam_size)
results = self._beam_decode(features, decode_length, beam_size,
top_beams, alpha, use_tpu)
return results
|
[
"def",
"infer",
"(",
"self",
",",
"features",
"=",
"None",
",",
"decode_length",
"=",
"50",
",",
"beam_size",
"=",
"1",
",",
"top_beams",
"=",
"1",
",",
"alpha",
"=",
"0.0",
",",
"use_tpu",
"=",
"False",
")",
":",
"set_custom_getter_compose",
"(",
"self",
".",
"_custom_getter",
")",
"with",
"self",
".",
"_eager_var_store",
".",
"as_default",
"(",
")",
":",
"# TODO(rsepassi): Make decoding work with real-valued model outputs",
"# (i.e. if the target modality is RealModality).",
"self",
".",
"prepare_features_for_infer",
"(",
"features",
")",
"if",
"not",
"self",
".",
"has_input",
"and",
"beam_size",
">",
"1",
":",
"log_warn",
"(",
"\"Beam searching for a model with no inputs.\"",
")",
"if",
"not",
"self",
".",
"has_input",
"and",
"self",
".",
"hparams",
".",
"sampling_method",
"!=",
"\"random\"",
":",
"log_warn",
"(",
"\"Non-random sampling for a model with no inputs.\"",
")",
"self",
".",
"_fill_problem_hparams_features",
"(",
"features",
")",
"if",
"self",
".",
"_problem_hparams",
":",
"target_modality",
"=",
"self",
".",
"_problem_hparams",
".",
"modality",
"[",
"\"targets\"",
"]",
"if",
"target_modality",
"==",
"modalities",
".",
"ModalityType",
".",
"CLASS_LABEL",
":",
"beam_size",
"=",
"1",
"# No use to run beam-search for a single class.",
"if",
"beam_size",
"==",
"1",
":",
"log_info",
"(",
"\"Greedy Decoding\"",
")",
"results",
"=",
"self",
".",
"_greedy_infer",
"(",
"features",
",",
"decode_length",
",",
"use_tpu",
")",
"else",
":",
"log_info",
"(",
"\"Beam Decoding with beam size %d\"",
"%",
"beam_size",
")",
"results",
"=",
"self",
".",
"_beam_decode",
"(",
"features",
",",
"decode_length",
",",
"beam_size",
",",
"top_beams",
",",
"alpha",
",",
"use_tpu",
")",
"return",
"results"
] |
A inference method.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. larger the alpha, stronger
the preference for longer translations.
use_tpu: bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
if slow greedy decoding is used then the dict will also contain {
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`
}
|
[
"A",
"inference",
"method",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L761-L817
|
22,289
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
T2TModel._beam_decode
|
def _beam_decode(self,
features,
decode_length,
beam_size,
top_beams,
alpha,
use_tpu=False):
"""Beam search decoding.
Models should ideally implement a more efficient version of this function.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. larger the alpha, stronger
the preference for longer translations.
use_tpu: A bool, whether to do beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
"""
return self._beam_decode_slow(features, decode_length, beam_size, top_beams,
alpha, use_tpu)
|
python
|
def _beam_decode(self,
features,
decode_length,
beam_size,
top_beams,
alpha,
use_tpu=False):
"""Beam search decoding.
Models should ideally implement a more efficient version of this function.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. larger the alpha, stronger
the preference for longer translations.
use_tpu: A bool, whether to do beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
"""
return self._beam_decode_slow(features, decode_length, beam_size, top_beams,
alpha, use_tpu)
|
[
"def",
"_beam_decode",
"(",
"self",
",",
"features",
",",
"decode_length",
",",
"beam_size",
",",
"top_beams",
",",
"alpha",
",",
"use_tpu",
"=",
"False",
")",
":",
"return",
"self",
".",
"_beam_decode_slow",
"(",
"features",
",",
"decode_length",
",",
"beam_size",
",",
"top_beams",
",",
"alpha",
",",
"use_tpu",
")"
] |
Beam search decoding.
Models should ideally implement a more efficient version of this function.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. larger the alpha, stronger
the preference for longer translations.
use_tpu: A bool, whether to do beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
|
[
"Beam",
"search",
"decoding",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L819-L843
|
22,290
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
T2TModel._greedy_infer
|
def _greedy_infer(self, features, decode_length, use_tpu=False):
"""A greedy inference method.
Models should ideally implement a more efficient version of this function.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
use_tpu: A bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
if use_tpu:
return self._slow_greedy_infer_tpu(features, decode_length)
return self._slow_greedy_infer(features, decode_length)
|
python
|
def _greedy_infer(self, features, decode_length, use_tpu=False):
"""A greedy inference method.
Models should ideally implement a more efficient version of this function.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
use_tpu: A bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
if use_tpu:
return self._slow_greedy_infer_tpu(features, decode_length)
return self._slow_greedy_infer(features, decode_length)
|
[
"def",
"_greedy_infer",
"(",
"self",
",",
"features",
",",
"decode_length",
",",
"use_tpu",
"=",
"False",
")",
":",
"if",
"use_tpu",
":",
"return",
"self",
".",
"_slow_greedy_infer_tpu",
"(",
"features",
",",
"decode_length",
")",
"return",
"self",
".",
"_slow_greedy_infer",
"(",
"features",
",",
"decode_length",
")"
] |
A greedy inference method.
Models should ideally implement a more efficient version of this function.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
use_tpu: A bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
|
[
"A",
"greedy",
"inference",
"method",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L953-L975
|
22,291
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
T2TModel.sample
|
def sample(self, features):
"""Run the model and extract samples.
Args:
features: an map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
"""
logits, losses = self(features) # pylint: disable=not-callable
if self._target_modality_is_real:
return logits, logits, losses # Raw numbers returned from real modality.
if self.hparams.sampling_method == "argmax":
samples = tf.argmax(logits, axis=-1)
else:
assert self.hparams.sampling_method == "random"
def multinomial_squeeze(logits, temperature=1.0):
logits_shape = common_layers.shape_list(logits)
reshaped_logits = (
tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices, logits_shape[:-1])
return choices
samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
return samples, logits, losses
|
python
|
def sample(self, features):
"""Run the model and extract samples.
Args:
features: an map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
"""
logits, losses = self(features) # pylint: disable=not-callable
if self._target_modality_is_real:
return logits, logits, losses # Raw numbers returned from real modality.
if self.hparams.sampling_method == "argmax":
samples = tf.argmax(logits, axis=-1)
else:
assert self.hparams.sampling_method == "random"
def multinomial_squeeze(logits, temperature=1.0):
logits_shape = common_layers.shape_list(logits)
reshaped_logits = (
tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices, logits_shape[:-1])
return choices
samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
return samples, logits, losses
|
[
"def",
"sample",
"(",
"self",
",",
"features",
")",
":",
"logits",
",",
"losses",
"=",
"self",
"(",
"features",
")",
"# pylint: disable=not-callable",
"if",
"self",
".",
"_target_modality_is_real",
":",
"return",
"logits",
",",
"logits",
",",
"losses",
"# Raw numbers returned from real modality.",
"if",
"self",
".",
"hparams",
".",
"sampling_method",
"==",
"\"argmax\"",
":",
"samples",
"=",
"tf",
".",
"argmax",
"(",
"logits",
",",
"axis",
"=",
"-",
"1",
")",
"else",
":",
"assert",
"self",
".",
"hparams",
".",
"sampling_method",
"==",
"\"random\"",
"def",
"multinomial_squeeze",
"(",
"logits",
",",
"temperature",
"=",
"1.0",
")",
":",
"logits_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"logits",
")",
"reshaped_logits",
"=",
"(",
"tf",
".",
"reshape",
"(",
"logits",
",",
"[",
"-",
"1",
",",
"logits_shape",
"[",
"-",
"1",
"]",
"]",
")",
"/",
"temperature",
")",
"choices",
"=",
"tf",
".",
"multinomial",
"(",
"reshaped_logits",
",",
"1",
")",
"choices",
"=",
"tf",
".",
"reshape",
"(",
"choices",
",",
"logits_shape",
"[",
":",
"-",
"1",
"]",
")",
"return",
"choices",
"samples",
"=",
"multinomial_squeeze",
"(",
"logits",
",",
"self",
".",
"hparams",
".",
"sampling_temp",
")",
"return",
"samples",
",",
"logits",
",",
"losses"
] |
Run the model and extract samples.
Args:
features: an map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
|
[
"Run",
"the",
"model",
"and",
"extract",
"samples",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L1326-L1355
|
22,292
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
T2TModel._summarize_losses
|
def _summarize_losses(self, losses_dict):
"""Adds `tf.summary`s to all terms in the losses dictionary."""
if common_layers.should_generate_summaries():
with tf.name_scope("losses"):
for loss_name, loss_val in sorted(losses_dict.items()):
tf.summary.scalar(loss_name, loss_val)
|
python
|
def _summarize_losses(self, losses_dict):
"""Adds `tf.summary`s to all terms in the losses dictionary."""
if common_layers.should_generate_summaries():
with tf.name_scope("losses"):
for loss_name, loss_val in sorted(losses_dict.items()):
tf.summary.scalar(loss_name, loss_val)
|
[
"def",
"_summarize_losses",
"(",
"self",
",",
"losses_dict",
")",
":",
"if",
"common_layers",
".",
"should_generate_summaries",
"(",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"losses\"",
")",
":",
"for",
"loss_name",
",",
"loss_val",
"in",
"sorted",
"(",
"losses_dict",
".",
"items",
"(",
")",
")",
":",
"tf",
".",
"summary",
".",
"scalar",
"(",
"loss_name",
",",
"loss_val",
")"
] |
Adds `tf.summary`s to all terms in the losses dictionary.
|
[
"Adds",
"tf",
".",
"summary",
"s",
"to",
"all",
"terms",
"in",
"the",
"losses",
"dictionary",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L1771-L1776
|
22,293
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/t2t_model.py
|
T2TModel.maybe_scheduled_sampling
|
def maybe_scheduled_sampling(self, features, logits, losses):
"""Scheduled sampling.
Performs forward inference again with "targets" feature replaced with values
sampled from the model.
This is the identity unless self.hparams.scheduled_sampling_prob > 0
(default).
**WARNING**: This is not a faithful implementation of scheduled sampling.
This implementation samples tokens for timestep t condtioned on gold tokens
1...t-1. A proper implementation must condition on a mix of gold and
sampled tokens. Doing so is not efficient for models such like Transformer.
Args:
features: {str: Tensor}. Features sharded along batch dimension.
logits: Tensor. Logits for each shard of data.
losses: 0-D Tensor or (num: 0-D Tensor, denom: 0-D Tensor). Loss Tensor
Returns:
new_logits: Tensor.
new_losses: {str: loss} where loss is one of (i) a 0-D Tensor or
(ii) a (num: 0-D Tensor, denom: 0-D Tensor) pair to be used in a
weighted average.
"""
hparams = self.hparams
problem_hparams = self._problem_hparams
# Only do scheduled sampling if requested.
if hparams.scheduled_sampling_prob == 0.0:
return (logits, losses)
# Only do scheduled sampling on language tasks.
modality = problem_hparams.modality["targets"]
if modality != modalities.ModalityType.SYMBOL:
assert hparams.scheduled_sampling_prob == 0, (
"Scheduled sampling only applies to ModalityType.SYMBOL. Set "
"hparams.scheduled_sampling_prob == 0.0.")
return (logits, losses)
# Only do scheduled sampling when training.
is_training = (hparams.mode == tf.estimator.ModeKeys.TRAIN)
if not is_training:
tf.logging.info("Running in %s mode. Not using scheduled sampling.",
hparams.mode)
return (logits, losses)
# Pad vocabulary if vocab size must be evenly divisible by vocab_divisor.
vocab_size = problem_hparams.vocab_size["targets"]
assert vocab_size is not None
assert hparams.vocab_divisor == 1
def sample(x):
"""Multinomial sampling from a n-dimensional tensor."""
samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]), 1)
reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
return tf.to_int32(reshaped_samples)
def mix_gold_sampled(gold_targets, sampled_targets, mixin_prob):
"""Interleave sampled and gold tokens randomly."""
return tf.where(
tf.less(
tf.random_uniform(common_layers.shape_list(sampled_targets)),
mixin_prob),
sampled_targets,
gold_targets)
def sampled_results(features, logits, mixin_prob):
"""Generate scheduled sampling results."""
sampled_targets = sample(logits)
new_targets = mix_gold_sampled(features["targets"],
sampled_targets,
mixin_prob)
new_targets = tf.stop_gradient(new_targets) # Treat new_targets as given.
new_features = copy.copy(features)
new_features["targets"] = new_targets
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
# Compute bottom() for new_targets.
#
# TODO(duckworthd): Only apply bottom to 'new_targets'.
new_transformed_features = self.bottom(new_features)
# Compute body.
with tf.variable_scope("body"):
new_body_outputs, new_losses = self._normalize_body_output(
self.body(new_transformed_features))
assert "training" not in new_losses
# Compute top.
new_logits = self.top(new_body_outputs, new_features)
# Compute loss. Use original features (== labels).
if (hparams.mode != tf.estimator.ModeKeys.PREDICT and
hparams.mode != "attack"):
new_losses["training"] = self.loss(new_logits, features)
else:
new_losses["training"] = 0.0
return new_logits, new_losses
tf.logging.info("Using scheduled sampling.")
assert hparams.scheduled_sampling_prob == 1.0, (
"hparams.scheduled_sampling_prob must be 0 or 1.")
# Gradually increase over a warmup period. Lower numbers mean more gold
# tokens.
mixin_prob = (
hparams.scheduled_sampling_gold_mixin_prob *
common_layers.inverse_exp_decay(
hparams.scheduled_sampling_warmup_steps,
min_value=0.001)
)
# Apply scheduled sampling over N passes. The logits from the (n-1)-th pass
# will be mixed with gold tokens for conditioning in the n-th pass.
scheduled_sampling_num_passes = getattr(
hparams, "scheduled_sampling_num_passes", 1)
assert scheduled_sampling_num_passes > 0, (
"hparams.scheduled_sampling_num_passes must be > 0 if "
"hparams.scheduled_sampling_prob > 0.0")
new_logits = logits
new_losses = losses
for _ in range(scheduled_sampling_num_passes):
new_logits, new_losses = sampled_results(features, new_logits, mixin_prob)
return new_logits, new_losses
|
python
|
def maybe_scheduled_sampling(self, features, logits, losses):
"""Scheduled sampling.
Performs forward inference again with "targets" feature replaced with values
sampled from the model.
This is the identity unless self.hparams.scheduled_sampling_prob > 0
(default).
**WARNING**: This is not a faithful implementation of scheduled sampling.
This implementation samples tokens for timestep t condtioned on gold tokens
1...t-1. A proper implementation must condition on a mix of gold and
sampled tokens. Doing so is not efficient for models such like Transformer.
Args:
features: {str: Tensor}. Features sharded along batch dimension.
logits: Tensor. Logits for each shard of data.
losses: 0-D Tensor or (num: 0-D Tensor, denom: 0-D Tensor). Loss Tensor
Returns:
new_logits: Tensor.
new_losses: {str: loss} where loss is one of (i) a 0-D Tensor or
(ii) a (num: 0-D Tensor, denom: 0-D Tensor) pair to be used in a
weighted average.
"""
hparams = self.hparams
problem_hparams = self._problem_hparams
# Only do scheduled sampling if requested.
if hparams.scheduled_sampling_prob == 0.0:
return (logits, losses)
# Only do scheduled sampling on language tasks.
modality = problem_hparams.modality["targets"]
if modality != modalities.ModalityType.SYMBOL:
assert hparams.scheduled_sampling_prob == 0, (
"Scheduled sampling only applies to ModalityType.SYMBOL. Set "
"hparams.scheduled_sampling_prob == 0.0.")
return (logits, losses)
# Only do scheduled sampling when training.
is_training = (hparams.mode == tf.estimator.ModeKeys.TRAIN)
if not is_training:
tf.logging.info("Running in %s mode. Not using scheduled sampling.",
hparams.mode)
return (logits, losses)
# Pad vocabulary if vocab size must be evenly divisible by vocab_divisor.
vocab_size = problem_hparams.vocab_size["targets"]
assert vocab_size is not None
assert hparams.vocab_divisor == 1
def sample(x):
"""Multinomial sampling from a n-dimensional tensor."""
samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]), 1)
reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
return tf.to_int32(reshaped_samples)
def mix_gold_sampled(gold_targets, sampled_targets, mixin_prob):
"""Interleave sampled and gold tokens randomly."""
return tf.where(
tf.less(
tf.random_uniform(common_layers.shape_list(sampled_targets)),
mixin_prob),
sampled_targets,
gold_targets)
def sampled_results(features, logits, mixin_prob):
"""Generate scheduled sampling results."""
sampled_targets = sample(logits)
new_targets = mix_gold_sampled(features["targets"],
sampled_targets,
mixin_prob)
new_targets = tf.stop_gradient(new_targets) # Treat new_targets as given.
new_features = copy.copy(features)
new_features["targets"] = new_targets
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
# Compute bottom() for new_targets.
#
# TODO(duckworthd): Only apply bottom to 'new_targets'.
new_transformed_features = self.bottom(new_features)
# Compute body.
with tf.variable_scope("body"):
new_body_outputs, new_losses = self._normalize_body_output(
self.body(new_transformed_features))
assert "training" not in new_losses
# Compute top.
new_logits = self.top(new_body_outputs, new_features)
# Compute loss. Use original features (== labels).
if (hparams.mode != tf.estimator.ModeKeys.PREDICT and
hparams.mode != "attack"):
new_losses["training"] = self.loss(new_logits, features)
else:
new_losses["training"] = 0.0
return new_logits, new_losses
tf.logging.info("Using scheduled sampling.")
assert hparams.scheduled_sampling_prob == 1.0, (
"hparams.scheduled_sampling_prob must be 0 or 1.")
# Gradually increase over a warmup period. Lower numbers mean more gold
# tokens.
mixin_prob = (
hparams.scheduled_sampling_gold_mixin_prob *
common_layers.inverse_exp_decay(
hparams.scheduled_sampling_warmup_steps,
min_value=0.001)
)
# Apply scheduled sampling over N passes. The logits from the (n-1)-th pass
# will be mixed with gold tokens for conditioning in the n-th pass.
scheduled_sampling_num_passes = getattr(
hparams, "scheduled_sampling_num_passes", 1)
assert scheduled_sampling_num_passes > 0, (
"hparams.scheduled_sampling_num_passes must be > 0 if "
"hparams.scheduled_sampling_prob > 0.0")
new_logits = logits
new_losses = losses
for _ in range(scheduled_sampling_num_passes):
new_logits, new_losses = sampled_results(features, new_logits, mixin_prob)
return new_logits, new_losses
|
[
"def",
"maybe_scheduled_sampling",
"(",
"self",
",",
"features",
",",
"logits",
",",
"losses",
")",
":",
"hparams",
"=",
"self",
".",
"hparams",
"problem_hparams",
"=",
"self",
".",
"_problem_hparams",
"# Only do scheduled sampling if requested.",
"if",
"hparams",
".",
"scheduled_sampling_prob",
"==",
"0.0",
":",
"return",
"(",
"logits",
",",
"losses",
")",
"# Only do scheduled sampling on language tasks.",
"modality",
"=",
"problem_hparams",
".",
"modality",
"[",
"\"targets\"",
"]",
"if",
"modality",
"!=",
"modalities",
".",
"ModalityType",
".",
"SYMBOL",
":",
"assert",
"hparams",
".",
"scheduled_sampling_prob",
"==",
"0",
",",
"(",
"\"Scheduled sampling only applies to ModalityType.SYMBOL. Set \"",
"\"hparams.scheduled_sampling_prob == 0.0.\"",
")",
"return",
"(",
"logits",
",",
"losses",
")",
"# Only do scheduled sampling when training.",
"is_training",
"=",
"(",
"hparams",
".",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
")",
"if",
"not",
"is_training",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Running in %s mode. Not using scheduled sampling.\"",
",",
"hparams",
".",
"mode",
")",
"return",
"(",
"logits",
",",
"losses",
")",
"# Pad vocabulary if vocab size must be evenly divisible by vocab_divisor.",
"vocab_size",
"=",
"problem_hparams",
".",
"vocab_size",
"[",
"\"targets\"",
"]",
"assert",
"vocab_size",
"is",
"not",
"None",
"assert",
"hparams",
".",
"vocab_divisor",
"==",
"1",
"def",
"sample",
"(",
"x",
")",
":",
"\"\"\"Multinomial sampling from a n-dimensional tensor.\"\"\"",
"samples",
"=",
"tf",
".",
"multinomial",
"(",
"tf",
".",
"reshape",
"(",
"x",
",",
"[",
"-",
"1",
",",
"vocab_size",
"]",
")",
",",
"1",
")",
"reshaped_samples",
"=",
"tf",
".",
"reshape",
"(",
"samples",
",",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"[",
":",
"-",
"1",
"]",
")",
"return",
"tf",
".",
"to_int32",
"(",
"reshaped_samples",
")",
"def",
"mix_gold_sampled",
"(",
"gold_targets",
",",
"sampled_targets",
",",
"mixin_prob",
")",
":",
"\"\"\"Interleave sampled and gold tokens randomly.\"\"\"",
"return",
"tf",
".",
"where",
"(",
"tf",
".",
"less",
"(",
"tf",
".",
"random_uniform",
"(",
"common_layers",
".",
"shape_list",
"(",
"sampled_targets",
")",
")",
",",
"mixin_prob",
")",
",",
"sampled_targets",
",",
"gold_targets",
")",
"def",
"sampled_results",
"(",
"features",
",",
"logits",
",",
"mixin_prob",
")",
":",
"\"\"\"Generate scheduled sampling results.\"\"\"",
"sampled_targets",
"=",
"sample",
"(",
"logits",
")",
"new_targets",
"=",
"mix_gold_sampled",
"(",
"features",
"[",
"\"targets\"",
"]",
",",
"sampled_targets",
",",
"mixin_prob",
")",
"new_targets",
"=",
"tf",
".",
"stop_gradient",
"(",
"new_targets",
")",
"# Treat new_targets as given.",
"new_features",
"=",
"copy",
".",
"copy",
"(",
"features",
")",
"new_features",
"[",
"\"targets\"",
"]",
"=",
"new_targets",
"with",
"tf",
".",
"variable_scope",
"(",
"tf",
".",
"get_variable_scope",
"(",
")",
",",
"reuse",
"=",
"True",
")",
":",
"# Compute bottom() for new_targets.",
"#",
"# TODO(duckworthd): Only apply bottom to 'new_targets'.",
"new_transformed_features",
"=",
"self",
".",
"bottom",
"(",
"new_features",
")",
"# Compute body.",
"with",
"tf",
".",
"variable_scope",
"(",
"\"body\"",
")",
":",
"new_body_outputs",
",",
"new_losses",
"=",
"self",
".",
"_normalize_body_output",
"(",
"self",
".",
"body",
"(",
"new_transformed_features",
")",
")",
"assert",
"\"training\"",
"not",
"in",
"new_losses",
"# Compute top.",
"new_logits",
"=",
"self",
".",
"top",
"(",
"new_body_outputs",
",",
"new_features",
")",
"# Compute loss. Use original features (== labels).",
"if",
"(",
"hparams",
".",
"mode",
"!=",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"PREDICT",
"and",
"hparams",
".",
"mode",
"!=",
"\"attack\"",
")",
":",
"new_losses",
"[",
"\"training\"",
"]",
"=",
"self",
".",
"loss",
"(",
"new_logits",
",",
"features",
")",
"else",
":",
"new_losses",
"[",
"\"training\"",
"]",
"=",
"0.0",
"return",
"new_logits",
",",
"new_losses",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Using scheduled sampling.\"",
")",
"assert",
"hparams",
".",
"scheduled_sampling_prob",
"==",
"1.0",
",",
"(",
"\"hparams.scheduled_sampling_prob must be 0 or 1.\"",
")",
"# Gradually increase over a warmup period. Lower numbers mean more gold",
"# tokens.",
"mixin_prob",
"=",
"(",
"hparams",
".",
"scheduled_sampling_gold_mixin_prob",
"*",
"common_layers",
".",
"inverse_exp_decay",
"(",
"hparams",
".",
"scheduled_sampling_warmup_steps",
",",
"min_value",
"=",
"0.001",
")",
")",
"# Apply scheduled sampling over N passes. The logits from the (n-1)-th pass",
"# will be mixed with gold tokens for conditioning in the n-th pass.",
"scheduled_sampling_num_passes",
"=",
"getattr",
"(",
"hparams",
",",
"\"scheduled_sampling_num_passes\"",
",",
"1",
")",
"assert",
"scheduled_sampling_num_passes",
">",
"0",
",",
"(",
"\"hparams.scheduled_sampling_num_passes must be > 0 if \"",
"\"hparams.scheduled_sampling_prob > 0.0\"",
")",
"new_logits",
"=",
"logits",
"new_losses",
"=",
"losses",
"for",
"_",
"in",
"range",
"(",
"scheduled_sampling_num_passes",
")",
":",
"new_logits",
",",
"new_losses",
"=",
"sampled_results",
"(",
"features",
",",
"new_logits",
",",
"mixin_prob",
")",
"return",
"new_logits",
",",
"new_losses"
] |
Scheduled sampling.
Performs forward inference again with "targets" feature replaced with values
sampled from the model.
This is the identity unless self.hparams.scheduled_sampling_prob > 0
(default).
**WARNING**: This is not a faithful implementation of scheduled sampling.
This implementation samples tokens for timestep t condtioned on gold tokens
1...t-1. A proper implementation must condition on a mix of gold and
sampled tokens. Doing so is not efficient for models such like Transformer.
Args:
features: {str: Tensor}. Features sharded along batch dimension.
logits: Tensor. Logits for each shard of data.
losses: 0-D Tensor or (num: 0-D Tensor, denom: 0-D Tensor). Loss Tensor
Returns:
new_logits: Tensor.
new_losses: {str: loss} where loss is one of (i) a 0-D Tensor or
(ii) a (num: 0-D Tensor, denom: 0-D Tensor) pair to be used in a
weighted average.
|
[
"Scheduled",
"sampling",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/t2t_model.py#L1778-L1901
|
22,294
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/attention_lm_moe.py
|
expand_batch_coordinates
|
def expand_batch_coordinates(bc, length_factor):
"""Duplicate elements of bc by length_factor.
Args:
bc (tf.Tensor): int32 tensor of shape [1, length, 1]
length_factor (int):
Returns:
tf.Tensor: of shape [1, length*length_factor, 1] where every elements has
been duplicated length_factor times.
"""
assert bc.get_shape().as_list() == [1, None, 1]
# bc has shape [1, length, 1]
bc *= tf.constant([[1] * length_factor])
# bc has shape [1, length, length_factor]
bc = tf.reshape(bc, [1, -1, 1])
# bc has shape [1, length*length_factor]
return bc
|
python
|
def expand_batch_coordinates(bc, length_factor):
"""Duplicate elements of bc by length_factor.
Args:
bc (tf.Tensor): int32 tensor of shape [1, length, 1]
length_factor (int):
Returns:
tf.Tensor: of shape [1, length*length_factor, 1] where every elements has
been duplicated length_factor times.
"""
assert bc.get_shape().as_list() == [1, None, 1]
# bc has shape [1, length, 1]
bc *= tf.constant([[1] * length_factor])
# bc has shape [1, length, length_factor]
bc = tf.reshape(bc, [1, -1, 1])
# bc has shape [1, length*length_factor]
return bc
|
[
"def",
"expand_batch_coordinates",
"(",
"bc",
",",
"length_factor",
")",
":",
"assert",
"bc",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"==",
"[",
"1",
",",
"None",
",",
"1",
"]",
"# bc has shape [1, length, 1]",
"bc",
"*=",
"tf",
".",
"constant",
"(",
"[",
"[",
"1",
"]",
"*",
"length_factor",
"]",
")",
"# bc has shape [1, length, length_factor]",
"bc",
"=",
"tf",
".",
"reshape",
"(",
"bc",
",",
"[",
"1",
",",
"-",
"1",
",",
"1",
"]",
")",
"# bc has shape [1, length*length_factor]",
"return",
"bc"
] |
Duplicate elements of bc by length_factor.
Args:
bc (tf.Tensor): int32 tensor of shape [1, length, 1]
length_factor (int):
Returns:
tf.Tensor: of shape [1, length*length_factor, 1] where every elements has
been duplicated length_factor times.
|
[
"Duplicate",
"elements",
"of",
"bc",
"by",
"length_factor",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/attention_lm_moe.py#L377-L394
|
22,295
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/attention_lm_moe.py
|
remove_pad
|
def remove_pad(x, pad_remover, mode):
"""Remove padding by concatenating all dimension into one.
Args:
x (tf.Tensor): input of shape [batch_size, length, depth]
pad_remover (obj): a PadRemover object
mode (ModeKeys): infer, train or eval. If inference, the padding remover is
not applied
Returns:
tf.Tensor of shape [1,length_nonpad,depth] where
length_nonpad <= batch_size*length
"""
# Concatenate all tokens (without padding)
x = expert_utils.flatten_all_but_last(x)
# Remove padding for training and eval
if mode != ModeKeys.PREDICT:
# This is a hack to allows inference when the <go> token
# is detected as padding and removed. This works for now because there is
# no padding at inference.
x = pad_remover.remove(x)
x = tf.expand_dims(x, axis=0) # Now batch_size=1
return x
|
python
|
def remove_pad(x, pad_remover, mode):
"""Remove padding by concatenating all dimension into one.
Args:
x (tf.Tensor): input of shape [batch_size, length, depth]
pad_remover (obj): a PadRemover object
mode (ModeKeys): infer, train or eval. If inference, the padding remover is
not applied
Returns:
tf.Tensor of shape [1,length_nonpad,depth] where
length_nonpad <= batch_size*length
"""
# Concatenate all tokens (without padding)
x = expert_utils.flatten_all_but_last(x)
# Remove padding for training and eval
if mode != ModeKeys.PREDICT:
# This is a hack to allows inference when the <go> token
# is detected as padding and removed. This works for now because there is
# no padding at inference.
x = pad_remover.remove(x)
x = tf.expand_dims(x, axis=0) # Now batch_size=1
return x
|
[
"def",
"remove_pad",
"(",
"x",
",",
"pad_remover",
",",
"mode",
")",
":",
"# Concatenate all tokens (without padding)",
"x",
"=",
"expert_utils",
".",
"flatten_all_but_last",
"(",
"x",
")",
"# Remove padding for training and eval",
"if",
"mode",
"!=",
"ModeKeys",
".",
"PREDICT",
":",
"# This is a hack to allows inference when the <go> token",
"# is detected as padding and removed. This works for now because there is",
"# no padding at inference.",
"x",
"=",
"pad_remover",
".",
"remove",
"(",
"x",
")",
"x",
"=",
"tf",
".",
"expand_dims",
"(",
"x",
",",
"axis",
"=",
"0",
")",
"# Now batch_size=1",
"return",
"x"
] |
Remove padding by concatenating all dimension into one.
Args:
x (tf.Tensor): input of shape [batch_size, length, depth]
pad_remover (obj): a PadRemover object
mode (ModeKeys): infer, train or eval. If inference, the padding remover is
not applied
Returns:
tf.Tensor of shape [1,length_nonpad,depth] where
length_nonpad <= batch_size*length
|
[
"Remove",
"padding",
"by",
"concatenating",
"all",
"dimension",
"into",
"one",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/attention_lm_moe.py#L398-L422
|
22,296
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/attention_lm_moe.py
|
attention_lm_ae_extended
|
def attention_lm_ae_extended():
"""Experiment with the exp_factor params."""
hparams = attention_lm_moe_base_long_seq()
hparams.attention_layers = "eeee"
hparams.attention_local = True
# hparams.factored_logits=1 # Necessary when the number of expert grow bigger
hparams.attention_moe_k = 2
hparams.attention_exp_factor = 4
# hparams.attention_exp_inputdim = 128
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
return hparams
|
python
|
def attention_lm_ae_extended():
"""Experiment with the exp_factor params."""
hparams = attention_lm_moe_base_long_seq()
hparams.attention_layers = "eeee"
hparams.attention_local = True
# hparams.factored_logits=1 # Necessary when the number of expert grow bigger
hparams.attention_moe_k = 2
hparams.attention_exp_factor = 4
# hparams.attention_exp_inputdim = 128
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
return hparams
|
[
"def",
"attention_lm_ae_extended",
"(",
")",
":",
"hparams",
"=",
"attention_lm_moe_base_long_seq",
"(",
")",
"hparams",
".",
"attention_layers",
"=",
"\"eeee\"",
"hparams",
".",
"attention_local",
"=",
"True",
"# hparams.factored_logits=1 # Necessary when the number of expert grow bigger",
"hparams",
".",
"attention_moe_k",
"=",
"2",
"hparams",
".",
"attention_exp_factor",
"=",
"4",
"# hparams.attention_exp_inputdim = 128",
"hparams",
".",
"layer_preprocess_sequence",
"=",
"\"n\"",
"hparams",
".",
"layer_postprocess_sequence",
"=",
"\"da\"",
"return",
"hparams"
] |
Experiment with the exp_factor params.
|
[
"Experiment",
"with",
"the",
"exp_factor",
"params",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/attention_lm_moe.py#L599-L611
|
22,297
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/attention_lm_moe.py
|
attention_lm_moe_small
|
def attention_lm_moe_small():
"""Cheap model for single-gpu training.
on lm1b_32k:
~312M params
1.6 steps/sec on [GeForce GTX TITAN X]
After 50K steps on 8 GPUs (synchronous):
eval_log_ppl_per_token = 3.31
Returns:
an hparams object.
"""
hparams = attention_lm_moe_base()
hparams.num_hidden_layers = 4
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.moe_num_experts = 128
hparams.moe_layers = "2"
return hparams
|
python
|
def attention_lm_moe_small():
"""Cheap model for single-gpu training.
on lm1b_32k:
~312M params
1.6 steps/sec on [GeForce GTX TITAN X]
After 50K steps on 8 GPUs (synchronous):
eval_log_ppl_per_token = 3.31
Returns:
an hparams object.
"""
hparams = attention_lm_moe_base()
hparams.num_hidden_layers = 4
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.moe_num_experts = 128
hparams.moe_layers = "2"
return hparams
|
[
"def",
"attention_lm_moe_small",
"(",
")",
":",
"hparams",
"=",
"attention_lm_moe_base",
"(",
")",
"hparams",
".",
"num_hidden_layers",
"=",
"4",
"hparams",
".",
"hidden_size",
"=",
"512",
"hparams",
".",
"filter_size",
"=",
"2048",
"hparams",
".",
"moe_num_experts",
"=",
"128",
"hparams",
".",
"moe_layers",
"=",
"\"2\"",
"return",
"hparams"
] |
Cheap model for single-gpu training.
on lm1b_32k:
~312M params
1.6 steps/sec on [GeForce GTX TITAN X]
After 50K steps on 8 GPUs (synchronous):
eval_log_ppl_per_token = 3.31
Returns:
an hparams object.
|
[
"Cheap",
"model",
"for",
"single",
"-",
"gpu",
"training",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/attention_lm_moe.py#L632-L650
|
22,298
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/attention_lm_moe.py
|
attention_lm_attention_moe_tiny
|
def attention_lm_attention_moe_tiny():
"""Cheap model for debugging.
Returns:
an hparams object.
"""
hparams = attention_lm_moe_small()
hparams.moe_layers = ""
hparams.attention_num_experts = 128
hparams.filter_size = 8192
hparams.attention_type = AttentionType.LOCAL_EXPERTS
return hparams
|
python
|
def attention_lm_attention_moe_tiny():
"""Cheap model for debugging.
Returns:
an hparams object.
"""
hparams = attention_lm_moe_small()
hparams.moe_layers = ""
hparams.attention_num_experts = 128
hparams.filter_size = 8192
hparams.attention_type = AttentionType.LOCAL_EXPERTS
return hparams
|
[
"def",
"attention_lm_attention_moe_tiny",
"(",
")",
":",
"hparams",
"=",
"attention_lm_moe_small",
"(",
")",
"hparams",
".",
"moe_layers",
"=",
"\"\"",
"hparams",
".",
"attention_num_experts",
"=",
"128",
"hparams",
".",
"filter_size",
"=",
"8192",
"hparams",
".",
"attention_type",
"=",
"AttentionType",
".",
"LOCAL_EXPERTS",
"return",
"hparams"
] |
Cheap model for debugging.
Returns:
an hparams object.
|
[
"Cheap",
"model",
"for",
"debugging",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/attention_lm_moe.py#L666-L677
|
22,299
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/attention_lm_moe.py
|
attention_lm_moe_large
|
def attention_lm_moe_large():
"""Large model for distributed training.
Over 1B parameters, so requires multi-gpu training due to memory
requirements.
on lm1b_32k:
After 45K steps on 8 GPUs (synchronous):
eval_log_ppl_per_token = 3.18
eval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9
Returns:
an hparams object.
"""
hparams = attention_lm_moe_base()
hparams.num_hidden_layers = 5
hparams.moe_layers = "3"
hparams.hidden_size = 1024
hparams.num_heads = 16
hparams.filter_size = 4096
hparams.moe_hidden_sizes = "4096"
hparams.moe_num_experts = 128
hparams.layer_prepostprocess_dropout = 0.2
return hparams
|
python
|
def attention_lm_moe_large():
"""Large model for distributed training.
Over 1B parameters, so requires multi-gpu training due to memory
requirements.
on lm1b_32k:
After 45K steps on 8 GPUs (synchronous):
eval_log_ppl_per_token = 3.18
eval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9
Returns:
an hparams object.
"""
hparams = attention_lm_moe_base()
hparams.num_hidden_layers = 5
hparams.moe_layers = "3"
hparams.hidden_size = 1024
hparams.num_heads = 16
hparams.filter_size = 4096
hparams.moe_hidden_sizes = "4096"
hparams.moe_num_experts = 128
hparams.layer_prepostprocess_dropout = 0.2
return hparams
|
[
"def",
"attention_lm_moe_large",
"(",
")",
":",
"hparams",
"=",
"attention_lm_moe_base",
"(",
")",
"hparams",
".",
"num_hidden_layers",
"=",
"5",
"hparams",
".",
"moe_layers",
"=",
"\"3\"",
"hparams",
".",
"hidden_size",
"=",
"1024",
"hparams",
".",
"num_heads",
"=",
"16",
"hparams",
".",
"filter_size",
"=",
"4096",
"hparams",
".",
"moe_hidden_sizes",
"=",
"\"4096\"",
"hparams",
".",
"moe_num_experts",
"=",
"128",
"hparams",
".",
"layer_prepostprocess_dropout",
"=",
"0.2",
"return",
"hparams"
] |
Large model for distributed training.
Over 1B parameters, so requires multi-gpu training due to memory
requirements.
on lm1b_32k:
After 45K steps on 8 GPUs (synchronous):
eval_log_ppl_per_token = 3.18
eval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9
Returns:
an hparams object.
|
[
"Large",
"model",
"for",
"distributed",
"training",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/attention_lm_moe.py#L699-L722
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.