| id (int32, 0–252k) | repo (string, 7–55 chars) | path (string, 4–127 chars) | func_name (string, 1–88 chars) | original_string (string, 75–19.8k chars) | language (1 class) | code (string, 75–19.8k chars) | code_tokens (list) | docstring (string, 3–17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87–242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
21,800
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
linear_interpolate_rank
|
def linear_interpolate_rank(tensor1, tensor2, coeffs, rank=1):
"""Linearly interpolate channel at "rank" between two tensors.
The channels are ranked according to the L2 norm of the difference between
tensor1[channel] and tensor2[channel].
Args:
tensor1: 4-D Tensor, NHWC
tensor2: 4-D Tensor, NHWC
coeffs: list of floats.
rank: integer.
Returns:
interp_latents: list of interpolated 4-D Tensors, shape=(NHWC)
"""
# sum across space, max across channels.
_, _, _, num_channels = common_layers.shape_list(tensor1)
diff_sq_sum = tf.reduce_sum((tensor1 - tensor2)**2, axis=(0, 1, 2))
_, feature_ranks = tf.math.top_k(diff_sq_sum, k=rank)
feature_rank = feature_ranks[-1]
channel_inds = tf.range(num_channels, dtype=tf.int32)
channel_mask = tf.equal(channel_inds, feature_rank)
ones_t = tf.ones(num_channels, dtype=tf.float32)
zeros_t = tf.zeros(num_channels, dtype=tf.float32)
interp_tensors = []
for coeff in coeffs:
curr_coeff = tf.where(channel_mask, coeff * ones_t, zeros_t)
interp_tensor = tensor1 + curr_coeff * (tensor2 - tensor1)
interp_tensors.append(interp_tensor)
return tf.concat(interp_tensors, axis=0)
|
python
|
def linear_interpolate_rank(tensor1, tensor2, coeffs, rank=1):
"""Linearly interpolate channel at "rank" between two tensors.
The channels are ranked according to the L2 norm of the difference between
tensor1[channel] and tensor2[channel].
Args:
tensor1: 4-D Tensor, NHWC
tensor2: 4-D Tensor, NHWC
coeffs: list of floats.
rank: integer.
Returns:
interp_latents: list of interpolated 4-D Tensors, shape=(NHWC)
"""
# sum across space, max across channels.
_, _, _, num_channels = common_layers.shape_list(tensor1)
diff_sq_sum = tf.reduce_sum((tensor1 - tensor2)**2, axis=(0, 1, 2))
_, feature_ranks = tf.math.top_k(diff_sq_sum, k=rank)
feature_rank = feature_ranks[-1]
channel_inds = tf.range(num_channels, dtype=tf.int32)
channel_mask = tf.equal(channel_inds, feature_rank)
ones_t = tf.ones(num_channels, dtype=tf.float32)
zeros_t = tf.zeros(num_channels, dtype=tf.float32)
interp_tensors = []
for coeff in coeffs:
curr_coeff = tf.where(channel_mask, coeff * ones_t, zeros_t)
interp_tensor = tensor1 + curr_coeff * (tensor2 - tensor1)
interp_tensors.append(interp_tensor)
return tf.concat(interp_tensors, axis=0)
|
[
"def",
"linear_interpolate_rank",
"(",
"tensor1",
",",
"tensor2",
",",
"coeffs",
",",
"rank",
"=",
"1",
")",
":",
"# sum across space, max across channels.",
"_",
",",
"_",
",",
"_",
",",
"num_channels",
"=",
"common_layers",
".",
"shape_list",
"(",
"tensor1",
")",
"diff_sq_sum",
"=",
"tf",
".",
"reduce_sum",
"(",
"(",
"tensor1",
"-",
"tensor2",
")",
"**",
"2",
",",
"axis",
"=",
"(",
"0",
",",
"1",
",",
"2",
")",
")",
"_",
",",
"feature_ranks",
"=",
"tf",
".",
"math",
".",
"top_k",
"(",
"diff_sq_sum",
",",
"k",
"=",
"rank",
")",
"feature_rank",
"=",
"feature_ranks",
"[",
"-",
"1",
"]",
"channel_inds",
"=",
"tf",
".",
"range",
"(",
"num_channels",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"channel_mask",
"=",
"tf",
".",
"equal",
"(",
"channel_inds",
",",
"feature_rank",
")",
"ones_t",
"=",
"tf",
".",
"ones",
"(",
"num_channels",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"zeros_t",
"=",
"tf",
".",
"zeros",
"(",
"num_channels",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"interp_tensors",
"=",
"[",
"]",
"for",
"coeff",
"in",
"coeffs",
":",
"curr_coeff",
"=",
"tf",
".",
"where",
"(",
"channel_mask",
",",
"coeff",
"*",
"ones_t",
",",
"zeros_t",
")",
"interp_tensor",
"=",
"tensor1",
"+",
"curr_coeff",
"*",
"(",
"tensor2",
"-",
"tensor1",
")",
"interp_tensors",
".",
"append",
"(",
"interp_tensor",
")",
"return",
"tf",
".",
"concat",
"(",
"interp_tensors",
",",
"axis",
"=",
"0",
")"
] |
Linearly interpolate channel at "rank" between two tensors.
The channels are ranked according to the L2 norm of the difference between
tensor1[channel] and tensor2[channel].
Args:
tensor1: 4-D Tensor, NHWC
tensor2: 4-D Tensor, NHWC
coeffs: list of floats.
rank: integer.
Returns:
interp_latents: list of interpolated 4-D Tensors, shape=(NHWC)
|
[
"Linearly",
"interpolate",
"channel",
"at",
"rank",
"between",
"two",
"tensors",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L53-L82
|
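A minimal standalone NumPy sketch of the channel-selection logic in `linear_interpolate_rank` above; the shapes, values, and coefficients are illustrative assumptions, not taken from the dataset row. Note that, despite the docstring's "list of interpolated 4-D Tensors", the function concatenates the per-coefficient results along the batch axis:

```python
import numpy as np

# Standalone sketch of linear_interpolate_rank's selection logic.
t1 = np.random.rand(1, 4, 4, 3).astype(np.float32)
t2 = np.random.rand(1, 4, 4, 3).astype(np.float32)
rank, coeffs = 1, [0.0, 0.5, 1.0]

# Sum of squared differences per channel (summed over N, H, W).
diff_sq_sum = ((t1 - t2) ** 2).sum(axis=(0, 1, 2))
# Index of the channel with the rank-th largest difference.
feature_rank = np.argsort(diff_sq_sum)[::-1][rank - 1]

interp_tensors = []
for coeff in coeffs:
    curr_coeff = np.zeros(3, dtype=np.float32)
    curr_coeff[feature_rank] = coeff        # only the selected channel moves
    interp_tensors.append(t1 + curr_coeff * (t2 - t1))
# As in the TF code, results are concatenated along the batch axis.
out = np.concatenate(interp_tensors, axis=0)  # shape (3, 4, 4, 3)
```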
21,801
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
get_cond_latents_at_level
|
def get_cond_latents_at_level(cond_latents, level, hparams):
"""Returns a single or list of conditional latents at level 'level'."""
if cond_latents:
if hparams.latent_dist_encoder in ["conv_net", "conv3d_net"]:
return [cond_latent[level] for cond_latent in cond_latents]
elif hparams.latent_dist_encoder in ["pointwise", "conv_lstm"]:
return cond_latents[level]
|
python
|
def get_cond_latents_at_level(cond_latents, level, hparams):
"""Returns a single or list of conditional latents at level 'level'."""
if cond_latents:
if hparams.latent_dist_encoder in ["conv_net", "conv3d_net"]:
return [cond_latent[level] for cond_latent in cond_latents]
elif hparams.latent_dist_encoder in ["pointwise", "conv_lstm"]:
return cond_latents[level]
|
[
"def",
"get_cond_latents_at_level",
"(",
"cond_latents",
",",
"level",
",",
"hparams",
")",
":",
"if",
"cond_latents",
":",
"if",
"hparams",
".",
"latent_dist_encoder",
"in",
"[",
"\"conv_net\"",
",",
"\"conv3d_net\"",
"]",
":",
"return",
"[",
"cond_latent",
"[",
"level",
"]",
"for",
"cond_latent",
"in",
"cond_latents",
"]",
"elif",
"hparams",
".",
"latent_dist_encoder",
"in",
"[",
"\"pointwise\"",
",",
"\"conv_lstm\"",
"]",
":",
"return",
"cond_latents",
"[",
"level",
"]"
] |
Returns a single or list of conditional latents at level 'level'.
|
[
"Returns",
"a",
"single",
"or",
"list",
"of",
"conditional",
"latents",
"at",
"level",
"level",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L141-L147
|
21,802
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
check_cond_latents
|
def check_cond_latents(cond_latents, hparams):
"""Shape checking for cond_latents."""
if cond_latents is None:
return
if not isinstance(cond_latents[0], list):
cond_latents = [cond_latents]
exp_num_latents = hparams.num_cond_latents
if hparams.latent_dist_encoder == "conv_net":
exp_num_latents += int(hparams.cond_first_frame)
if len(cond_latents) != exp_num_latents:
raise ValueError("Expected number of cond_latents: %d, got %d" %
(exp_num_latents, len(cond_latents)))
for cond_latent in cond_latents:
if len(cond_latent) != hparams.n_levels - 1:
raise ValueError("Expected level_latents to be %d, got %d" %
(hparams.n_levels - 1, len(cond_latent)))
|
python
|
def check_cond_latents(cond_latents, hparams):
"""Shape checking for cond_latents."""
if cond_latents is None:
return
if not isinstance(cond_latents[0], list):
cond_latents = [cond_latents]
exp_num_latents = hparams.num_cond_latents
if hparams.latent_dist_encoder == "conv_net":
exp_num_latents += int(hparams.cond_first_frame)
if len(cond_latents) != exp_num_latents:
raise ValueError("Expected number of cond_latents: %d, got %d" %
(exp_num_latents, len(cond_latents)))
for cond_latent in cond_latents:
if len(cond_latent) != hparams.n_levels - 1:
raise ValueError("Expected level_latents to be %d, got %d" %
(hparams.n_levels - 1, len(cond_latent)))
|
[
"def",
"check_cond_latents",
"(",
"cond_latents",
",",
"hparams",
")",
":",
"if",
"cond_latents",
"is",
"None",
":",
"return",
"if",
"not",
"isinstance",
"(",
"cond_latents",
"[",
"0",
"]",
",",
"list",
")",
":",
"cond_latents",
"=",
"[",
"cond_latents",
"]",
"exp_num_latents",
"=",
"hparams",
".",
"num_cond_latents",
"if",
"hparams",
".",
"latent_dist_encoder",
"==",
"\"conv_net\"",
":",
"exp_num_latents",
"+=",
"int",
"(",
"hparams",
".",
"cond_first_frame",
")",
"if",
"len",
"(",
"cond_latents",
")",
"!=",
"exp_num_latents",
":",
"raise",
"ValueError",
"(",
"\"Expected number of cond_latents: %d, got %d\"",
"%",
"(",
"exp_num_latents",
",",
"len",
"(",
"cond_latents",
")",
")",
")",
"for",
"cond_latent",
"in",
"cond_latents",
":",
"if",
"len",
"(",
"cond_latent",
")",
"!=",
"hparams",
".",
"n_levels",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Expected level_latents to be %d, got %d\"",
"%",
"(",
"hparams",
".",
"n_levels",
"-",
"1",
",",
"len",
"(",
"cond_latent",
")",
")",
")"
] |
Shape checking for cond_latents.
|
[
"Shape",
"checking",
"for",
"cond_latents",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L150-L165
|
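A small sketch of the nesting `check_cond_latents` above enforces, using placeholder strings instead of Tensors and assumed hparams values (num_cond_latents=2, n_levels=3, an encoder other than "conv_net"):

```python
# Each conditional latent is itself a list of n_levels - 1 per-level latents.
cond_latents = [["lat_0_lvl_0", "lat_0_lvl_1"],
                ["lat_1_lvl_0", "lat_1_lvl_1"]]
exp_num_latents, n_levels = 2, 3
assert len(cond_latents) == exp_num_latents
assert all(len(c) == n_levels - 1 for c in cond_latents)
```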
21,803
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
get_variable_ddi
|
def get_variable_ddi(name, shape, initial_value, dtype=tf.float32, init=False,
trainable=True):
"""Wrapper for data-dependent initialization."""
# If init is a tf bool: w is assigned dynamically at runtime.
# If init is a python bool: then w is determined during graph construction.
w = tf.get_variable(name, shape, dtype, None, trainable=trainable)
if isinstance(init, bool):
if init:
return assign(w, initial_value)
return w
else:
return tf.cond(init, lambda: assign(w, initial_value), lambda: w)
|
python
|
def get_variable_ddi(name, shape, initial_value, dtype=tf.float32, init=False,
trainable=True):
"""Wrapper for data-dependent initialization."""
# If init is a tf bool: w is assigned dynamically at runtime.
# If init is a python bool: then w is determined during graph construction.
w = tf.get_variable(name, shape, dtype, None, trainable=trainable)
if isinstance(init, bool):
if init:
return assign(w, initial_value)
return w
else:
return tf.cond(init, lambda: assign(w, initial_value), lambda: w)
|
[
"def",
"get_variable_ddi",
"(",
"name",
",",
"shape",
",",
"initial_value",
",",
"dtype",
"=",
"tf",
".",
"float32",
",",
"init",
"=",
"False",
",",
"trainable",
"=",
"True",
")",
":",
"# If init is a tf bool: w is assigned dynamically at runtime.",
"# If init is a python bool: then w is determined during graph construction.",
"w",
"=",
"tf",
".",
"get_variable",
"(",
"name",
",",
"shape",
",",
"dtype",
",",
"None",
",",
"trainable",
"=",
"trainable",
")",
"if",
"isinstance",
"(",
"init",
",",
"bool",
")",
":",
"if",
"init",
":",
"return",
"assign",
"(",
"w",
",",
"initial_value",
")",
"return",
"w",
"else",
":",
"return",
"tf",
".",
"cond",
"(",
"init",
",",
"lambda",
":",
"assign",
"(",
"w",
",",
"initial_value",
")",
",",
"lambda",
":",
"w",
")"
] |
Wrapper for data-dependent initialization.
|
[
"Wrapper",
"for",
"data",
"-",
"dependent",
"initialization",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L169-L180
|
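A runnable plain-Python sketch of the init dispatch in `get_variable_ddi` above; `assign` and the callable standing in for a `tf.bool` tensor are hypothetical stand-ins, not TensorFlow APIs:

```python
# `assign` just returns the initial value, standing in for the TF assign op.
def assign(w, initial_value):
    return initial_value

def get_variable_ddi_sketch(w, initial_value, init):
    if isinstance(init, bool):
        # Python bool: the branch is fixed at graph-construction time.
        return assign(w, initial_value) if init else w
    # Otherwise `init` stands in for a tf.bool tensor that the real code
    # resolves at runtime via tf.cond.
    return assign(w, initial_value) if init() else w

print(get_variable_ddi_sketch(0.0, 1.5, init=True))           # 1.5
print(get_variable_ddi_sketch(0.0, 1.5, init=lambda: False))  # 0.0
```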
21,804
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
get_dropout
|
def get_dropout(x, rate=0.0, init=True):
"""Dropout x with dropout_rate = rate.
Apply zero dropout during init or prediction time.
Args:
x: 4-D Tensor, shape=(NHWC).
rate: Dropout rate.
init: Initialization.
Returns:
x: activations after dropout.
"""
if init or rate == 0:
return x
return tf.layers.dropout(x, rate=rate, training=True)
|
python
|
def get_dropout(x, rate=0.0, init=True):
"""Dropout x with dropout_rate = rate.
Apply zero dropout during init or prediction time.
Args:
x: 4-D Tensor, shape=(NHWC).
rate: Dropout rate.
init: Initialization.
Returns:
x: activations after dropout.
"""
if init or rate == 0:
return x
return tf.layers.dropout(x, rate=rate, training=True)
|
[
"def",
"get_dropout",
"(",
"x",
",",
"rate",
"=",
"0.0",
",",
"init",
"=",
"True",
")",
":",
"if",
"init",
"or",
"rate",
"==",
"0",
":",
"return",
"x",
"return",
"tf",
".",
"layers",
".",
"dropout",
"(",
"x",
",",
"rate",
"=",
"rate",
",",
"training",
"=",
"True",
")"
] |
Dropout x with dropout_rate = rate.
Apply zero dropout during init or prediction time.
Args:
x: 4-D Tensor, shape=(NHWC).
rate: Dropout rate.
init: Initialization.
Returns:
x: activations after dropout.
|
[
"Dropout",
"x",
"with",
"dropout_rate",
"=",
"rate",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L184-L198
|
21,805
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
actnorm_3d
|
def actnorm_3d(name, x, logscale_factor=3.):
"""Applies actnorm to each time-step independently.
There are a total of 2*n_channels*n_steps parameters learnt.
Args:
name: variable scope.
x: 5-D Tensor, (NTHWC)
logscale_factor: Increases the learning rate of the scale by
logscale_factor.
Returns:
x: 5-D Tensor, (NTHWC) with the per-timestep, per-channel normalization.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x = tf.unstack(x, axis=1)
x_normed = []
for ind, x_step in enumerate(x):
x_step, _ = actnorm("actnorm_%d" % ind, x_step,
logscale_factor=logscale_factor)
x_normed.append(x_step)
return tf.stack(x_normed, axis=1), None
|
python
|
def actnorm_3d(name, x, logscale_factor=3.):
"""Applies actnorm to each time-step independently.
There are a total of 2*n_channels*n_steps parameters learnt.
Args:
name: variable scope.
x: 5-D Tensor, (NTHWC)
logscale_factor: Increases the learning rate of the scale by
logscale_factor.
Returns:
x: 5-D Tensor, (NTHWC) with the per-timestep, per-channel normalization.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x = tf.unstack(x, axis=1)
x_normed = []
for ind, x_step in enumerate(x):
x_step, _ = actnorm("actnorm_%d" % ind, x_step,
logscale_factor=logscale_factor)
x_normed.append(x_step)
return tf.stack(x_normed, axis=1), None
|
[
"def",
"actnorm_3d",
"(",
"name",
",",
"x",
",",
"logscale_factor",
"=",
"3.",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"x",
"=",
"tf",
".",
"unstack",
"(",
"x",
",",
"axis",
"=",
"1",
")",
"x_normed",
"=",
"[",
"]",
"for",
"ind",
",",
"x_step",
"in",
"enumerate",
"(",
"x",
")",
":",
"x_step",
",",
"_",
"=",
"actnorm",
"(",
"\"actnorm_%d\"",
"%",
"ind",
",",
"x_step",
",",
"logscale_factor",
"=",
"logscale_factor",
")",
"x_normed",
".",
"append",
"(",
"x_step",
")",
"return",
"tf",
".",
"stack",
"(",
"x_normed",
",",
"axis",
"=",
"1",
")",
",",
"None"
] |
Applies actnorm to each time-step independently.
There are a total of 2*n_channels*n_steps parameters learnt.
Args:
name: variable scope.
x: 5-D Tensor, (NTHWC)
logscale_factor: Increases the learning rate of the scale by
logscale_factor.
Returns:
x: 5-D Tensor, (NTHWC) with the per-timestep, per-channel normalization.
|
[
"Applies",
"actnorm",
"to",
"each",
"time",
"-",
"step",
"independently",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L202-L222
|
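A worked example of the parameter count stated in the docstring of `actnorm_3d` above: per-timestep actnorm learns one scale and one bias per channel per step. The step and channel counts here are assumed for illustration:

```python
# 2 * n_channels * n_steps learned parameters, e.g. 16 steps of 64 channels:
n_steps, n_channels = 16, 64
print(2 * n_channels * n_steps)  # 2048
```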
21,806
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
actnorm_center
|
def actnorm_center(name, x, reverse=False, init=False):
"""Add a bias to x.
Initialize such that the output of the first minibatch is zero centered
per channel.
Args:
name: scope
x: 2-D or 4-D Tensor.
reverse: Forward or backward operation.
init: data-dependent initialization.
Returns:
x_center: (x + b), if reverse is True and (x - b) otherwise.
"""
shape = common_layers.shape_list(x)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
assert len(shape) == 2 or len(shape) == 4
if len(shape) == 2:
x_mean = tf.reduce_mean(x, [0], keepdims=True)
b = get_variable_ddi("b", (1, shape[1]), initial_value=-x_mean,
init=init)
elif len(shape) == 4:
x_mean = tf.reduce_mean(x, [0, 1, 2], keepdims=True)
b = get_variable_ddi(
"b", (1, 1, 1, shape[3]), initial_value=-x_mean, init=init)
if not reverse:
x += b
else:
x -= b
return x
|
python
|
def actnorm_center(name, x, reverse=False, init=False):
"""Add a bias to x.
Initialize such that the output of the first minibatch is zero centered
per channel.
Args:
name: scope
x: 2-D or 4-D Tensor.
reverse: Forward or backward operation.
init: data-dependent initialization.
Returns:
x_center: (x + b), if reverse is True and (x - b) otherwise.
"""
shape = common_layers.shape_list(x)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
assert len(shape) == 2 or len(shape) == 4
if len(shape) == 2:
x_mean = tf.reduce_mean(x, [0], keepdims=True)
b = get_variable_ddi("b", (1, shape[1]), initial_value=-x_mean,
init=init)
elif len(shape) == 4:
x_mean = tf.reduce_mean(x, [0, 1, 2], keepdims=True)
b = get_variable_ddi(
"b", (1, 1, 1, shape[3]), initial_value=-x_mean, init=init)
if not reverse:
x += b
else:
x -= b
return x
|
[
"def",
"actnorm_center",
"(",
"name",
",",
"x",
",",
"reverse",
"=",
"False",
",",
"init",
"=",
"False",
")",
":",
"shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"assert",
"len",
"(",
"shape",
")",
"==",
"2",
"or",
"len",
"(",
"shape",
")",
"==",
"4",
"if",
"len",
"(",
"shape",
")",
"==",
"2",
":",
"x_mean",
"=",
"tf",
".",
"reduce_mean",
"(",
"x",
",",
"[",
"0",
"]",
",",
"keepdims",
"=",
"True",
")",
"b",
"=",
"get_variable_ddi",
"(",
"\"b\"",
",",
"(",
"1",
",",
"shape",
"[",
"1",
"]",
")",
",",
"initial_value",
"=",
"-",
"x_mean",
",",
"init",
"=",
"init",
")",
"elif",
"len",
"(",
"shape",
")",
"==",
"4",
":",
"x_mean",
"=",
"tf",
".",
"reduce_mean",
"(",
"x",
",",
"[",
"0",
",",
"1",
",",
"2",
"]",
",",
"keepdims",
"=",
"True",
")",
"b",
"=",
"get_variable_ddi",
"(",
"\"b\"",
",",
"(",
"1",
",",
"1",
",",
"1",
",",
"shape",
"[",
"3",
"]",
")",
",",
"initial_value",
"=",
"-",
"x_mean",
",",
"init",
"=",
"init",
")",
"if",
"not",
"reverse",
":",
"x",
"+=",
"b",
"else",
":",
"x",
"-=",
"b",
"return",
"x"
] |
Add a bias to x.
Initialize such that the output of the first minibatch is zero centered
per channel.
Args:
name: scope
x: 2-D or 4-D Tensor.
reverse: Forward or backward operation.
init: data-dependent initialization.
Returns:
x_center: (x + b), if reverse is True and (x - b) otherwise.
|
[
"Add",
"a",
"bias",
"to",
"x",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L265-L296
|
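A NumPy sketch of the data-dependent initialization in `actnorm_center` above for the 4-D NHWC case; the input shape is assumed. The bias is set to the negative per-channel mean of the first minibatch, so that minibatch comes out zero-centered per channel:

```python
import numpy as np

x = np.random.rand(8, 4, 4, 3).astype(np.float32)
b = -x.mean(axis=(0, 1, 2), keepdims=True)   # shape (1, 1, 1, 3)
x_centered = x + b                           # forward pass (reverse=False)
print(np.allclose(x_centered.mean(axis=(0, 1, 2)), 0.0, atol=1e-6))  # True
```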
21,807
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
actnorm_scale
|
def actnorm_scale(name, x, logscale_factor=3., reverse=False, init=False):
"""Per-channel scaling of x."""
x_shape = common_layers.shape_list(x)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
# Variance initialization logic.
assert len(x_shape) == 2 or len(x_shape) == 4
if len(x_shape) == 2:
x_var = tf.reduce_mean(x**2, [0], keepdims=True)
logdet_factor = 1
var_shape = (1, x_shape[1])
elif len(x_shape) == 4:
x_var = tf.reduce_mean(x**2, [0, 1, 2], keepdims=True)
logdet_factor = x_shape[1]*x_shape[2]
var_shape = (1, 1, 1, x_shape[3])
init_value = tf.log(1.0 / (tf.sqrt(x_var) + 1e-6)) / logscale_factor
logs = get_variable_ddi("logs", var_shape, initial_value=init_value,
init=init)
logs = logs * logscale_factor
# Function and reverse function.
if not reverse:
x = x * tf.exp(logs)
else:
x = x * tf.exp(-logs)
# Objective calculation, h * w * sum(log|s|)
dlogdet = tf.reduce_sum(logs) * logdet_factor
if reverse:
dlogdet *= -1
return x, dlogdet
|
python
|
def actnorm_scale(name, x, logscale_factor=3., reverse=False, init=False):
"""Per-channel scaling of x."""
x_shape = common_layers.shape_list(x)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
# Variance initialization logic.
assert len(x_shape) == 2 or len(x_shape) == 4
if len(x_shape) == 2:
x_var = tf.reduce_mean(x**2, [0], keepdims=True)
logdet_factor = 1
var_shape = (1, x_shape[1])
elif len(x_shape) == 4:
x_var = tf.reduce_mean(x**2, [0, 1, 2], keepdims=True)
logdet_factor = x_shape[1]*x_shape[2]
var_shape = (1, 1, 1, x_shape[3])
init_value = tf.log(1.0 / (tf.sqrt(x_var) + 1e-6)) / logscale_factor
logs = get_variable_ddi("logs", var_shape, initial_value=init_value,
init=init)
logs = logs * logscale_factor
# Function and reverse function.
if not reverse:
x = x * tf.exp(logs)
else:
x = x * tf.exp(-logs)
# Objective calculation, h * w * sum(log|s|)
dlogdet = tf.reduce_sum(logs) * logdet_factor
if reverse:
dlogdet *= -1
return x, dlogdet
|
[
"def",
"actnorm_scale",
"(",
"name",
",",
"x",
",",
"logscale_factor",
"=",
"3.",
",",
"reverse",
"=",
"False",
",",
"init",
"=",
"False",
")",
":",
"x_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"# Variance initialization logic.",
"assert",
"len",
"(",
"x_shape",
")",
"==",
"2",
"or",
"len",
"(",
"x_shape",
")",
"==",
"4",
"if",
"len",
"(",
"x_shape",
")",
"==",
"2",
":",
"x_var",
"=",
"tf",
".",
"reduce_mean",
"(",
"x",
"**",
"2",
",",
"[",
"0",
"]",
",",
"keepdims",
"=",
"True",
")",
"logdet_factor",
"=",
"1",
"var_shape",
"=",
"(",
"1",
",",
"x_shape",
"[",
"1",
"]",
")",
"elif",
"len",
"(",
"x_shape",
")",
"==",
"4",
":",
"x_var",
"=",
"tf",
".",
"reduce_mean",
"(",
"x",
"**",
"2",
",",
"[",
"0",
",",
"1",
",",
"2",
"]",
",",
"keepdims",
"=",
"True",
")",
"logdet_factor",
"=",
"x_shape",
"[",
"1",
"]",
"*",
"x_shape",
"[",
"2",
"]",
"var_shape",
"=",
"(",
"1",
",",
"1",
",",
"1",
",",
"x_shape",
"[",
"3",
"]",
")",
"init_value",
"=",
"tf",
".",
"log",
"(",
"1.0",
"/",
"(",
"tf",
".",
"sqrt",
"(",
"x_var",
")",
"+",
"1e-6",
")",
")",
"/",
"logscale_factor",
"logs",
"=",
"get_variable_ddi",
"(",
"\"logs\"",
",",
"var_shape",
",",
"initial_value",
"=",
"init_value",
",",
"init",
"=",
"init",
")",
"logs",
"=",
"logs",
"*",
"logscale_factor",
"# Function and reverse function.",
"if",
"not",
"reverse",
":",
"x",
"=",
"x",
"*",
"tf",
".",
"exp",
"(",
"logs",
")",
"else",
":",
"x",
"=",
"x",
"*",
"tf",
".",
"exp",
"(",
"-",
"logs",
")",
"# Objective calculation, h * w * sum(log|s|)",
"dlogdet",
"=",
"tf",
".",
"reduce_sum",
"(",
"logs",
")",
"*",
"logdet_factor",
"if",
"reverse",
":",
"dlogdet",
"*=",
"-",
"1",
"return",
"x",
",",
"dlogdet"
] |
Per-channel scaling of x.
|
[
"Per",
"-",
"channel",
"scaling",
"of",
"x",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L300-L331
|
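A NumPy sketch of the initialization in `actnorm_scale` above for the NHWC case; the input shape is assumed. Net of the logscale_factor round trip (the stored variable is divided by it at init and multiplied back on read), logs equals log(1 / (sqrt(x_var) + 1e-6)), giving the first minibatch roughly unit second moment per channel:

```python
import numpy as np

x = (np.random.randn(8, 4, 4, 3) * 2.0).astype(np.float32)
x_var = (x ** 2).mean(axis=(0, 1, 2), keepdims=True)
logs = np.log(1.0 / (np.sqrt(x_var) + 1e-6))
y = x * np.exp(logs)
print(np.allclose((y ** 2).mean(axis=(0, 1, 2)), 1.0, atol=1e-3))  # True
# Objective contribution is h * w * sum(logs), negated on the reverse pass.
dlogdet = logs.sum() * (4 * 4)
```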
21,808
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
invertible_1x1_conv
|
def invertible_1x1_conv(name, x, reverse=False):
"""1X1 convolution on x.
The 1X1 convolution is parametrized as P*L*(U + sign(s)*exp(log(s))) where
1. P is a permutation matrix.
2. L is a lower triangular matrix with unit diagonal entries.
3. U is an upper triangular matrix with zero diagonal entries.
4. s is a vector.
sign(s) and P are fixed and the remaining parameters are optimized. P, L, U and s are
initialized by the PLU decomposition of a random rotation matrix.
Args:
name: scope
x: Input Tensor.
reverse: whether the pass is from z -> x or x -> z.
Returns:
x_conv: x after a 1X1 convolution is applied on x.
objective: sum(log(s))
"""
_, height, width, channels = common_layers.shape_list(x)
w_shape = [channels, channels]
# Random rotation-matrix Q
random_matrix = np.random.rand(channels, channels)
np_w = scipy.linalg.qr(random_matrix)[0].astype("float32")
# Initialize P,L,U and s from the LU decomposition of a random rotation matrix
np_p, np_l, np_u = scipy.linalg.lu(np_w)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
p = tf.get_variable("P", initializer=np_p, trainable=False)
l = tf.get_variable("L", initializer=np_l)
sign_s = tf.get_variable(
"sign_S", initializer=np_sign_s, trainable=False)
log_s = tf.get_variable("log_S", initializer=np_log_s)
u = tf.get_variable("U", initializer=np_u)
# W = P * L * (U + sign_s * exp(log_s))
l_mask = np.tril(np.ones([channels, channels], dtype=np.float32), -1)
l = l * l_mask + tf.eye(channels, channels)
u = u * np.transpose(l_mask) + tf.diag(sign_s * tf.exp(log_s))
w = tf.matmul(p, tf.matmul(l, u))
# If height or width cannot be statically determined then they end up as
# tf.int32 tensors, which cannot be directly multiplied with a floating
# point tensor without a cast.
objective = tf.reduce_sum(log_s) * tf.cast(height * width, log_s.dtype)
if not reverse:
w = tf.reshape(w, [1, 1] + w_shape)
x = tf.nn.conv2d(x, w, [1, 1, 1, 1], "SAME", data_format="NHWC")
else:
# TODO(b/111271662): Remove when supported.
def tpu_inv(m):
"""tf.linalg.inv workaround until it is supported on TPU."""
q, r = tf.linalg.qr(m)
return tf.linalg.triangular_solve(r, tf.transpose(q), lower=False)
w_inv = tf.reshape(tpu_inv(w), [1, 1]+w_shape)
x = tf.nn.conv2d(
x, w_inv, [1, 1, 1, 1], "SAME", data_format="NHWC")
objective *= -1
return x, objective
|
python
|
def invertible_1x1_conv(name, x, reverse=False):
"""1X1 convolution on x.
The 1X1 convolution is parametrized as P*L*(U + sign(s)*exp(log(s))) where
1. P is a permutation matrix.
2. L is a lower triangular matrix with unit diagonal entries.
3. U is an upper triangular matrix with zero diagonal entries.
4. s is a vector.
sign(s) and P are fixed and the remaining parameters are optimized. P, L, U and s are
initialized by the PLU decomposition of a random rotation matrix.
Args:
name: scope
x: Input Tensor.
reverse: whether the pass is from z -> x or x -> z.
Returns:
x_conv: x after a 1X1 convolution is applied on x.
objective: sum(log(s))
"""
_, height, width, channels = common_layers.shape_list(x)
w_shape = [channels, channels]
# Random rotation-matrix Q
random_matrix = np.random.rand(channels, channels)
np_w = scipy.linalg.qr(random_matrix)[0].astype("float32")
# Initialize P,L,U and s from the LU decomposition of a random rotation matrix
np_p, np_l, np_u = scipy.linalg.lu(np_w)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
p = tf.get_variable("P", initializer=np_p, trainable=False)
l = tf.get_variable("L", initializer=np_l)
sign_s = tf.get_variable(
"sign_S", initializer=np_sign_s, trainable=False)
log_s = tf.get_variable("log_S", initializer=np_log_s)
u = tf.get_variable("U", initializer=np_u)
# W = P * L * (U + sign_s * exp(log_s))
l_mask = np.tril(np.ones([channels, channels], dtype=np.float32), -1)
l = l * l_mask + tf.eye(channels, channels)
u = u * np.transpose(l_mask) + tf.diag(sign_s * tf.exp(log_s))
w = tf.matmul(p, tf.matmul(l, u))
# If height or width cannot be statically determined then they end up as
# tf.int32 tensors, which cannot be directly multiplied with a floating
# point tensor without a cast.
objective = tf.reduce_sum(log_s) * tf.cast(height * width, log_s.dtype)
if not reverse:
w = tf.reshape(w, [1, 1] + w_shape)
x = tf.nn.conv2d(x, w, [1, 1, 1, 1], "SAME", data_format="NHWC")
else:
# TODO(b/111271662): Remove when supported.
def tpu_inv(m):
"""tf.linalg.inv workaround until it is supported on TPU."""
q, r = tf.linalg.qr(m)
return tf.linalg.triangular_solve(r, tf.transpose(q), lower=False)
w_inv = tf.reshape(tpu_inv(w), [1, 1]+w_shape)
x = tf.nn.conv2d(
x, w_inv, [1, 1, 1, 1], "SAME", data_format="NHWC")
objective *= -1
return x, objective
|
[
"def",
"invertible_1x1_conv",
"(",
"name",
",",
"x",
",",
"reverse",
"=",
"False",
")",
":",
"_",
",",
"height",
",",
"width",
",",
"channels",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"w_shape",
"=",
"[",
"channels",
",",
"channels",
"]",
"# Random rotation-matrix Q",
"random_matrix",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"channels",
",",
"channels",
")",
"np_w",
"=",
"scipy",
".",
"linalg",
".",
"qr",
"(",
"random_matrix",
")",
"[",
"0",
"]",
".",
"astype",
"(",
"\"float32\"",
")",
"# Initialize P,L,U and s from the LU decomposition of a random rotation matrix",
"np_p",
",",
"np_l",
",",
"np_u",
"=",
"scipy",
".",
"linalg",
".",
"lu",
"(",
"np_w",
")",
"np_s",
"=",
"np",
".",
"diag",
"(",
"np_u",
")",
"np_sign_s",
"=",
"np",
".",
"sign",
"(",
"np_s",
")",
"np_log_s",
"=",
"np",
".",
"log",
"(",
"np",
".",
"abs",
"(",
"np_s",
")",
")",
"np_u",
"=",
"np",
".",
"triu",
"(",
"np_u",
",",
"k",
"=",
"1",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"p",
"=",
"tf",
".",
"get_variable",
"(",
"\"P\"",
",",
"initializer",
"=",
"np_p",
",",
"trainable",
"=",
"False",
")",
"l",
"=",
"tf",
".",
"get_variable",
"(",
"\"L\"",
",",
"initializer",
"=",
"np_l",
")",
"sign_s",
"=",
"tf",
".",
"get_variable",
"(",
"\"sign_S\"",
",",
"initializer",
"=",
"np_sign_s",
",",
"trainable",
"=",
"False",
")",
"log_s",
"=",
"tf",
".",
"get_variable",
"(",
"\"log_S\"",
",",
"initializer",
"=",
"np_log_s",
")",
"u",
"=",
"tf",
".",
"get_variable",
"(",
"\"U\"",
",",
"initializer",
"=",
"np_u",
")",
"# W = P * L * (U + sign_s * exp(log_s))",
"l_mask",
"=",
"np",
".",
"tril",
"(",
"np",
".",
"ones",
"(",
"[",
"channels",
",",
"channels",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
",",
"-",
"1",
")",
"l",
"=",
"l",
"*",
"l_mask",
"+",
"tf",
".",
"eye",
"(",
"channels",
",",
"channels",
")",
"u",
"=",
"u",
"*",
"np",
".",
"transpose",
"(",
"l_mask",
")",
"+",
"tf",
".",
"diag",
"(",
"sign_s",
"*",
"tf",
".",
"exp",
"(",
"log_s",
")",
")",
"w",
"=",
"tf",
".",
"matmul",
"(",
"p",
",",
"tf",
".",
"matmul",
"(",
"l",
",",
"u",
")",
")",
"# If height or width cannot be statically determined then they end up as",
"# tf.int32 tensors, which cannot be directly multiplied with a floating",
"# point tensor without a cast.",
"objective",
"=",
"tf",
".",
"reduce_sum",
"(",
"log_s",
")",
"*",
"tf",
".",
"cast",
"(",
"height",
"*",
"width",
",",
"log_s",
".",
"dtype",
")",
"if",
"not",
"reverse",
":",
"w",
"=",
"tf",
".",
"reshape",
"(",
"w",
",",
"[",
"1",
",",
"1",
"]",
"+",
"w_shape",
")",
"x",
"=",
"tf",
".",
"nn",
".",
"conv2d",
"(",
"x",
",",
"w",
",",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"\"SAME\"",
",",
"data_format",
"=",
"\"NHWC\"",
")",
"else",
":",
"# TODO(b/111271662): Remove when supported.",
"def",
"tpu_inv",
"(",
"m",
")",
":",
"\"\"\"tf.linalg.inv workaround until it is supported on TPU.\"\"\"",
"q",
",",
"r",
"=",
"tf",
".",
"linalg",
".",
"qr",
"(",
"m",
")",
"return",
"tf",
".",
"linalg",
".",
"triangular_solve",
"(",
"r",
",",
"tf",
".",
"transpose",
"(",
"q",
")",
",",
"lower",
"=",
"False",
")",
"w_inv",
"=",
"tf",
".",
"reshape",
"(",
"tpu_inv",
"(",
"w",
")",
",",
"[",
"1",
",",
"1",
"]",
"+",
"w_shape",
")",
"x",
"=",
"tf",
".",
"nn",
".",
"conv2d",
"(",
"x",
",",
"w_inv",
",",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"\"SAME\"",
",",
"data_format",
"=",
"\"NHWC\"",
")",
"objective",
"*=",
"-",
"1",
"return",
"x",
",",
"objective"
] |
1X1 convolution on x.
The 1X1 convolution is parametrized as P*L*(U + sign(s)*exp(log(s))) where
1. P is a permutation matrix.
2. L is a lower triangular matrix with unit diagonal entries.
3. U is an upper triangular matrix with zero diagonal entries.
4. s is a vector.
sign(s) and P are fixed and the remaining parameters are optimized. P, L, U and s are
initialized by the PLU decomposition of a random rotation matrix.
Args:
name: scope
x: Input Tensor.
reverse: whether the pass is from z -> x or x -> z.
Returns:
x_conv: x after a 1X1 convolution is applied on x.
objective: sum(log(s))
|
[
"1X1",
"convolution",
"on",
"x",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L335-L401
|
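A NumPy sketch of the PLU parametrization in `invertible_1x1_conv` above: a random rotation is decomposed as W = P @ L @ U, and W is then rebuilt from the trainable pieces exactly as the TF code does. The channel count is assumed:

```python
import numpy as np
import scipy.linalg

channels = 4
np_w = scipy.linalg.qr(np.random.rand(channels, channels))[0].astype("float32")
np_p, np_l, np_u = scipy.linalg.lu(np_w)
np_s = np.diag(np_u)
sign_s, log_s = np.sign(np_s), np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)

l_mask = np.tril(np.ones((channels, channels), np.float32), -1)
l = np_l * l_mask + np.eye(channels, dtype=np.float32)  # unit diagonal
u = np_u * l_mask.T + np.diag(sign_s * np.exp(log_s))   # restore diag(s)
w = np_p @ l @ u
print(np.allclose(w, np_w, atol=1e-5))  # True: exact reconstruction
# log|det W| per spatial position is sum(log_s); the objective scales it by h*w.
```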
21,809
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
add_edge_bias
|
def add_edge_bias(x, filter_size):
"""Pad x and concatenates an edge bias across the depth of x.
The edge bias can be thought of as a binary feature which is unity when
the filter is being convolved over an edge and zero otherwise.
Args:
x: Input tensor, shape (NHWC)
filter_size: filter_size to determine padding.
Returns:
x_pad: Input tensor, shape (NHW(c+1))
"""
x_shape = common_layers.shape_list(x)
if filter_size[0] == 1 and filter_size[1] == 1:
return x
a = (filter_size[0] - 1) // 2 # vertical padding size
b = (filter_size[1] - 1) // 2 # horizontal padding size
padding = [[0, 0], [a, a], [b, b], [0, 0]]
x_bias = tf.zeros(x_shape[:-1] + [1])
x = tf.pad(x, padding)
x_pad = tf.pad(x_bias, padding, constant_values=1)
return tf.concat([x, x_pad], axis=3)
|
python
|
def add_edge_bias(x, filter_size):
"""Pad x and concatenates an edge bias across the depth of x.
The edge bias can be thought of as a binary feature which is unity when
the filter is being convolved over an edge and zero otherwise.
Args:
x: Input tensor, shape (NHWC)
filter_size: filter_size to determine padding.
Returns:
x_pad: Input tensor, shape (NHW(c+1))
"""
x_shape = common_layers.shape_list(x)
if filter_size[0] == 1 and filter_size[1] == 1:
return x
a = (filter_size[0] - 1) // 2 # vertical padding size
b = (filter_size[1] - 1) // 2 # horizontal padding size
padding = [[0, 0], [a, a], [b, b], [0, 0]]
x_bias = tf.zeros(x_shape[:-1] + [1])
x = tf.pad(x, padding)
x_pad = tf.pad(x_bias, padding, constant_values=1)
return tf.concat([x, x_pad], axis=3)
|
[
"def",
"add_edge_bias",
"(",
"x",
",",
"filter_size",
")",
":",
"x_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"if",
"filter_size",
"[",
"0",
"]",
"==",
"1",
"and",
"filter_size",
"[",
"1",
"]",
"==",
"1",
":",
"return",
"x",
"a",
"=",
"(",
"filter_size",
"[",
"0",
"]",
"-",
"1",
")",
"//",
"2",
"# vertical padding size",
"b",
"=",
"(",
"filter_size",
"[",
"1",
"]",
"-",
"1",
")",
"//",
"2",
"# horizontal padding size",
"padding",
"=",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"a",
",",
"a",
"]",
",",
"[",
"b",
",",
"b",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
"x_bias",
"=",
"tf",
".",
"zeros",
"(",
"x_shape",
"[",
":",
"-",
"1",
"]",
"+",
"[",
"1",
"]",
")",
"x",
"=",
"tf",
".",
"pad",
"(",
"x",
",",
"padding",
")",
"x_pad",
"=",
"tf",
".",
"pad",
"(",
"x_bias",
",",
"padding",
",",
"constant_values",
"=",
"1",
")",
"return",
"tf",
".",
"concat",
"(",
"[",
"x",
",",
"x_pad",
"]",
",",
"axis",
"=",
"3",
")"
] |
Pad x and concatenate an edge bias across the depth of x.
The edge bias can be thought of as a binary feature which is unity when
the filter is being convolved over an edge and zero otherwise.
Args:
x: Input tensor, shape (NHWC)
filter_size: filter_size to determine padding.
Returns:
x_pad: Input tensor, shape (NHW(c+1))
|
[
"Pad",
"x",
"and",
"concatenates",
"an",
"edge",
"bias",
"across",
"the",
"depth",
"of",
"x",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L404-L426
|
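A NumPy sketch of `add_edge_bias` above for a 3x3 filter; the input shape is assumed. x is zero-padded by one on each spatial side and an indicator channel is appended that is 1 exactly on the padded border. Note the spatial dims grow too, so the output is (N, H+2a, W+2b, C+1) rather than literally NHW(C+1):

```python
import numpy as np

x = np.random.rand(2, 4, 4, 3).astype(np.float32)
a = b = (3 - 1) // 2
pad = ((0, 0), (a, a), (b, b), (0, 0))
x_padded = np.pad(x, pad)                                       # zeros on edges
edge = np.pad(np.zeros(x.shape[:-1] + (1,)), pad, constant_values=1)
out = np.concatenate([x_padded, edge], axis=3)
print(out.shape)  # (2, 6, 6, 4)
```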
21,810
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
time_pad
|
def time_pad(x, filter_size, dilations):
"""Pad left across time and pad valid across the spatial components.
Also concats a binary feature that indicates if a feature is padded or not.
Args:
x: 5-D Tensor, (NTHWC)
filter_size: list of ints
dilations: list of ints, dilations - 1 specifies the number of holes
between two filter elements.
Returns:
x_pad: 5-D Tensor.
"""
x_shape = common_layers.shape_list(x)
if filter_size == [1, 1, 1]:
return x
_, h, w = filter_size
eff_h = h + (h - 1)*(dilations[2] - 1)
eff_w = w + (w - 1)*(dilations[3] - 1)
a = (eff_h - 1) // 2 # vertical padding size
b = (eff_w - 1) // 2 # horizontal padding size
c = filter_size[0] - 1
# pad across edges.
padding = [[0, 0], [c, 0], [a, a], [b, b], [0, 0]]
# concat a binary feature across channels to indicate a padding.
# 1 indicates that the feature is a padding.
x_bias = tf.zeros(x_shape[:-1] + [1])
x_bias = tf.pad(x_bias, padding, constant_values=1)
x_pad = tf.pad(x, padding)
x_pad = tf.concat((x_bias, x_pad), axis=-1)
return x_pad
|
python
|
def time_pad(x, filter_size, dilations):
"""Pad left across time and pad valid across the spatial components.
Also concats a binary feature that indicates if a feature is padded or not.
Args:
x: 5-D Tensor, (NTHWC)
filter_size: list of ints
dilations: list of ints, dilations - 1 specifies the number of holes
between two filter elements.
Returns:
x_pad: 5-D Tensor.
"""
x_shape = common_layers.shape_list(x)
if filter_size == [1, 1, 1]:
return x
_, h, w = filter_size
eff_h = h + (h - 1)*(dilations[2] - 1)
eff_w = w + (w - 1)*(dilations[3] - 1)
a = (eff_h - 1) // 2 # vertical padding size
b = (eff_w - 1) // 2 # horizontal padding size
c = filter_size[0] - 1
# pad across edges.
padding = [[0, 0], [c, 0], [a, a], [b, b], [0, 0]]
# concat a binary feature across channels to indicate a padding.
# 1 indicates that the feature is a padding.
x_bias = tf.zeros(x_shape[:-1] + [1])
x_bias = tf.pad(x_bias, padding, constant_values=1)
x_pad = tf.pad(x, padding)
x_pad = tf.concat((x_bias, x_pad), axis=-1)
return x_pad
|
[
"def",
"time_pad",
"(",
"x",
",",
"filter_size",
",",
"dilations",
")",
":",
"x_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"if",
"filter_size",
"==",
"[",
"1",
",",
"1",
",",
"1",
"]",
":",
"return",
"x",
"_",
",",
"h",
",",
"w",
"=",
"filter_size",
"eff_h",
"=",
"h",
"+",
"(",
"h",
"-",
"1",
")",
"*",
"(",
"dilations",
"[",
"2",
"]",
"-",
"1",
")",
"eff_w",
"=",
"w",
"+",
"(",
"w",
"-",
"1",
")",
"*",
"(",
"dilations",
"[",
"3",
"]",
"-",
"1",
")",
"a",
"=",
"(",
"eff_h",
"-",
"1",
")",
"//",
"2",
"# vertical padding size",
"b",
"=",
"(",
"eff_w",
"-",
"1",
")",
"//",
"2",
"# horizontal padding size",
"c",
"=",
"filter_size",
"[",
"0",
"]",
"-",
"1",
"# pad across edges.",
"padding",
"=",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"c",
",",
"0",
"]",
",",
"[",
"a",
",",
"a",
"]",
",",
"[",
"b",
",",
"b",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
"# concat a binary feature across channels to indicate a padding.",
"# 1 indicates that the feature is a padding.",
"x_bias",
"=",
"tf",
".",
"zeros",
"(",
"x_shape",
"[",
":",
"-",
"1",
"]",
"+",
"[",
"1",
"]",
")",
"x_bias",
"=",
"tf",
".",
"pad",
"(",
"x_bias",
",",
"padding",
",",
"constant_values",
"=",
"1",
")",
"x_pad",
"=",
"tf",
".",
"pad",
"(",
"x",
",",
"padding",
")",
"x_pad",
"=",
"tf",
".",
"concat",
"(",
"(",
"x_bias",
",",
"x_pad",
")",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"x_pad"
] |
Pad left across time and pad valid across the spatial components.
Also concats a binary feature that indicates if a feature is padded or not.
Args:
x: 5-D Tensor, (NTHWC)
filter_size: list of ints
dilations: list of ints, dilations - 1 specifies the number of holes
between two filter elements.
Returns:
x_pad: 5-D Tensor.
|
[
"Pad",
"left",
"across",
"time",
"and",
"pad",
"valid",
"across",
"the",
"spatial",
"components",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L429-L461
|
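A NumPy sketch of `time_pad` above for filter_size [2, 3, 3] with no dilation; the input shape is assumed. Time is padded on the left only (causal), space symmetrically, and a 0/1 "is padding" indicator channel is prepended:

```python
import numpy as np

x = np.random.rand(1, 5, 4, 4, 3).astype(np.float32)
c, a, b = 2 - 1, (3 - 1) // 2, (3 - 1) // 2
pad = ((0, 0), (c, 0), (a, a), (b, b), (0, 0))
x_bias = np.pad(np.zeros(x.shape[:-1] + (1,)), pad, constant_values=1)
x_pad = np.concatenate([x_bias, np.pad(x, pad)], axis=-1)
print(x_pad.shape)  # (1, 6, 6, 6, 4)
```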
21,811
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
conv
|
def conv(name, x, output_channels, filter_size=None, stride=None,
logscale_factor=3.0, apply_actnorm=True, conv_init="default",
dilations=None):
"""Convolutional layer with edge bias padding and optional actnorm.
If x is 5-dimensional, actnorm is applied independently across every
time-step.
Args:
name: variable scope.
x: 4-D Tensor or 5-D Tensor of shape NHWC or NTHWC
output_channels: Number of output channels.
filter_size: list of ints, if None [3, 3] and [2, 3, 3] are defaults for
4-D and 5-D input tensors respectively.
stride: list of ints, default stride: 1
logscale_factor: see actnorm for parameter meaning.
apply_actnorm: if apply_actnorm the activations of the first minibatch
have zero mean and unit variance. Else, there is no scaling
applied.
conv_init: default or zeros. default is a normal distribution with 0.05 std.
dilations: List of integers, apply dilations.
Returns:
x: actnorm(conv2d(x))
Raises:
ValueError: if init is set to "zeros" and apply_actnorm is set to True.
"""
if conv_init == "zeros" and apply_actnorm:
raise ValueError("apply_actnorm is unstable when init is set to zeros.")
x_shape = common_layers.shape_list(x)
is_2d = len(x_shape) == 4
num_steps = x_shape[1]
# set filter_size, stride and in_channels
if is_2d:
if filter_size is None:
filter_size = [3, 3]
if stride is None:
stride = [1, 1]
if dilations is None:
dilations = [1, 1, 1, 1]
actnorm_func = actnorm
x = add_edge_bias(x, filter_size=filter_size)
conv_filter = tf.nn.conv2d
else:
if filter_size is None:
if num_steps == 1:
filter_size = [1, 3, 3]
else:
filter_size = [2, 3, 3]
if stride is None:
stride = [1, 1, 1]
if dilations is None:
dilations = [1, 1, 1, 1, 1]
actnorm_func = actnorm_3d
x = time_pad(x, filter_size=filter_size, dilations=dilations)
conv_filter = tf.nn.conv3d
in_channels = common_layers.shape_list(x)[-1]
filter_shape = filter_size + [in_channels, output_channels]
stride_shape = [1] + stride + [1]
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if conv_init == "default":
initializer = default_initializer()
elif conv_init == "zeros":
initializer = tf.zeros_initializer()
w = tf.get_variable("W", filter_shape, tf.float32, initializer=initializer)
x = conv_filter(x, w, stride_shape, padding="VALID", dilations=dilations)
if apply_actnorm:
x, _ = actnorm_func("actnorm", x, logscale_factor=logscale_factor)
else:
x += tf.get_variable("b", [1, 1, 1, output_channels],
initializer=tf.zeros_initializer())
logs = tf.get_variable("logs", [1, output_channels],
initializer=tf.zeros_initializer())
x *= tf.exp(logs * logscale_factor)
return x
|
python
|
def conv(name, x, output_channels, filter_size=None, stride=None,
logscale_factor=3.0, apply_actnorm=True, conv_init="default",
dilations=None):
"""Convolutional layer with edge bias padding and optional actnorm.
If x is 5-dimensional, actnorm is applied independently across every
time-step.
Args:
name: variable scope.
x: 4-D Tensor or 5-D Tensor of shape NHWC or NTHWC
output_channels: Number of output channels.
filter_size: list of ints, if None [3, 3] and [2, 3, 3] are defaults for
4-D and 5-D input tensors respectively.
stride: list of ints, default stride: 1
logscale_factor: see actnorm for parameter meaning.
apply_actnorm: if apply_actnorm the activations of the first minibatch
have zero mean and unit variance. Else, there is no scaling
applied.
conv_init: default or zeros. default is a normal distribution with 0.05 std.
dilations: List of integers, apply dilations.
Returns:
x: actnorm(conv2d(x))
Raises:
ValueError: if init is set to "zeros" and apply_actnorm is set to True.
"""
if conv_init == "zeros" and apply_actnorm:
raise ValueError("apply_actnorm is unstable when init is set to zeros.")
x_shape = common_layers.shape_list(x)
is_2d = len(x_shape) == 4
num_steps = x_shape[1]
# set filter_size, stride and in_channels
if is_2d:
if filter_size is None:
filter_size = [3, 3]
if stride is None:
stride = [1, 1]
if dilations is None:
dilations = [1, 1, 1, 1]
actnorm_func = actnorm
x = add_edge_bias(x, filter_size=filter_size)
conv_filter = tf.nn.conv2d
else:
if filter_size is None:
if num_steps == 1:
filter_size = [1, 3, 3]
else:
filter_size = [2, 3, 3]
if stride is None:
stride = [1, 1, 1]
if dilations is None:
dilations = [1, 1, 1, 1, 1]
actnorm_func = actnorm_3d
x = time_pad(x, filter_size=filter_size, dilations=dilations)
conv_filter = tf.nn.conv3d
in_channels = common_layers.shape_list(x)[-1]
filter_shape = filter_size + [in_channels, output_channels]
stride_shape = [1] + stride + [1]
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if conv_init == "default":
initializer = default_initializer()
elif conv_init == "zeros":
initializer = tf.zeros_initializer()
w = tf.get_variable("W", filter_shape, tf.float32, initializer=initializer)
x = conv_filter(x, w, stride_shape, padding="VALID", dilations=dilations)
if apply_actnorm:
x, _ = actnorm_func("actnorm", x, logscale_factor=logscale_factor)
else:
x += tf.get_variable("b", [1, 1, 1, output_channels],
initializer=tf.zeros_initializer())
logs = tf.get_variable("logs", [1, output_channels],
initializer=tf.zeros_initializer())
x *= tf.exp(logs * logscale_factor)
return x
|
[
"def",
"conv",
"(",
"name",
",",
"x",
",",
"output_channels",
",",
"filter_size",
"=",
"None",
",",
"stride",
"=",
"None",
",",
"logscale_factor",
"=",
"3.0",
",",
"apply_actnorm",
"=",
"True",
",",
"conv_init",
"=",
"\"default\"",
",",
"dilations",
"=",
"None",
")",
":",
"if",
"conv_init",
"==",
"\"zeros\"",
"and",
"apply_actnorm",
":",
"raise",
"ValueError",
"(",
"\"apply_actnorm is unstable when init is set to zeros.\"",
")",
"x_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"is_2d",
"=",
"len",
"(",
"x_shape",
")",
"==",
"4",
"num_steps",
"=",
"x_shape",
"[",
"1",
"]",
"# set filter_size, stride and in_channels",
"if",
"is_2d",
":",
"if",
"filter_size",
"is",
"None",
":",
"filter_size",
"=",
"[",
"3",
",",
"3",
"]",
"if",
"stride",
"is",
"None",
":",
"stride",
"=",
"[",
"1",
",",
"1",
"]",
"if",
"dilations",
"is",
"None",
":",
"dilations",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
"actnorm_func",
"=",
"actnorm",
"x",
"=",
"add_edge_bias",
"(",
"x",
",",
"filter_size",
"=",
"filter_size",
")",
"conv_filter",
"=",
"tf",
".",
"nn",
".",
"conv2d",
"else",
":",
"if",
"filter_size",
"is",
"None",
":",
"if",
"num_steps",
"==",
"1",
":",
"filter_size",
"=",
"[",
"1",
",",
"3",
",",
"3",
"]",
"else",
":",
"filter_size",
"=",
"[",
"2",
",",
"3",
",",
"3",
"]",
"if",
"stride",
"is",
"None",
":",
"stride",
"=",
"[",
"1",
",",
"1",
",",
"1",
"]",
"if",
"dilations",
"is",
"None",
":",
"dilations",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
"actnorm_func",
"=",
"actnorm_3d",
"x",
"=",
"time_pad",
"(",
"x",
",",
"filter_size",
"=",
"filter_size",
",",
"dilations",
"=",
"dilations",
")",
"conv_filter",
"=",
"tf",
".",
"nn",
".",
"conv3d",
"in_channels",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"[",
"-",
"1",
"]",
"filter_shape",
"=",
"filter_size",
"+",
"[",
"in_channels",
",",
"output_channels",
"]",
"stride_shape",
"=",
"[",
"1",
"]",
"+",
"stride",
"+",
"[",
"1",
"]",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"if",
"conv_init",
"==",
"\"default\"",
":",
"initializer",
"=",
"default_initializer",
"(",
")",
"elif",
"conv_init",
"==",
"\"zeros\"",
":",
"initializer",
"=",
"tf",
".",
"zeros_initializer",
"(",
")",
"w",
"=",
"tf",
".",
"get_variable",
"(",
"\"W\"",
",",
"filter_shape",
",",
"tf",
".",
"float32",
",",
"initializer",
"=",
"initializer",
")",
"x",
"=",
"conv_filter",
"(",
"x",
",",
"w",
",",
"stride_shape",
",",
"padding",
"=",
"\"VALID\"",
",",
"dilations",
"=",
"dilations",
")",
"if",
"apply_actnorm",
":",
"x",
",",
"_",
"=",
"actnorm_func",
"(",
"\"actnorm\"",
",",
"x",
",",
"logscale_factor",
"=",
"logscale_factor",
")",
"else",
":",
"x",
"+=",
"tf",
".",
"get_variable",
"(",
"\"b\"",
",",
"[",
"1",
",",
"1",
",",
"1",
",",
"output_channels",
"]",
",",
"initializer",
"=",
"tf",
".",
"zeros_initializer",
"(",
")",
")",
"logs",
"=",
"tf",
".",
"get_variable",
"(",
"\"logs\"",
",",
"[",
"1",
",",
"output_channels",
"]",
",",
"initializer",
"=",
"tf",
".",
"zeros_initializer",
"(",
")",
")",
"x",
"*=",
"tf",
".",
"exp",
"(",
"logs",
"*",
"logscale_factor",
")",
"return",
"x"
] |
Convolutional layer with edge bias padding and optional actnorm.
If x is 5-dimensional, actnorm is applied independently across every
time-step.
Args:
name: variable scope.
x: 4-D Tensor or 5-D Tensor of shape NHWC or NTHWC
output_channels: Number of output channels.
filter_size: list of ints, if None [3, 3] and [2, 3, 3] are defaults for
4-D and 5-D input tensors respectively.
stride: list of ints, default stride: 1
logscale_factor: see actnorm for parameter meaning.
apply_actnorm: if apply_actnorm the activations of the first minibatch
have zero mean and unit variance. Else, there is no scaling
applied.
conv_init: default or zeros. default is a normal distribution with 0.05 std.
dilations: List of integers, apply dilations.
Returns:
x: actnorm(conv2d(x))
Raises:
ValueError: if init is set to "zeros" and apply_actnorm is set to True.
|
[
"Convolutional",
"layer",
"with",
"edge",
"bias",
"padding",
"and",
"optional",
"actnorm",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L465-L544
|
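A small sketch of the shapes `conv` above assembles for a 4-D NHWC input with the default filter_size=[3, 3]; the channel counts are assumed for illustration. `add_edge_bias` grows the input from C to C + 1 channels before the convolution:

```python
in_channels = 3 + 1                # e.g. RGB input + edge-bias channel
output_channels = 512
filter_shape = [3, 3] + [in_channels, output_channels]  # [3, 3, 4, 512]
stride_shape = [1] + [1, 1] + [1]                       # [1, 1, 1, 1]
print(filter_shape, stride_shape)
```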
21,812
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
conv_block
|
def conv_block(name, x, mid_channels, dilations=None, activation="relu",
dropout=0.0):
"""2 layer conv block used in the affine coupling layer.
Args:
name: variable scope.
x: 4-D or 5-D Tensor.
mid_channels: Output channels of the second layer.
dilations: Optional, list of integers.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: Dropout probability.
Returns:
x: 4-D Tensor: Output activations.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
is_2d = len(x_shape) == 4
num_steps = x_shape[1]
if is_2d:
first_filter = [3, 3]
second_filter = [1, 1]
else:
# special case when number of steps equal 1 to avoid
# padding.
if num_steps == 1:
first_filter = [1, 3, 3]
else:
first_filter = [2, 3, 3]
second_filter = [1, 1, 1]
# Edge Padding + conv2d + actnorm + relu:
# [output: 512 channels]
x = conv("1_1", x, output_channels=mid_channels, filter_size=first_filter,
dilations=dilations)
x = tf.nn.relu(x)
x = get_dropout(x, rate=dropout)
# Padding + conv2d + actnorm + activation.
# [input, output: 512 channels]
if activation == "relu":
x = conv("1_2", x, output_channels=mid_channels,
filter_size=second_filter, dilations=dilations)
x = tf.nn.relu(x)
elif activation == "gatu":
# x = tanh(w1*x) * sigm(w2*x)
x_tanh = conv("1_tanh", x, output_channels=mid_channels,
filter_size=second_filter, dilations=dilations)
x_sigm = conv("1_sigm", x, output_channels=mid_channels,
filter_size=second_filter, dilations=dilations)
x = tf.nn.tanh(x_tanh) * tf.nn.sigmoid(x_sigm)
x = get_dropout(x, rate=dropout)
return x
|
python
|
def conv_block(name, x, mid_channels, dilations=None, activation="relu",
dropout=0.0):
"""2 layer conv block used in the affine coupling layer.
Args:
name: variable scope.
x: 4-D or 5-D Tensor.
mid_channels: Output channels of the second layer.
dilations: Optional, list of integers.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: Dropout probability.
Returns:
x: 4-D Tensor: Output activations.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
is_2d = len(x_shape) == 4
num_steps = x_shape[1]
if is_2d:
first_filter = [3, 3]
second_filter = [1, 1]
else:
# special case when number of steps equal 1 to avoid
# padding.
if num_steps == 1:
first_filter = [1, 3, 3]
else:
first_filter = [2, 3, 3]
second_filter = [1, 1, 1]
# Edge Padding + conv2d + actnorm + relu:
# [output: 512 channels]
x = conv("1_1", x, output_channels=mid_channels, filter_size=first_filter,
dilations=dilations)
x = tf.nn.relu(x)
x = get_dropout(x, rate=dropout)
# Padding + conv2d + actnorm + activation.
# [input, output: 512 channels]
if activation == "relu":
x = conv("1_2", x, output_channels=mid_channels,
filter_size=second_filter, dilations=dilations)
x = tf.nn.relu(x)
elif activation == "gatu":
# x = tanh(w1*x) * sigm(w2*x)
x_tanh = conv("1_tanh", x, output_channels=mid_channels,
filter_size=second_filter, dilations=dilations)
x_sigm = conv("1_sigm", x, output_channels=mid_channels,
filter_size=second_filter, dilations=dilations)
x = tf.nn.tanh(x_tanh) * tf.nn.sigmoid(x_sigm)
x = get_dropout(x, rate=dropout)
return x
|
[
"def",
"conv_block",
"(",
"name",
",",
"x",
",",
"mid_channels",
",",
"dilations",
"=",
"None",
",",
"activation",
"=",
"\"relu\"",
",",
"dropout",
"=",
"0.0",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"x_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"is_2d",
"=",
"len",
"(",
"x_shape",
")",
"==",
"4",
"num_steps",
"=",
"x_shape",
"[",
"1",
"]",
"if",
"is_2d",
":",
"first_filter",
"=",
"[",
"3",
",",
"3",
"]",
"second_filter",
"=",
"[",
"1",
",",
"1",
"]",
"else",
":",
"# special case when number of steps equal 1 to avoid",
"# padding.",
"if",
"num_steps",
"==",
"1",
":",
"first_filter",
"=",
"[",
"1",
",",
"3",
",",
"3",
"]",
"else",
":",
"first_filter",
"=",
"[",
"2",
",",
"3",
",",
"3",
"]",
"second_filter",
"=",
"[",
"1",
",",
"1",
",",
"1",
"]",
"# Edge Padding + conv2d + actnorm + relu:",
"# [output: 512 channels]",
"x",
"=",
"conv",
"(",
"\"1_1\"",
",",
"x",
",",
"output_channels",
"=",
"mid_channels",
",",
"filter_size",
"=",
"first_filter",
",",
"dilations",
"=",
"dilations",
")",
"x",
"=",
"tf",
".",
"nn",
".",
"relu",
"(",
"x",
")",
"x",
"=",
"get_dropout",
"(",
"x",
",",
"rate",
"=",
"dropout",
")",
"# Padding + conv2d + actnorm + activation.",
"# [input, output: 512 channels]",
"if",
"activation",
"==",
"\"relu\"",
":",
"x",
"=",
"conv",
"(",
"\"1_2\"",
",",
"x",
",",
"output_channels",
"=",
"mid_channels",
",",
"filter_size",
"=",
"second_filter",
",",
"dilations",
"=",
"dilations",
")",
"x",
"=",
"tf",
".",
"nn",
".",
"relu",
"(",
"x",
")",
"elif",
"activation",
"==",
"\"gatu\"",
":",
"# x = tanh(w1*x) * sigm(w2*x)",
"x_tanh",
"=",
"conv",
"(",
"\"1_tanh\"",
",",
"x",
",",
"output_channels",
"=",
"mid_channels",
",",
"filter_size",
"=",
"second_filter",
",",
"dilations",
"=",
"dilations",
")",
"x_sigm",
"=",
"conv",
"(",
"\"1_sigm\"",
",",
"x",
",",
"output_channels",
"=",
"mid_channels",
",",
"filter_size",
"=",
"second_filter",
",",
"dilations",
"=",
"dilations",
")",
"x",
"=",
"tf",
".",
"nn",
".",
"tanh",
"(",
"x_tanh",
")",
"*",
"tf",
".",
"nn",
".",
"sigmoid",
"(",
"x_sigm",
")",
"x",
"=",
"get_dropout",
"(",
"x",
",",
"rate",
"=",
"dropout",
")",
"return",
"x"
] |
2 layer conv block used in the affine coupling layer.
Args:
name: variable scope.
x: 4-D or 5-D Tensor.
mid_channels: Output channels of the second layer.
dilations: Optional, list of integers.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: Dropout probability.
Returns:
x: 4-D Tensor: Output activations.
|
[
"2",
"layer",
"conv",
"block",
"used",
"in",
"the",
"affine",
"coupling",
"layer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L548-L603
|
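A NumPy sketch of the "gatu" gate used by the second layer of `conv_block` above, tanh(W1*x) * sigmoid(W2*x), shown with two random 1x1 projections (a 1x1 conv is a per-pixel matmul over channels); the shapes are assumed:

```python
import numpy as np

x = np.random.rand(1, 4, 4, 8).astype(np.float32)
w_tanh = (np.random.randn(8, 8) * 0.05).astype(np.float32)
w_sigm = (np.random.randn(8, 8) * 0.05).astype(np.float32)
x_tanh, x_sigm = x @ w_tanh, x @ w_sigm
gated = np.tanh(x_tanh) * (1.0 / (1.0 + np.exp(-x_sigm)))  # tanh * sigmoid
print(gated.shape)  # (1, 4, 4, 8)
```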
21,813
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
dilated_conv_stack
|
def dilated_conv_stack(name, x, mid_channels, output_channels,
dilation_rates, activation="relu",
dropout=0.0):
"""Dilated convolutional stack.
Features at different rates are computed independently using a 3 layer
convolutional stack and added.
Args:
name: variable scope.
x: 5-D Tensor.
mid_channels: Number of output channels of the first layer in the conv
stack.
output_channels: Number of output channels of the last layer.
dilation_rates: A list of dilation rates.
activation: Can be either "relu" or "gatu"
dropout: dropout.
Returns:
output: 5-D Tensor.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
output = 0.0
for dil_ind, dil_rate in enumerate(dilation_rates):
# TODO(mechcoder) try (concat across channels + 1x1) modulo memory issues.
curr_out = conv_stack("dil_%d" % dil_ind, x, mid_channels=mid_channels,
output_channels=output_channels, dilations=dil_rate,
activation=activation, dropout=dropout)
output += curr_out
return output
|
python
|
def dilated_conv_stack(name, x, mid_channels, output_channels,
dilation_rates, activation="relu",
dropout=0.0):
"""Dilated convolutional stack.
Features at different rates are computed independently using a 3 layer
convolutional stack and added.
Args:
name: variable scope.
x: 5-D Tensor.
mid_channels: Number of output channels of the first layer in the conv
stack.
output_channels: Number of output channels of the last layer.
dilation_rates: A list of dilation rates.
activation: Can be either "relu" or "gatu"
dropout: dropout.
Returns:
output: 5-D Tensor.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
output = 0.0
for dil_ind, dil_rate in enumerate(dilation_rates):
# TODO(mechcoder) try (concat across channels + 1x1) modulo memory issues.
curr_out = conv_stack("dil_%d" % dil_ind, x, mid_channels=mid_channels,
output_channels=output_channels, dilations=dil_rate,
activation=activation, dropout=dropout)
output += curr_out
return output
|
[
"def",
"dilated_conv_stack",
"(",
"name",
",",
"x",
",",
"mid_channels",
",",
"output_channels",
",",
"dilation_rates",
",",
"activation",
"=",
"\"relu\"",
",",
"dropout",
"=",
"0.0",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"output",
"=",
"0.0",
"for",
"dil_ind",
",",
"dil_rate",
"in",
"enumerate",
"(",
"dilation_rates",
")",
":",
"# TODO(mechcoder) try (concat across channels + 1x1) modulo memory issues.",
"curr_out",
"=",
"conv_stack",
"(",
"\"dil_%d\"",
"%",
"dil_ind",
",",
"x",
",",
"mid_channels",
"=",
"mid_channels",
",",
"output_channels",
"=",
"output_channels",
",",
"dilations",
"=",
"dil_rate",
",",
"activation",
"=",
"activation",
",",
"dropout",
"=",
"dropout",
")",
"output",
"+=",
"curr_out",
"return",
"output"
] |
Dilated convolutional stack.
Features at different rates are computed independently using a 3 layer
convolutional stack and added.
Args:
name: variable scope.
x: 5-D Tensor.
mid_channels: Number of output channels of the first layer in the conv
stack.
output_channels: Number of output channels of the last layer.
dilation_rates: A list of dilation rates.
activation: Can be either "relu" or "gatu"
dropout: dropout.
Returns:
output: 5-D Tensor.
|
[
"Dilated",
"convolutional",
"stack",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L606-L634
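Each dilation rate gets its own independent conv_stack branch, and the branch outputs are summed; every branch must therefore map to the same output_channels. A schematic NumPy sketch of that combination pattern, with conv_stack replaced by a stub random projection (illustrative only):

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(2, 4, 8, 8, 16)  # 5-D input (NTHWC), as in the docstring
output_channels = 24
dilation_rates = [[1, 1, 1, 1, 1], [1, 1, 2, 2, 1]]

def stub_conv_stack(x, output_channels):
  # Stand-in for conv_stack: any map from C to output_channels channels.
  w = rng.randn(x.shape[-1], output_channels) * 0.1
  return x @ w

# Branches are computed independently and summed, so the result has the
# shape of a single branch no matter how many rates are used.
output = 0.0
for _ in dilation_rates:
  output += stub_conv_stack(x, output_channels)
assert output.shape == (2, 4, 8, 8, output_channels)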
|
21,814
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
conv_stack
|
def conv_stack(name, x, mid_channels, output_channels, dilations=None,
activation="relu", dropout=0.0):
"""3-layer convolutional stack.
Args:
name: variable scope.
x: 5-D Tensor.
mid_channels: Number of output channels of the first layer.
output_channels: Number of output channels.
dilations: Dilations to apply in the first 3x3 layer and the last 3x3 layer.
By default, apply no dilations.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: float, 0.0
Returns:
output: output of 3 layer conv network.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x = conv_block("conv_block", x, mid_channels=mid_channels,
dilations=dilations, activation=activation,
dropout=dropout)
# Final layer.
x = conv("zeros", x, apply_actnorm=False, conv_init="zeros",
output_channels=output_channels, dilations=dilations)
return x
|
python
|
def conv_stack(name, x, mid_channels, output_channels, dilations=None,
activation="relu", dropout=0.0):
"""3-layer convolutional stack.
Args:
name: variable scope.
x: 5-D Tensor.
mid_channels: Number of output channels of the first layer.
output_channels: Number of output channels.
dilations: Dilations to apply in the first 3x3 layer and the last 3x3 layer.
By default, apply no dilations.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: float, 0.0
Returns:
output: output of 3 layer conv network.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x = conv_block("conv_block", x, mid_channels=mid_channels,
dilations=dilations, activation=activation,
dropout=dropout)
# Final layer.
x = conv("zeros", x, apply_actnorm=False, conv_init="zeros",
output_channels=output_channels, dilations=dilations)
return x
|
[
"def",
"conv_stack",
"(",
"name",
",",
"x",
",",
"mid_channels",
",",
"output_channels",
",",
"dilations",
"=",
"None",
",",
"activation",
"=",
"\"relu\"",
",",
"dropout",
"=",
"0.0",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"x",
"=",
"conv_block",
"(",
"\"conv_block\"",
",",
"x",
",",
"mid_channels",
"=",
"mid_channels",
",",
"dilations",
"=",
"dilations",
",",
"activation",
"=",
"activation",
",",
"dropout",
"=",
"dropout",
")",
"# Final layer.",
"x",
"=",
"conv",
"(",
"\"zeros\"",
",",
"x",
",",
"apply_actnorm",
"=",
"False",
",",
"conv_init",
"=",
"\"zeros\"",
",",
"output_channels",
"=",
"output_channels",
",",
"dilations",
"=",
"dilations",
")",
"return",
"x"
] |
3-layer convolutional stack.
Args:
name: variable scope.
x: 5-D Tensor.
mid_channels: Number of output channels of the first layer.
output_channels: Number of output channels.
dilations: Dilations to apply in the first 3x3 layer and the last 3x3 layer.
By default, apply no dilations.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: float, 0.0
Returns:
output: output of 3 layer conv network.
|
[
"3",
"-",
"layer",
"convolutional",
"stack",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L638-L665
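The final conv is built with conv_init="zeros" and apply_actnorm=False, so the whole stack outputs exact zeros at the first training step; a coupling layer driven by it therefore starts as an identity map, which is the standard Glow initialization trick. A toy NumPy illustration of why a zero-initialized last layer nulls the block output:

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(2, 8, 8, 4)
w_mid = rng.randn(4, 512) * 0.1   # earlier layers: arbitrary weights
w_final = np.zeros((512, 8))      # final layer: zero-initialized

h = np.maximum(x @ w_mid, 0.0)    # stand-in for the conv_block above
out = h @ w_final                 # zero weights => zero output
assert np.allclose(out, 0.0)
# An additive coupling that uses `out` as its shift is exactly the
# identity at initialization: x2 + 0 == x2.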
|
21,815
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
additive_coupling
|
def additive_coupling(name, x, mid_channels=512, reverse=False,
activation="relu", dropout=0.0):
"""Reversible additive coupling layer.
Args:
name: variable scope.
x: 4-D Tensor, shape=(NHWC).
mid_channels: number of channels in the coupling layer.
reverse: Forward or reverse operation.
activation: "relu" or "gatu"
dropout: default, 0.0
Returns:
output: 4-D Tensor, shape=(NHWC)
objective: 0.0
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
output_channels = common_layers.shape_list(x)[-1] // 2
x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1)
z1 = x1
shift = conv_stack("nn", x1, mid_channels, output_channels=output_channels,
activation=activation, dropout=dropout)
if not reverse:
z2 = x2 + shift
else:
z2 = x2 - shift
return tf.concat([z1, z2], axis=3), 0.0
|
python
|
def additive_coupling(name, x, mid_channels=512, reverse=False,
activation="relu", dropout=0.0):
"""Reversible additive coupling layer.
Args:
name: variable scope.
x: 4-D Tensor, shape=(NHWC).
mid_channels: number of channels in the coupling layer.
reverse: Forward or reverse operation.
activation: "relu" or "gatu"
dropout: default, 0.0
Returns:
output: 4-D Tensor, shape=(NHWC)
objective: 0.0
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
output_channels = common_layers.shape_list(x)[-1] // 2
x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1)
z1 = x1
shift = conv_stack("nn", x1, mid_channels, output_channels=output_channels,
activation=activation, dropout=dropout)
if not reverse:
z2 = x2 + shift
else:
z2 = x2 - shift
return tf.concat([z1, z2], axis=3), 0.0
|
[
"def",
"additive_coupling",
"(",
"name",
",",
"x",
",",
"mid_channels",
"=",
"512",
",",
"reverse",
"=",
"False",
",",
"activation",
"=",
"\"relu\"",
",",
"dropout",
"=",
"0.0",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"output_channels",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"[",
"-",
"1",
"]",
"//",
"2",
"x1",
",",
"x2",
"=",
"tf",
".",
"split",
"(",
"x",
",",
"num_or_size_splits",
"=",
"2",
",",
"axis",
"=",
"-",
"1",
")",
"z1",
"=",
"x1",
"shift",
"=",
"conv_stack",
"(",
"\"nn\"",
",",
"x1",
",",
"mid_channels",
",",
"output_channels",
"=",
"output_channels",
",",
"activation",
"=",
"activation",
",",
"dropout",
"=",
"dropout",
")",
"if",
"not",
"reverse",
":",
"z2",
"=",
"x2",
"+",
"shift",
"else",
":",
"z2",
"=",
"x2",
"-",
"shift",
"return",
"tf",
".",
"concat",
"(",
"[",
"z1",
",",
"z2",
"]",
",",
"axis",
"=",
"3",
")",
",",
"0.0"
] |
Reversible additive coupling layer.
Args:
name: variable scope.
x: 4-D Tensor, shape=(NHWC).
mid_channels: number of channels in the coupling layer.
reverse: Forward or reverse operation.
activation: "relu" or "gatu"
dropout: default, 0.0
Returns:
output: 4-D Tensor, shape=(NHWC)
objective: 0.0
|
[
"Reversible",
"additive",
"coupling",
"layer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L669-L696
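The layer is exactly invertible no matter what the inner network computes, because the shift depends only on the untouched half x1 and can be recomputed verbatim in the inverse pass; the Jacobian is unit-triangular, hence the 0.0 objective. A self-contained NumPy round-trip with conv_stack replaced by a fixed random projection:

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(2, 8, 8, 4)
w = rng.randn(2, 2) * 0.5  # stand-in for conv_stack("nn", ...)

def nn(x1):
  return np.tanh(x1 @ w)

def additive(x, reverse=False):
  x1, x2 = np.split(x, 2, axis=-1)
  shift = nn(x1)             # depends only on x1, so it is recomputable
  z2 = x2 - shift if reverse else x2 + shift
  return np.concatenate([x1, z2], axis=-1)

z = additive(x)                     # forward
x_rec = additive(z, reverse=True)   # inverse recomputes the same shift
assert np.allclose(x, x_rec)        # exact round-trip; log-det is 0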
|
21,816
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
affine_coupling
|
def affine_coupling(name, x, mid_channels=512, activation="relu",
reverse=False, dropout=0.0):
"""Reversible affine coupling layer.
Args:
name: variable scope.
x: 4-D Tensor.
mid_channels: number of channels in the coupling layer.
activation: Can be either "relu" or "gatu".
reverse: Forward or reverse operation.
dropout: default, 0.0
Returns:
output: x shifted and scaled by an affine transformation.
objective: log-determinant of the jacobian
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1)
# scale, shift = NN(x1)
    # If forward:
    # z2 = scale * (x2 + shift)
    # If reverse:
    # z2 = (x2 / scale) - shift
z1 = x1
log_scale_and_shift = conv_stack(
"nn", x1, mid_channels, x_shape[-1], activation=activation,
dropout=dropout)
shift = log_scale_and_shift[:, :, :, 0::2]
scale = tf.nn.sigmoid(log_scale_and_shift[:, :, :, 1::2] + 2.0)
if not reverse:
z2 = (x2 + shift) * scale
else:
z2 = x2 / scale - shift
objective = tf.reduce_sum(tf.log(scale), axis=[1, 2, 3])
if reverse:
objective *= -1
return tf.concat([z1, z2], axis=3), objective
|
python
|
def affine_coupling(name, x, mid_channels=512, activation="relu",
reverse=False, dropout=0.0):
"""Reversible affine coupling layer.
Args:
name: variable scope.
x: 4-D Tensor.
mid_channels: number of channels in the coupling layer.
activation: Can be either "relu" or "gatu".
reverse: Forward or reverse operation.
dropout: default, 0.0
Returns:
output: x shifted and scaled by an affine transformation.
objective: log-determinant of the jacobian
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1)
# scale, shift = NN(x1)
    # If forward:
    # z2 = scale * (x2 + shift)
    # If reverse:
    # z2 = (x2 / scale) - shift
z1 = x1
log_scale_and_shift = conv_stack(
"nn", x1, mid_channels, x_shape[-1], activation=activation,
dropout=dropout)
shift = log_scale_and_shift[:, :, :, 0::2]
scale = tf.nn.sigmoid(log_scale_and_shift[:, :, :, 1::2] + 2.0)
if not reverse:
z2 = (x2 + shift) * scale
else:
z2 = x2 / scale - shift
objective = tf.reduce_sum(tf.log(scale), axis=[1, 2, 3])
if reverse:
objective *= -1
return tf.concat([z1, z2], axis=3), objective
|
[
"def",
"affine_coupling",
"(",
"name",
",",
"x",
",",
"mid_channels",
"=",
"512",
",",
"activation",
"=",
"\"relu\"",
",",
"reverse",
"=",
"False",
",",
"dropout",
"=",
"0.0",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"x_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"x1",
",",
"x2",
"=",
"tf",
".",
"split",
"(",
"x",
",",
"num_or_size_splits",
"=",
"2",
",",
"axis",
"=",
"-",
"1",
")",
"# scale, shift = NN(x1)",
"# If reverse:",
"# z2 = scale * (x2 + shift)",
"# Else:",
"# z2 = (x2 / scale) - shift",
"z1",
"=",
"x1",
"log_scale_and_shift",
"=",
"conv_stack",
"(",
"\"nn\"",
",",
"x1",
",",
"mid_channels",
",",
"x_shape",
"[",
"-",
"1",
"]",
",",
"activation",
"=",
"activation",
",",
"dropout",
"=",
"dropout",
")",
"shift",
"=",
"log_scale_and_shift",
"[",
":",
",",
":",
",",
":",
",",
"0",
":",
":",
"2",
"]",
"scale",
"=",
"tf",
".",
"nn",
".",
"sigmoid",
"(",
"log_scale_and_shift",
"[",
":",
",",
":",
",",
":",
",",
"1",
":",
":",
"2",
"]",
"+",
"2.0",
")",
"if",
"not",
"reverse",
":",
"z2",
"=",
"(",
"x2",
"+",
"shift",
")",
"*",
"scale",
"else",
":",
"z2",
"=",
"x2",
"/",
"scale",
"-",
"shift",
"objective",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"log",
"(",
"scale",
")",
",",
"axis",
"=",
"[",
"1",
",",
"2",
",",
"3",
"]",
")",
"if",
"reverse",
":",
"objective",
"*=",
"-",
"1",
"return",
"tf",
".",
"concat",
"(",
"[",
"z1",
",",
"z2",
"]",
",",
"axis",
"=",
"3",
")",
",",
"objective"
] |
Reversible affine coupling layer.
Args:
name: variable scope.
x: 4-D Tensor.
mid_channels: number of channels in the coupling layer.
activation: Can be either "relu" or "gatu".
reverse: Forward or reverse operation.
dropout: default, 0.0
Returns:
output: x shifted and scaled by an affine transformation.
objective: log-determinant of the jacobian
|
[
"Reversible",
"affine",
"coupling",
"layer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L700-L738
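Two details are easy to miss: the conv output is interleaved (even channels become the shift, odd channels the raw scale), and the scale is sigmoid(raw + 2.0), strictly positive and close to sigmoid(2) ~ 0.88 when the final conv is zero-initialized, so the layer starts near identity. A NumPy round-trip that also checks that forward and reverse log-determinants cancel; the random projection is a stand-in for conv_stack:

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(2, 4, 4, 4)
w = rng.randn(2, 4) * 0.5  # stand-in for conv_stack: 2 -> 2*2 channels

def sigmoid(z):
  return 1.0 / (1.0 + np.exp(-z))

def affine(x, reverse=False):
  x1, x2 = np.split(x, 2, axis=-1)
  raw = x1 @ w                            # recomputable: x1 is untouched
  shift = raw[..., 0::2]                  # even channels
  scale = sigmoid(raw[..., 1::2] + 2.0)   # odd channels, strictly positive
  z2 = x2 / scale - shift if reverse else (x2 + shift) * scale
  obj = np.sum(np.log(scale), axis=(1, 2, 3))
  if reverse:
    obj = -obj
  return np.concatenate([x1, z2], axis=-1), obj

z, fwd_logdet = affine(x)
x_rec, inv_logdet = affine(z, reverse=True)
assert np.allclose(x, x_rec)                 # exact inverse
assert np.allclose(fwd_logdet, -inv_logdet)  # log-dets cancel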
|
21,817
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
squeeze
|
def squeeze(name, x, factor=2, reverse=True):
"""Block-wise spatial squeezing of x to increase the number of channels.
Args:
name: Used for variable scoping.
x: 4-D Tensor of shape (batch_size X H X W X C)
factor: Factor by which the spatial dimensions should be squeezed.
    reverse: Squeeze or unsqueeze operation.
  Returns:
    x: 4-D Tensor of shape (batch_size X (H//factor) X (W//factor) X
      (C X factor^2)). If reverse is True, the spatial dimensions are
      expanded by factor instead, i.e. factor acts as (1 / factor).
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
shape = common_layers.shape_list(x)
if factor == 1:
return x
height = int(shape[1])
width = int(shape[2])
n_channels = int(shape[3])
if not reverse:
assert height % factor == 0 and width % factor == 0
x = tf.reshape(x, [-1, height//factor, factor,
width//factor, factor, n_channels])
x = tf.transpose(x, [0, 1, 3, 5, 2, 4])
x = tf.reshape(x, [-1, height//factor, width //
factor, n_channels*factor*factor])
else:
x = tf.reshape(
x, (-1, height, width, int(n_channels/factor**2), factor, factor))
x = tf.transpose(x, [0, 1, 4, 2, 5, 3])
x = tf.reshape(x, (-1, int(height*factor),
int(width*factor), int(n_channels/factor**2)))
return x
|
python
|
def squeeze(name, x, factor=2, reverse=True):
"""Block-wise spatial squeezing of x to increase the number of channels.
Args:
name: Used for variable scoping.
x: 4-D Tensor of shape (batch_size X H X W X C)
factor: Factor by which the spatial dimensions should be squeezed.
    reverse: Squeeze or unsqueeze operation.
  Returns:
    x: 4-D Tensor of shape (batch_size X (H//factor) X (W//factor) X
      (C X factor^2)). If reverse is True, the spatial dimensions are
      expanded by factor instead, i.e. factor acts as (1 / factor).
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
shape = common_layers.shape_list(x)
if factor == 1:
return x
height = int(shape[1])
width = int(shape[2])
n_channels = int(shape[3])
if not reverse:
assert height % factor == 0 and width % factor == 0
x = tf.reshape(x, [-1, height//factor, factor,
width//factor, factor, n_channels])
x = tf.transpose(x, [0, 1, 3, 5, 2, 4])
x = tf.reshape(x, [-1, height//factor, width //
factor, n_channels*factor*factor])
else:
x = tf.reshape(
x, (-1, height, width, int(n_channels/factor**2), factor, factor))
x = tf.transpose(x, [0, 1, 4, 2, 5, 3])
x = tf.reshape(x, (-1, int(height*factor),
int(width*factor), int(n_channels/factor**2)))
return x
|
[
"def",
"squeeze",
"(",
"name",
",",
"x",
",",
"factor",
"=",
"2",
",",
"reverse",
"=",
"True",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"if",
"factor",
"==",
"1",
":",
"return",
"x",
"height",
"=",
"int",
"(",
"shape",
"[",
"1",
"]",
")",
"width",
"=",
"int",
"(",
"shape",
"[",
"2",
"]",
")",
"n_channels",
"=",
"int",
"(",
"shape",
"[",
"3",
"]",
")",
"if",
"not",
"reverse",
":",
"assert",
"height",
"%",
"factor",
"==",
"0",
"and",
"width",
"%",
"factor",
"==",
"0",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"[",
"-",
"1",
",",
"height",
"//",
"factor",
",",
"factor",
",",
"width",
"//",
"factor",
",",
"factor",
",",
"n_channels",
"]",
")",
"x",
"=",
"tf",
".",
"transpose",
"(",
"x",
",",
"[",
"0",
",",
"1",
",",
"3",
",",
"5",
",",
"2",
",",
"4",
"]",
")",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"[",
"-",
"1",
",",
"height",
"//",
"factor",
",",
"width",
"//",
"factor",
",",
"n_channels",
"*",
"factor",
"*",
"factor",
"]",
")",
"else",
":",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"(",
"-",
"1",
",",
"height",
",",
"width",
",",
"int",
"(",
"n_channels",
"/",
"factor",
"**",
"2",
")",
",",
"factor",
",",
"factor",
")",
")",
"x",
"=",
"tf",
".",
"transpose",
"(",
"x",
",",
"[",
"0",
",",
"1",
",",
"4",
",",
"2",
",",
"5",
",",
"3",
"]",
")",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"(",
"-",
"1",
",",
"int",
"(",
"height",
"*",
"factor",
")",
",",
"int",
"(",
"width",
"*",
"factor",
")",
",",
"int",
"(",
"n_channels",
"/",
"factor",
"**",
"2",
")",
")",
")",
"return",
"x"
] |
Block-wise spatial squeezing of x to increase the number of channels.
Args:
name: Used for variable scoping.
x: 4-D Tensor of shape (batch_size X H X W X C)
factor: Factor by which the spatial dimensions should be squeezed.
reverse: Squeeze or unsqueeze operation.
Returns:
x: 4-D Tensor of shape (batch_size X (H//factor) X (W//factor) X
(C X factor^2)). If reverse is True, the spatial dimensions are
expanded by factor instead, i.e. factor acts as (1 / factor).
|
[
"Block",
"-",
"wise",
"spatial",
"squeezing",
"of",
"x",
"to",
"increase",
"the",
"number",
"of",
"channels",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L742-L776
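The reshape/transpose pair is a space-to-depth operation: each factor x factor spatial block is folded into factor^2 channels. A NumPy transcription of both directions, checked to round-trip (note the t2t signature defaults reverse=True, i.e. unsqueeze):

import numpy as np

def squeeze_np(x, factor=2, reverse=False):
  n, h, w, c = x.shape
  if not reverse:  # space -> depth
    x = x.reshape(n, h // factor, factor, w // factor, factor, c)
    x = x.transpose(0, 1, 3, 5, 2, 4)
    return x.reshape(n, h // factor, w // factor, c * factor * factor)
  # depth -> space
  x = x.reshape(n, h, w, c // factor**2, factor, factor)
  x = x.transpose(0, 1, 4, 2, 5, 3)
  return x.reshape(n, h * factor, w * factor, c // factor**2)

x = np.arange(2 * 4 * 4 * 3, dtype=np.float32).reshape(2, 4, 4, 3)
y = squeeze_np(x, factor=2)             # (2, 4, 4, 3) -> (2, 2, 2, 12)
assert y.shape == (2, 2, 2, 12)
assert np.array_equal(squeeze_np(y, factor=2, reverse=True), x)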
|
21,818
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
get_dilation_rates
|
def get_dilation_rates(hparams, width):
"""Get a list of valid dilation rates.
Args:
hparams: HParams.
width: spatial dimension. Ensures that the effective filter size is
not larger than the spatial dimension.
Returns:
allowed_dilations: A list of dilation rates.
"""
# dil_rate=1 means no dilation.
allowed_dilations = [[1]*5]
apply_dilations = hparams.get("latent_apply_dilations", False)
dilation_rates = hparams.get("latent_dilation_rates", [1, 3])
if apply_dilations:
for rate in dilation_rates:
      # k + (k - 1) * rate but k is hardcoded to be 3 everywhere.
filter_size = 3 + 2 * rate
if filter_size <= width:
curr_dilation = [1, 1, rate+1, rate+1, 1]
allowed_dilations.append(curr_dilation)
return allowed_dilations
|
python
|
def get_dilation_rates(hparams, width):
"""Get a list of valid dilation rates.
Args:
hparams: HParams.
width: spatial dimension. Ensures that the effective filter size is
not larger than the spatial dimension.
Returns:
allowed_dilations: A list of dilation rates.
"""
# dil_rate=1 means no dilation.
allowed_dilations = [[1]*5]
apply_dilations = hparams.get("latent_apply_dilations", False)
dilation_rates = hparams.get("latent_dilation_rates", [1, 3])
if apply_dilations:
for rate in dilation_rates:
      # k + (k - 1) * rate but k is hardcoded to be 3 everywhere.
filter_size = 3 + 2 * rate
if filter_size <= width:
curr_dilation = [1, 1, rate+1, rate+1, 1]
allowed_dilations.append(curr_dilation)
return allowed_dilations
|
[
"def",
"get_dilation_rates",
"(",
"hparams",
",",
"width",
")",
":",
"# dil_rate=1 means no dilation.",
"allowed_dilations",
"=",
"[",
"[",
"1",
"]",
"*",
"5",
"]",
"apply_dilations",
"=",
"hparams",
".",
"get",
"(",
"\"latent_apply_dilations\"",
",",
"False",
")",
"dilation_rates",
"=",
"hparams",
".",
"get",
"(",
"\"latent_dilation_rates\"",
",",
"[",
"1",
",",
"3",
"]",
")",
"if",
"apply_dilations",
":",
"for",
"rate",
"in",
"dilation_rates",
":",
"# k + (k - 1) * rate but k is harcoded to be 3 everywhere.",
"filter_size",
"=",
"3",
"+",
"2",
"*",
"rate",
"if",
"filter_size",
"<=",
"width",
":",
"curr_dilation",
"=",
"[",
"1",
",",
"1",
",",
"rate",
"+",
"1",
",",
"rate",
"+",
"1",
",",
"1",
"]",
"allowed_dilations",
".",
"append",
"(",
"curr_dilation",
")",
"return",
"allowed_dilations"
] |
Get a list of valid dilation rates.
Args:
hparams: HParams.
width: spatial dimension. Ensures that the effective filter size is
not larger than the spatial dimension.
Returns:
allowed_dilations: A list of dilation rates.
|
[
"Get",
"a",
"list",
"of",
"valid",
"dilation",
"rates",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L779-L800
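The effective size of a k x k kernel with dilation d is k + (k - 1) * (d - 1); with k = 3 and hparam rate r (dilation d = r + 1) this is exactly the 3 + 2 * r guard in the code. A quick check of which of the default rates [1, 3] survive at a few spatial widths (the undilated [1]*5 entry is always kept):

# k=3 kernel, hparam rate r means dilation d = r + 1, so the
# effective size is 3 + (3 - 1) * r = 3 + 2 * r.
for width in [4, 8, 16]:
  kept = [r for r in [1, 3] if 3 + 2 * r <= width]
  print(width, kept)
# width=4  -> []      (rate 1 already needs effective size 5)
# width=8  -> [1]     (rate 3 would need effective size 9)
# width=16 -> [1, 3]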
|
21,819
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
temporal_latent_to_dist
|
def temporal_latent_to_dist(name, x, hparams, output_channels=None):
"""Network that maps a time-indexed list of 3-D latents to a gaussian.
Args:
name: variable scope.
    x: 5-D Tensor, shape=(NTHWC); 4-D (NHWC) latents stacked across time
      on axis 1.
hparams: tf.contrib.training.Hparams.
output_channels: int, Number of channels of the output gaussian mean.
Returns:
dist: tfp.distributions.Normal
"""
_, _, width, _, res_channels = common_layers.shape_list(x)
if output_channels is None:
output_channels = res_channels
dilation_rates = get_dilation_rates(hparams, width)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
h = x
for i in range(hparams.latent_encoder_depth):
if hparams.latent_apply_dilations:
h2 = dilated_conv_stack("dil_latent_3d_res_%d" % i, h,
mid_channels=hparams.latent_encoder_width,
output_channels=res_channels,
dilation_rates=dilation_rates,
activation=hparams.latent_activation,
dropout=hparams.latent_dropout)
else:
h2 = conv_stack("latent_3d_res_%d" % i, h,
mid_channels=hparams.latent_encoder_width,
output_channels=res_channels,
activation=hparams.latent_activation,
dropout=hparams.latent_dropout)
h += h2
# take last activation that should capture all context since padding is
# on left.
h = h[:, -1, :, :, :]
h = conv("res_final", h, apply_actnorm=False, conv_init="zeros",
output_channels=2*output_channels, filter_size=[1, 1])
mean, log_scale = h[:, :, :, 0::2], h[:, :, :, 1::2]
return tfp.distributions.Normal(mean, tf.exp(log_scale))
|
python
|
def temporal_latent_to_dist(name, x, hparams, output_channels=None):
"""Network that maps a time-indexed list of 3-D latents to a gaussian.
Args:
name: variable scope.
    x: 5-D Tensor, shape=(NTHWC); 4-D (NHWC) latents stacked across time
      on axis 1.
hparams: tf.contrib.training.Hparams.
output_channels: int, Number of channels of the output gaussian mean.
Returns:
dist: tfp.distributions.Normal
"""
_, _, width, _, res_channels = common_layers.shape_list(x)
if output_channels is None:
output_channels = res_channels
dilation_rates = get_dilation_rates(hparams, width)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
h = x
for i in range(hparams.latent_encoder_depth):
if hparams.latent_apply_dilations:
h2 = dilated_conv_stack("dil_latent_3d_res_%d" % i, h,
mid_channels=hparams.latent_encoder_width,
output_channels=res_channels,
dilation_rates=dilation_rates,
activation=hparams.latent_activation,
dropout=hparams.latent_dropout)
else:
h2 = conv_stack("latent_3d_res_%d" % i, h,
mid_channels=hparams.latent_encoder_width,
output_channels=res_channels,
activation=hparams.latent_activation,
dropout=hparams.latent_dropout)
h += h2
# take last activation that should capture all context since padding is
# on left.
h = h[:, -1, :, :, :]
h = conv("res_final", h, apply_actnorm=False, conv_init="zeros",
output_channels=2*output_channels, filter_size=[1, 1])
mean, log_scale = h[:, :, :, 0::2], h[:, :, :, 1::2]
return tfp.distributions.Normal(mean, tf.exp(log_scale))
|
[
"def",
"temporal_latent_to_dist",
"(",
"name",
",",
"x",
",",
"hparams",
",",
"output_channels",
"=",
"None",
")",
":",
"_",
",",
"_",
",",
"width",
",",
"_",
",",
"res_channels",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"if",
"output_channels",
"is",
"None",
":",
"output_channels",
"=",
"res_channels",
"dilation_rates",
"=",
"get_dilation_rates",
"(",
"hparams",
",",
"width",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"h",
"=",
"x",
"for",
"i",
"in",
"range",
"(",
"hparams",
".",
"latent_encoder_depth",
")",
":",
"if",
"hparams",
".",
"latent_apply_dilations",
":",
"h2",
"=",
"dilated_conv_stack",
"(",
"\"dil_latent_3d_res_%d\"",
"%",
"i",
",",
"h",
",",
"mid_channels",
"=",
"hparams",
".",
"latent_encoder_width",
",",
"output_channels",
"=",
"res_channels",
",",
"dilation_rates",
"=",
"dilation_rates",
",",
"activation",
"=",
"hparams",
".",
"latent_activation",
",",
"dropout",
"=",
"hparams",
".",
"latent_dropout",
")",
"else",
":",
"h2",
"=",
"conv_stack",
"(",
"\"latent_3d_res_%d\"",
"%",
"i",
",",
"h",
",",
"mid_channels",
"=",
"hparams",
".",
"latent_encoder_width",
",",
"output_channels",
"=",
"res_channels",
",",
"activation",
"=",
"hparams",
".",
"latent_activation",
",",
"dropout",
"=",
"hparams",
".",
"latent_dropout",
")",
"h",
"+=",
"h2",
"# take last activation that should capture all context since padding is",
"# on left.",
"h",
"=",
"h",
"[",
":",
",",
"-",
"1",
",",
":",
",",
":",
",",
":",
"]",
"h",
"=",
"conv",
"(",
"\"res_final\"",
",",
"h",
",",
"apply_actnorm",
"=",
"False",
",",
"conv_init",
"=",
"\"zeros\"",
",",
"output_channels",
"=",
"2",
"*",
"output_channels",
",",
"filter_size",
"=",
"[",
"1",
",",
"1",
"]",
")",
"mean",
",",
"log_scale",
"=",
"h",
"[",
":",
",",
":",
",",
":",
",",
"0",
":",
":",
"2",
"]",
",",
"h",
"[",
":",
",",
":",
",",
":",
",",
"1",
":",
":",
"2",
"]",
"return",
"tfp",
".",
"distributions",
".",
"Normal",
"(",
"mean",
",",
"tf",
".",
"exp",
"(",
"log_scale",
")",
")"
] |
Network that maps a time-indexed list of 3-D latents to a gaussian.
Args:
name: variable scope.
x: 5-D Tensor, shape=(NTHWC); 4-D (NHWC) latents stacked across time
on axis 1.
hparams: tf.contrib.training.Hparams.
output_channels: int, Number of channels of the output gaussian mean.
Returns:
dist: tfp.distributions.Normal
|
[
"Network",
"that",
"maps",
"a",
"time",
"-",
"indexed",
"list",
"of",
"3",
"-",
"D",
"latents",
"to",
"a",
"gaussian",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L804-L843
|
21,820
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
single_conv_dist
|
def single_conv_dist(name, x, output_channels=None):
"""A 3x3 convolution mapping x to a standard normal distribution at init.
Args:
name: variable scope.
x: 4-D Tensor.
output_channels: number of channels of the mean and std.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
if output_channels is None:
output_channels = x_shape[-1]
mean_log_scale = conv("conv2d", x, output_channels=2*output_channels,
conv_init="zeros", apply_actnorm=False)
mean = mean_log_scale[:, :, :, 0::2]
log_scale = mean_log_scale[:, :, :, 1::2]
return tf.distributions.Normal(mean, tf.exp(log_scale))
|
python
|
def single_conv_dist(name, x, output_channels=None):
"""A 3x3 convolution mapping x to a standard normal distribution at init.
Args:
name: variable scope.
x: 4-D Tensor.
output_channels: number of channels of the mean and std.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
if output_channels is None:
output_channels = x_shape[-1]
mean_log_scale = conv("conv2d", x, output_channels=2*output_channels,
conv_init="zeros", apply_actnorm=False)
mean = mean_log_scale[:, :, :, 0::2]
log_scale = mean_log_scale[:, :, :, 1::2]
return tf.distributions.Normal(mean, tf.exp(log_scale))
|
[
"def",
"single_conv_dist",
"(",
"name",
",",
"x",
",",
"output_channels",
"=",
"None",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"x_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"if",
"output_channels",
"is",
"None",
":",
"output_channels",
"=",
"x_shape",
"[",
"-",
"1",
"]",
"mean_log_scale",
"=",
"conv",
"(",
"\"conv2d\"",
",",
"x",
",",
"output_channels",
"=",
"2",
"*",
"output_channels",
",",
"conv_init",
"=",
"\"zeros\"",
",",
"apply_actnorm",
"=",
"False",
")",
"mean",
"=",
"mean_log_scale",
"[",
":",
",",
":",
",",
":",
",",
"0",
":",
":",
"2",
"]",
"log_scale",
"=",
"mean_log_scale",
"[",
":",
",",
":",
",",
":",
",",
"1",
":",
":",
"2",
"]",
"return",
"tf",
".",
"distributions",
".",
"Normal",
"(",
"mean",
",",
"tf",
".",
"exp",
"(",
"log_scale",
")",
")"
] |
A 3x3 convolution mapping x to a standard normal distribution at init.
Args:
name: variable scope.
x: 4-D Tensor.
output_channels: number of channels of the mean and std.
|
[
"A",
"3x3",
"convolution",
"mapping",
"x",
"to",
"a",
"standard",
"normal",
"distribution",
"at",
"init",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L847-L863
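With the conv zero-initialized, mean and log_scale are zero everywhere, so the returned distribution is N(0, 1) at the first step; the even/odd channel interleave is the same mean/log-scale packing used by the other *_to_dist functions above. A NumPy check of the init-time behavior, with the conv reduced to a zero matrix:

import numpy as np

x = np.random.RandomState(0).randn(2, 8, 8, 4)
w = np.zeros((4, 2 * 4))                # zero-init conv: 4 -> 2*4 channels

mean_log_scale = x @ w                  # all zeros at initialization
mean = mean_log_scale[..., 0::2]        # even channels -> mean
log_scale = mean_log_scale[..., 1::2]   # odd channels  -> log-scale

assert np.allclose(mean, 0.0)
assert np.allclose(np.exp(log_scale), 1.0)   # i.e. N(0, 1)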
|
21,821
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
latent_to_dist
|
def latent_to_dist(name, x, hparams, output_channels=None):
"""Map latent to the mean and log-scale of a Gaussian.
Args:
name: variable scope.
x: 4-D Tensor of shape (NHWC)
hparams: HParams.
latent_architecture - can be "single_conv", "glow_nn" or "glow_resnet",
default = single_conv
latent_encoder_depth - int, depth of architecture, valid if
latent_architecture is "glow_nn" or "glow_resnet".
latent_pre_output_channels - 512, valid only when latent_architecture
is "glow_nn".
latent_encoder_width - 512, maximum width of the network
output_channels: int, number of output channels of the mean (and std).
if not provided, set it to be the output channels of x.
Returns:
dist: instance of tfp.distributions.Normal
Raises:
    ValueError: If architecture not in ["single_conv", "glow_nn",
      "glow_resnet"]
"""
architecture = hparams.get("latent_architecture", "single_conv")
depth = hparams.get("latent_encoder_depth", 1)
pre_output_channels = hparams.get("latent_pre_output_channels", 512)
width = hparams.get("latent_encoder_width", 512)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
if output_channels is None:
output_channels = x_shape[-1]
if architecture == "single_conv":
return single_conv_dist("single_conv", x, output_channels)
if architecture == "glow_nn":
mean_log_scale = x
for layer in range(1, depth + 1):
mid_channels = pre_output_channels // 2**(depth - layer)
mean_log_scale = conv_block("glow_nn_%d" % layer, mean_log_scale,
mid_channels=mid_channels)
mean_log_scale = conv("glow_nn_zeros", mean_log_scale,
filter_size=[3, 3], stride=[1, 1],
output_channels=2*output_channels,
apply_actnorm=False, conv_init="zeros")
elif architecture == "glow_resnet":
h = x
for layer in range(depth):
h3 = conv_stack("latent_resnet_%d" % layer, h,
mid_channels=width, output_channels=x_shape[-1],
dropout=hparams.coupling_dropout)
h += h3
mean_log_scale = conv("glow_res_final", h, conv_init="zeros",
output_channels=2*output_channels,
apply_actnorm=False)
else:
raise ValueError("expected architecture to be single_conv or glow_nn "
"got %s" % architecture)
mean = mean_log_scale[:, :, :, 0::2]
log_scale = mean_log_scale[:, :, :, 1::2]
return tfp.distributions.Normal(mean, tf.exp(log_scale))
|
python
|
def latent_to_dist(name, x, hparams, output_channels=None):
"""Map latent to the mean and log-scale of a Gaussian.
Args:
name: variable scope.
x: 4-D Tensor of shape (NHWC)
hparams: HParams.
latent_architecture - can be "single_conv", "glow_nn" or "glow_resnet",
default = single_conv
latent_encoder_depth - int, depth of architecture, valid if
latent_architecture is "glow_nn" or "glow_resnet".
latent_pre_output_channels - 512, valid only when latent_architecture
is "glow_nn".
latent_encoder_width - 512, maximum width of the network
output_channels: int, number of output channels of the mean (and std).
if not provided, set it to be the output channels of x.
Returns:
dist: instance of tfp.distributions.Normal
Raises:
    ValueError: If architecture not in ["single_conv", "glow_nn",
      "glow_resnet"]
"""
architecture = hparams.get("latent_architecture", "single_conv")
depth = hparams.get("latent_encoder_depth", 1)
pre_output_channels = hparams.get("latent_pre_output_channels", 512)
width = hparams.get("latent_encoder_width", 512)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
if output_channels is None:
output_channels = x_shape[-1]
if architecture == "single_conv":
return single_conv_dist("single_conv", x, output_channels)
if architecture == "glow_nn":
mean_log_scale = x
for layer in range(1, depth + 1):
mid_channels = pre_output_channels // 2**(depth - layer)
mean_log_scale = conv_block("glow_nn_%d" % layer, mean_log_scale,
mid_channels=mid_channels)
mean_log_scale = conv("glow_nn_zeros", mean_log_scale,
filter_size=[3, 3], stride=[1, 1],
output_channels=2*output_channels,
apply_actnorm=False, conv_init="zeros")
elif architecture == "glow_resnet":
h = x
for layer in range(depth):
h3 = conv_stack("latent_resnet_%d" % layer, h,
mid_channels=width, output_channels=x_shape[-1],
dropout=hparams.coupling_dropout)
h += h3
mean_log_scale = conv("glow_res_final", h, conv_init="zeros",
output_channels=2*output_channels,
apply_actnorm=False)
else:
raise ValueError("expected architecture to be single_conv or glow_nn "
"got %s" % architecture)
mean = mean_log_scale[:, :, :, 0::2]
log_scale = mean_log_scale[:, :, :, 1::2]
return tfp.distributions.Normal(mean, tf.exp(log_scale))
|
[
"def",
"latent_to_dist",
"(",
"name",
",",
"x",
",",
"hparams",
",",
"output_channels",
"=",
"None",
")",
":",
"architecture",
"=",
"hparams",
".",
"get",
"(",
"\"latent_architecture\"",
",",
"\"single_conv\"",
")",
"depth",
"=",
"hparams",
".",
"get",
"(",
"\"latent_encoder_depth\"",
",",
"1",
")",
"pre_output_channels",
"=",
"hparams",
".",
"get",
"(",
"\"latent_pre_output_channels\"",
",",
"512",
")",
"width",
"=",
"hparams",
".",
"get",
"(",
"\"latent_encoder_width\"",
",",
"512",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"x_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"if",
"output_channels",
"is",
"None",
":",
"output_channels",
"=",
"x_shape",
"[",
"-",
"1",
"]",
"if",
"architecture",
"==",
"\"single_conv\"",
":",
"return",
"single_conv_dist",
"(",
"\"single_conv\"",
",",
"x",
",",
"output_channels",
")",
"if",
"architecture",
"==",
"\"glow_nn\"",
":",
"mean_log_scale",
"=",
"x",
"for",
"layer",
"in",
"range",
"(",
"1",
",",
"depth",
"+",
"1",
")",
":",
"mid_channels",
"=",
"pre_output_channels",
"//",
"2",
"**",
"(",
"depth",
"-",
"layer",
")",
"mean_log_scale",
"=",
"conv_block",
"(",
"\"glow_nn_%d\"",
"%",
"layer",
",",
"mean_log_scale",
",",
"mid_channels",
"=",
"mid_channels",
")",
"mean_log_scale",
"=",
"conv",
"(",
"\"glow_nn_zeros\"",
",",
"mean_log_scale",
",",
"filter_size",
"=",
"[",
"3",
",",
"3",
"]",
",",
"stride",
"=",
"[",
"1",
",",
"1",
"]",
",",
"output_channels",
"=",
"2",
"*",
"output_channels",
",",
"apply_actnorm",
"=",
"False",
",",
"conv_init",
"=",
"\"zeros\"",
")",
"elif",
"architecture",
"==",
"\"glow_resnet\"",
":",
"h",
"=",
"x",
"for",
"layer",
"in",
"range",
"(",
"depth",
")",
":",
"h3",
"=",
"conv_stack",
"(",
"\"latent_resnet_%d\"",
"%",
"layer",
",",
"h",
",",
"mid_channels",
"=",
"width",
",",
"output_channels",
"=",
"x_shape",
"[",
"-",
"1",
"]",
",",
"dropout",
"=",
"hparams",
".",
"coupling_dropout",
")",
"h",
"+=",
"h3",
"mean_log_scale",
"=",
"conv",
"(",
"\"glow_res_final\"",
",",
"h",
",",
"conv_init",
"=",
"\"zeros\"",
",",
"output_channels",
"=",
"2",
"*",
"output_channels",
",",
"apply_actnorm",
"=",
"False",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"expected architecture to be single_conv or glow_nn \"",
"\"got %s\"",
"%",
"architecture",
")",
"mean",
"=",
"mean_log_scale",
"[",
":",
",",
":",
",",
":",
",",
"0",
":",
":",
"2",
"]",
"log_scale",
"=",
"mean_log_scale",
"[",
":",
",",
":",
",",
":",
",",
"1",
":",
":",
"2",
"]",
"return",
"tfp",
".",
"distributions",
".",
"Normal",
"(",
"mean",
",",
"tf",
".",
"exp",
"(",
"log_scale",
")",
")"
] |
Map latent to the mean and log-scale of a Gaussian.
Args:
name: variable scope.
x: 4-D Tensor of shape (NHWC)
hparams: HParams.
latent_architecture - can be "single_conv", "glow_nn" or "glow_resnet",
default = single_conv
latent_encoder_depth - int, depth of architecture, valid if
latent_architecture is "glow_nn" or "glow_resnet".
latent_pre_output_channels - 512, valid only when latent_architecture
is "glow_nn".
latent_encoder_width - 512, maximum width of the network
output_channels: int, number of output channels of the mean (and std).
if not provided, set it to be the output channels of x.
Returns:
dist: instance of tfp.distributions.Normal
Raises:
ValueError: If architecture not in ["single_conv", "glow_nn", "glow_resnet"]
|
[
"Map",
"latent",
"to",
"the",
"mean",
"and",
"log",
"-",
"scale",
"of",
"a",
"Gaussian",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L867-L925
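In the "glow_nn" branch, mid_channels = pre_output_channels // 2**(depth - layer), so the width doubles at every layer and reaches pre_output_channels at the last one. The schedule for the default pre_output_channels of 512 and an illustrative depth of 3 (the hparam defaults to 1):

depth, pre_output_channels = 3, 512
for layer in range(1, depth + 1):
  print(layer, pre_output_channels // 2**(depth - layer))
# layer 1 -> 128
# layer 2 -> 256
# layer 3 -> 512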
|
21,822
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
noise_op
|
def noise_op(latents, hparams):
"""Adds isotropic gaussian-noise to each latent.
Args:
latents: 4-D or 5-D tensor, shape=(NTHWC) or (NHWC).
hparams: HParams.
Returns:
    latents: latents with isotropic gaussian noise added.
"""
if hparams.latent_noise == 0 or hparams.mode != tf.estimator.ModeKeys.TRAIN:
return latents
latent_shape = common_layers.shape_list(latents)
return latents + tf.random_normal(latent_shape, stddev=hparams.latent_noise)
|
python
|
def noise_op(latents, hparams):
"""Adds isotropic gaussian-noise to each latent.
Args:
latents: 4-D or 5-D tensor, shape=(NTHWC) or (NHWC).
hparams: HParams.
Returns:
    latents: latents with isotropic gaussian noise added.
"""
if hparams.latent_noise == 0 or hparams.mode != tf.estimator.ModeKeys.TRAIN:
return latents
latent_shape = common_layers.shape_list(latents)
return latents + tf.random_normal(latent_shape, stddev=hparams.latent_noise)
|
[
"def",
"noise_op",
"(",
"latents",
",",
"hparams",
")",
":",
"if",
"hparams",
".",
"latent_noise",
"==",
"0",
"or",
"hparams",
".",
"mode",
"!=",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
":",
"return",
"latents",
"latent_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"latents",
")",
"return",
"latents",
"+",
"tf",
".",
"random_normal",
"(",
"latent_shape",
",",
"stddev",
"=",
"hparams",
".",
"latent_noise",
")"
] |
Adds isotropic gaussian-noise to each latent.
Args:
latents: 4-D or 5-D tensor, shape=(NTHWC) or (NHWC).
hparams: HParams.
Returns:
latents: latents with isotropic gaussian noise added.
|
[
"Adds",
"isotropic",
"gaussian",
"-",
"noise",
"to",
"each",
"latent",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L929-L941
|
21,823
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
merge_level_and_latent_dist
|
def merge_level_and_latent_dist(level_dist, latent_dist,
merge_std="prev_level"):
"""Merge level_dist and latent_dist.
  new_dist ~ N(level_dist.mean + latent_dist.mean, std) where std is determined
according to merge_std.
Args:
level_dist: instance of tfp.distributions.Normal
latent_dist: instance of tfp.distributions.Normal
merge_std: can be "prev_level", "prev_step" or "normal".
Returns:
merged_dist: instance of tfp.distributions.Normal
"""
level_mean, level_std = level_dist.loc, level_dist.scale
latent_mean, latent_std = latent_dist.loc, latent_dist.scale
new_mean = level_mean + latent_mean
if merge_std == "normal":
z_shape = common_layers.shape_list(latent_mean)
log_scale = tf.get_variable(
"merge_std", shape=z_shape, dtype=tf.float32,
initializer=tf.zeros_initializer(), trainable=False)
scale = tf.exp(log_scale * 3.0)
elif merge_std == "prev_level":
scale = level_std
elif merge_std == "prev_step":
scale = latent_std
return tfp.distributions.Normal(loc=new_mean, scale=scale)
|
python
|
def merge_level_and_latent_dist(level_dist, latent_dist,
merge_std="prev_level"):
"""Merge level_dist and latent_dist.
  new_dist ~ N(level_dist.mean + latent_dist.mean, std) where std is determined
according to merge_std.
Args:
level_dist: instance of tfp.distributions.Normal
latent_dist: instance of tfp.distributions.Normal
merge_std: can be "prev_level", "prev_step" or "normal".
Returns:
merged_dist: instance of tfp.distributions.Normal
"""
level_mean, level_std = level_dist.loc, level_dist.scale
latent_mean, latent_std = latent_dist.loc, latent_dist.scale
new_mean = level_mean + latent_mean
if merge_std == "normal":
z_shape = common_layers.shape_list(latent_mean)
log_scale = tf.get_variable(
"merge_std", shape=z_shape, dtype=tf.float32,
initializer=tf.zeros_initializer(), trainable=False)
scale = tf.exp(log_scale * 3.0)
elif merge_std == "prev_level":
scale = level_std
elif merge_std == "prev_step":
scale = latent_std
return tfp.distributions.Normal(loc=new_mean, scale=scale)
|
[
"def",
"merge_level_and_latent_dist",
"(",
"level_dist",
",",
"latent_dist",
",",
"merge_std",
"=",
"\"prev_level\"",
")",
":",
"level_mean",
",",
"level_std",
"=",
"level_dist",
".",
"loc",
",",
"level_dist",
".",
"scale",
"latent_mean",
",",
"latent_std",
"=",
"latent_dist",
".",
"loc",
",",
"latent_dist",
".",
"scale",
"new_mean",
"=",
"level_mean",
"+",
"latent_mean",
"if",
"merge_std",
"==",
"\"normal\"",
":",
"z_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"latent_mean",
")",
"log_scale",
"=",
"tf",
".",
"get_variable",
"(",
"\"merge_std\"",
",",
"shape",
"=",
"z_shape",
",",
"dtype",
"=",
"tf",
".",
"float32",
",",
"initializer",
"=",
"tf",
".",
"zeros_initializer",
"(",
")",
",",
"trainable",
"=",
"False",
")",
"scale",
"=",
"tf",
".",
"exp",
"(",
"log_scale",
"*",
"3.0",
")",
"elif",
"merge_std",
"==",
"\"prev_level\"",
":",
"scale",
"=",
"level_std",
"elif",
"merge_std",
"==",
"\"prev_step\"",
":",
"scale",
"=",
"latent_std",
"return",
"tfp",
".",
"distributions",
".",
"Normal",
"(",
"loc",
"=",
"new_mean",
",",
"scale",
"=",
"scale",
")"
] |
Merge level_dist and latent_dist.
new_dist ~ N(level_dist.mean + latent_dist.mean, std) where std is determined
according to merge_std.
Args:
level_dist: instance of tfp.distributions.Normal
latent_dist: instance of tfp.distributions.Normal
merge_std: can be "prev_level", "prev_step" or "normal".
Returns:
merged_dist: instance of tfp.distributions.Normal
|
[
"Merge",
"level_dist",
"and",
"latent_dist",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L945-L972
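The merged Gaussian always adds the two means; merge_std only selects where the scale comes from ("prev_level" takes the level std, "prev_step" the latent std, and "normal" a scale learned from a zero-initialized variable, omitted below; any other value would fall through with scale undefined). A minimal sketch of the selection:

import numpy as np

level_mean, level_std = np.array([0.5]), np.array([2.0])
latent_mean, latent_std = np.array([-0.25]), np.array([0.5])

def merge(merge_std):
  new_mean = level_mean + latent_mean   # the means are always added
  scale = {"prev_level": level_std, "prev_step": latent_std}[merge_std]
  return new_mean, scale

mean, scale = merge("prev_level")
assert np.allclose(mean, 0.25) and np.allclose(scale, 2.0)
_, scale = merge("prev_step")
assert np.allclose(scale, 0.5)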
|
21,824
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
level_cond_prior
|
def level_cond_prior(prior_dist, z, latent, hparams, state):
"""Returns a conditional prior for each level.
Args:
prior_dist: Distribution conditioned on the previous levels.
z: Tensor, output of the previous levels.
latent: Tensor or a list of tensors to condition the latent_distribution.
hparams: next_frame_glow hparams.
state: Current LSTM state. Used only if hparams.latent_dist_encoder is
a lstm.
Raises:
ValueError: If hparams.latent_dist_encoder is "pointwise" and if the shape
of latent is different from z.
"""
latent_dist_encoder = hparams.get("latent_dist_encoder", None)
latent_skip = hparams.get("latent_skip", False)
if latent_dist_encoder == "pointwise":
last_latent = latent
merge_std = hparams.level_scale
latent_shape = common_layers.shape_list(latent)
z_shape = common_layers.shape_list(z)
if latent_shape != z_shape:
raise ValueError("Expected latent_shape to be %s, got %s" %
(latent_shape, z_shape))
latent_dist = scale_gaussian_prior(
"latent_prior", latent, logscale_factor=3.0)
cond_dist = merge_level_and_latent_dist(prior_dist, latent_dist,
merge_std=merge_std)
elif latent_dist_encoder == "conv_net":
output_channels = common_layers.shape_list(z)[-1]
last_latent = latent[-1]
latent_stack = tf.concat([prior_dist.loc] + latent, axis=-1)
latent_stack = noise_op(latent_stack, hparams)
cond_dist = latent_to_dist(
"latent_stack", latent_stack, hparams=hparams,
output_channels=output_channels)
elif latent_dist_encoder == "conv3d_net":
last_latent = latent[-1]
output_channels = common_layers.shape_list(last_latent)[-1]
num_steps = len(latent)
# Stack across time.
cond_latents = tf.stack(latent, axis=1)
# Concat latents from previous levels across channels.
prev_latents = tf.tile(tf.expand_dims(prior_dist.loc, axis=1),
[1, num_steps, 1, 1, 1])
cond_latents = tf.concat((cond_latents, prev_latents), axis=-1)
cond_latents = noise_op(cond_latents, hparams)
cond_dist = temporal_latent_to_dist(
"latent_stack", cond_latents, hparams, output_channels=output_channels)
elif latent_dist_encoder == "conv_lstm":
last_latent = latent
output_channels = common_layers.shape_list(z)[-1]
latent_stack = tf.concat((prior_dist.loc, latent), axis=-1)
latent_stack = noise_op(latent_stack, hparams)
_, state = common_video.conv_lstm_2d(
latent_stack, state, hparams.latent_encoder_width, kernel_size=3,
name="conv_lstm")
cond_dist = single_conv_dist(
"state_to_dist", state.h, output_channels=output_channels)
if latent_skip:
new_mean = cond_dist.loc + last_latent
cond_dist = tfp.distributions.Normal(new_mean, cond_dist.scale)
return cond_dist.loc, cond_dist.scale, state
|
python
|
def level_cond_prior(prior_dist, z, latent, hparams, state):
"""Returns a conditional prior for each level.
Args:
prior_dist: Distribution conditioned on the previous levels.
z: Tensor, output of the previous levels.
latent: Tensor or a list of tensors to condition the latent_distribution.
hparams: next_frame_glow hparams.
state: Current LSTM state. Used only if hparams.latent_dist_encoder is
a lstm.
Raises:
ValueError: If hparams.latent_dist_encoder is "pointwise" and if the shape
of latent is different from z.
"""
latent_dist_encoder = hparams.get("latent_dist_encoder", None)
latent_skip = hparams.get("latent_skip", False)
if latent_dist_encoder == "pointwise":
last_latent = latent
merge_std = hparams.level_scale
latent_shape = common_layers.shape_list(latent)
z_shape = common_layers.shape_list(z)
if latent_shape != z_shape:
raise ValueError("Expected latent_shape to be %s, got %s" %
(latent_shape, z_shape))
latent_dist = scale_gaussian_prior(
"latent_prior", latent, logscale_factor=3.0)
cond_dist = merge_level_and_latent_dist(prior_dist, latent_dist,
merge_std=merge_std)
elif latent_dist_encoder == "conv_net":
output_channels = common_layers.shape_list(z)[-1]
last_latent = latent[-1]
latent_stack = tf.concat([prior_dist.loc] + latent, axis=-1)
latent_stack = noise_op(latent_stack, hparams)
cond_dist = latent_to_dist(
"latent_stack", latent_stack, hparams=hparams,
output_channels=output_channels)
elif latent_dist_encoder == "conv3d_net":
last_latent = latent[-1]
output_channels = common_layers.shape_list(last_latent)[-1]
num_steps = len(latent)
# Stack across time.
cond_latents = tf.stack(latent, axis=1)
# Concat latents from previous levels across channels.
prev_latents = tf.tile(tf.expand_dims(prior_dist.loc, axis=1),
[1, num_steps, 1, 1, 1])
cond_latents = tf.concat((cond_latents, prev_latents), axis=-1)
cond_latents = noise_op(cond_latents, hparams)
cond_dist = temporal_latent_to_dist(
"latent_stack", cond_latents, hparams, output_channels=output_channels)
elif latent_dist_encoder == "conv_lstm":
last_latent = latent
output_channels = common_layers.shape_list(z)[-1]
latent_stack = tf.concat((prior_dist.loc, latent), axis=-1)
latent_stack = noise_op(latent_stack, hparams)
_, state = common_video.conv_lstm_2d(
latent_stack, state, hparams.latent_encoder_width, kernel_size=3,
name="conv_lstm")
cond_dist = single_conv_dist(
"state_to_dist", state.h, output_channels=output_channels)
if latent_skip:
new_mean = cond_dist.loc + last_latent
cond_dist = tfp.distributions.Normal(new_mean, cond_dist.scale)
return cond_dist.loc, cond_dist.scale, state
|
[
"def",
"level_cond_prior",
"(",
"prior_dist",
",",
"z",
",",
"latent",
",",
"hparams",
",",
"state",
")",
":",
"latent_dist_encoder",
"=",
"hparams",
".",
"get",
"(",
"\"latent_dist_encoder\"",
",",
"None",
")",
"latent_skip",
"=",
"hparams",
".",
"get",
"(",
"\"latent_skip\"",
",",
"False",
")",
"if",
"latent_dist_encoder",
"==",
"\"pointwise\"",
":",
"last_latent",
"=",
"latent",
"merge_std",
"=",
"hparams",
".",
"level_scale",
"latent_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"latent",
")",
"z_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"z",
")",
"if",
"latent_shape",
"!=",
"z_shape",
":",
"raise",
"ValueError",
"(",
"\"Expected latent_shape to be %s, got %s\"",
"%",
"(",
"latent_shape",
",",
"z_shape",
")",
")",
"latent_dist",
"=",
"scale_gaussian_prior",
"(",
"\"latent_prior\"",
",",
"latent",
",",
"logscale_factor",
"=",
"3.0",
")",
"cond_dist",
"=",
"merge_level_and_latent_dist",
"(",
"prior_dist",
",",
"latent_dist",
",",
"merge_std",
"=",
"merge_std",
")",
"elif",
"latent_dist_encoder",
"==",
"\"conv_net\"",
":",
"output_channels",
"=",
"common_layers",
".",
"shape_list",
"(",
"z",
")",
"[",
"-",
"1",
"]",
"last_latent",
"=",
"latent",
"[",
"-",
"1",
"]",
"latent_stack",
"=",
"tf",
".",
"concat",
"(",
"[",
"prior_dist",
".",
"loc",
"]",
"+",
"latent",
",",
"axis",
"=",
"-",
"1",
")",
"latent_stack",
"=",
"noise_op",
"(",
"latent_stack",
",",
"hparams",
")",
"cond_dist",
"=",
"latent_to_dist",
"(",
"\"latent_stack\"",
",",
"latent_stack",
",",
"hparams",
"=",
"hparams",
",",
"output_channels",
"=",
"output_channels",
")",
"elif",
"latent_dist_encoder",
"==",
"\"conv3d_net\"",
":",
"last_latent",
"=",
"latent",
"[",
"-",
"1",
"]",
"output_channels",
"=",
"common_layers",
".",
"shape_list",
"(",
"last_latent",
")",
"[",
"-",
"1",
"]",
"num_steps",
"=",
"len",
"(",
"latent",
")",
"# Stack across time.",
"cond_latents",
"=",
"tf",
".",
"stack",
"(",
"latent",
",",
"axis",
"=",
"1",
")",
"# Concat latents from previous levels across channels.",
"prev_latents",
"=",
"tf",
".",
"tile",
"(",
"tf",
".",
"expand_dims",
"(",
"prior_dist",
".",
"loc",
",",
"axis",
"=",
"1",
")",
",",
"[",
"1",
",",
"num_steps",
",",
"1",
",",
"1",
",",
"1",
"]",
")",
"cond_latents",
"=",
"tf",
".",
"concat",
"(",
"(",
"cond_latents",
",",
"prev_latents",
")",
",",
"axis",
"=",
"-",
"1",
")",
"cond_latents",
"=",
"noise_op",
"(",
"cond_latents",
",",
"hparams",
")",
"cond_dist",
"=",
"temporal_latent_to_dist",
"(",
"\"latent_stack\"",
",",
"cond_latents",
",",
"hparams",
",",
"output_channels",
"=",
"output_channels",
")",
"elif",
"latent_dist_encoder",
"==",
"\"conv_lstm\"",
":",
"last_latent",
"=",
"latent",
"output_channels",
"=",
"common_layers",
".",
"shape_list",
"(",
"z",
")",
"[",
"-",
"1",
"]",
"latent_stack",
"=",
"tf",
".",
"concat",
"(",
"(",
"prior_dist",
".",
"loc",
",",
"latent",
")",
",",
"axis",
"=",
"-",
"1",
")",
"latent_stack",
"=",
"noise_op",
"(",
"latent_stack",
",",
"hparams",
")",
"_",
",",
"state",
"=",
"common_video",
".",
"conv_lstm_2d",
"(",
"latent_stack",
",",
"state",
",",
"hparams",
".",
"latent_encoder_width",
",",
"kernel_size",
"=",
"3",
",",
"name",
"=",
"\"conv_lstm\"",
")",
"cond_dist",
"=",
"single_conv_dist",
"(",
"\"state_to_dist\"",
",",
"state",
".",
"h",
",",
"output_channels",
"=",
"output_channels",
")",
"if",
"latent_skip",
":",
"new_mean",
"=",
"cond_dist",
".",
"loc",
"+",
"last_latent",
"cond_dist",
"=",
"tfp",
".",
"distributions",
".",
"Normal",
"(",
"new_mean",
",",
"cond_dist",
".",
"scale",
")",
"return",
"cond_dist",
".",
"loc",
",",
"cond_dist",
".",
"scale",
",",
"state"
] |
Returns a conditional prior for each level.
Args:
prior_dist: Distribution conditioned on the previous levels.
z: Tensor, output of the previous levels.
latent: Tensor or a list of tensors to condition the latent_distribution.
hparams: next_frame_glow hparams.
state: Current LSTM state. Used only if hparams.latent_dist_encoder is
a lstm.
Raises:
ValueError: If hparams.latent_dist_encoder is "pointwise" and if the shape
of latent is different from z.
|
[
"Returns",
"a",
"conditional",
"prior",
"for",
"each",
"level",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L976-L1044
|
21,825
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
revnet_step
|
def revnet_step(name, x, hparams, reverse=True):
"""One step of glow generative flow.
Actnorm + invertible 1X1 conv + affine_coupling.
Args:
name: used for variable scope.
x: input
    hparams: coupling, coupling_width, activation and coupling_dropout are
      the hparams used in this function.
reverse: forward or reverse pass.
Returns:
z: Output of one step of reversible flow.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if hparams.coupling == "additive":
coupling_layer = functools.partial(
additive_coupling, name="additive", reverse=reverse,
mid_channels=hparams.coupling_width,
activation=hparams.activation, dropout=hparams.coupling_dropout)
else:
coupling_layer = functools.partial(
affine_coupling, name="affine", reverse=reverse,
mid_channels=hparams.coupling_width,
activation=hparams.activation, dropout=hparams.coupling_dropout)
ops = [
functools.partial(actnorm, name="actnorm", reverse=reverse),
functools.partial(invertible_1x1_conv, name="invertible",
reverse=reverse), coupling_layer]
if reverse:
ops = ops[::-1]
objective = 0.0
for op in ops:
x, curr_obj = op(x=x)
objective += curr_obj
return x, objective
|
python
|
def revnet_step(name, x, hparams, reverse=True):
"""One step of glow generative flow.
Actnorm + invertible 1X1 conv + affine_coupling.
Args:
name: used for variable scope.
x: input
    hparams: coupling, coupling_width, activation and coupling_dropout are
      the hparams used in this function.
reverse: forward or reverse pass.
Returns:
z: Output of one step of reversible flow.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if hparams.coupling == "additive":
coupling_layer = functools.partial(
additive_coupling, name="additive", reverse=reverse,
mid_channels=hparams.coupling_width,
activation=hparams.activation, dropout=hparams.coupling_dropout)
else:
coupling_layer = functools.partial(
affine_coupling, name="affine", reverse=reverse,
mid_channels=hparams.coupling_width,
activation=hparams.activation, dropout=hparams.coupling_dropout)
ops = [
functools.partial(actnorm, name="actnorm", reverse=reverse),
functools.partial(invertible_1x1_conv, name="invertible",
reverse=reverse), coupling_layer]
if reverse:
ops = ops[::-1]
objective = 0.0
for op in ops:
x, curr_obj = op(x=x)
objective += curr_obj
return x, objective
|
[
"def",
"revnet_step",
"(",
"name",
",",
"x",
",",
"hparams",
",",
"reverse",
"=",
"True",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"if",
"hparams",
".",
"coupling",
"==",
"\"additive\"",
":",
"coupling_layer",
"=",
"functools",
".",
"partial",
"(",
"additive_coupling",
",",
"name",
"=",
"\"additive\"",
",",
"reverse",
"=",
"reverse",
",",
"mid_channels",
"=",
"hparams",
".",
"coupling_width",
",",
"activation",
"=",
"hparams",
".",
"activation",
",",
"dropout",
"=",
"hparams",
".",
"coupling_dropout",
")",
"else",
":",
"coupling_layer",
"=",
"functools",
".",
"partial",
"(",
"affine_coupling",
",",
"name",
"=",
"\"affine\"",
",",
"reverse",
"=",
"reverse",
",",
"mid_channels",
"=",
"hparams",
".",
"coupling_width",
",",
"activation",
"=",
"hparams",
".",
"activation",
",",
"dropout",
"=",
"hparams",
".",
"coupling_dropout",
")",
"ops",
"=",
"[",
"functools",
".",
"partial",
"(",
"actnorm",
",",
"name",
"=",
"\"actnorm\"",
",",
"reverse",
"=",
"reverse",
")",
",",
"functools",
".",
"partial",
"(",
"invertible_1x1_conv",
",",
"name",
"=",
"\"invertible\"",
",",
"reverse",
"=",
"reverse",
")",
",",
"coupling_layer",
"]",
"if",
"reverse",
":",
"ops",
"=",
"ops",
"[",
":",
":",
"-",
"1",
"]",
"objective",
"=",
"0.0",
"for",
"op",
"in",
"ops",
":",
"x",
",",
"curr_obj",
"=",
"op",
"(",
"x",
"=",
"x",
")",
"objective",
"+=",
"curr_obj",
"return",
"x",
",",
"objective"
] |
One step of glow generative flow.
Actnorm + invertible 1X1 conv + affine_coupling.
Args:
name: used for variable scope.
x: input
hparams: coupling, coupling_width, activation and coupling_dropout are
the hparams used in this function.
reverse: forward or reverse pass.
Returns:
z: Output of one step of reversible flow.
|
[
"One",
"step",
"of",
"glow",
"generative",
"flow",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L1156-L1193
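The invertibility hinges on reversing the op list for the backward pass: if the forward step is c(b(a(x))), the inverse must be a^-1(b^-1(c^-1(z))). A toy NumPy version with three simple invertible stand-ins for actnorm, the 1x1 conv, and the coupling, each returning a (scalar, for the sketch) log-det contribution:

import numpy as np

def scale_op(x, reverse):  # stand-in for actnorm
  return (x / 2.0, -np.log(2.0)) if reverse else (x * 2.0, np.log(2.0))

def neg_op(x, reverse):    # stand-in for the 1x1 conv; its own inverse
  return -x, 0.0

def shift_op(x, reverse):  # stand-in for the coupling; volume-preserving
  return (x - 1.0, 0.0) if reverse else (x + 1.0, 0.0)

def step(x, reverse=False):
  ops = [scale_op, neg_op, shift_op]
  if reverse:
    ops = ops[::-1]        # inverses must run in the opposite order
  objective = 0.0
  for op in ops:
    x, curr_obj = op(x, reverse)
    objective += curr_obj
  return x, objective

x = np.linspace(-1.0, 1.0, 5)
z, fwd = step(x)
x_rec, bwd = step(z, reverse=True)
assert np.allclose(x, x_rec) and np.allclose(fwd, -bwd)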
|
21,826
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
revnet
|
def revnet(name, x, hparams, reverse=True):
"""'hparams.depth' steps of generative flow.
Args:
name: variable scope for the revnet block.
x: 4-D Tensor, shape=(NHWC).
hparams: HParams.
reverse: bool, forward or backward pass.
Returns:
x: 4-D Tensor, shape=(NHWC).
objective: float.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
steps = np.arange(hparams.depth)
if reverse:
steps = steps[::-1]
objective = 0.0
for step in steps:
x, curr_obj = revnet_step(
"revnet_step_%d" % step, x, hparams, reverse=reverse)
objective += curr_obj
return x, objective
|
python
|
def revnet(name, x, hparams, reverse=True):
"""'hparams.depth' steps of generative flow.
Args:
name: variable scope for the revnet block.
x: 4-D Tensor, shape=(NHWC).
hparams: HParams.
reverse: bool, forward or backward pass.
Returns:
x: 4-D Tensor, shape=(NHWC).
objective: float.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
steps = np.arange(hparams.depth)
if reverse:
steps = steps[::-1]
objective = 0.0
for step in steps:
x, curr_obj = revnet_step(
"revnet_step_%d" % step, x, hparams, reverse=reverse)
objective += curr_obj
return x, objective
|
[
"def",
"revnet",
"(",
"name",
",",
"x",
",",
"hparams",
",",
"reverse",
"=",
"True",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"steps",
"=",
"np",
".",
"arange",
"(",
"hparams",
".",
"depth",
")",
"if",
"reverse",
":",
"steps",
"=",
"steps",
"[",
":",
":",
"-",
"1",
"]",
"objective",
"=",
"0.0",
"for",
"step",
"in",
"steps",
":",
"x",
",",
"curr_obj",
"=",
"revnet_step",
"(",
"\"revnet_step_%d\"",
"%",
"step",
",",
"x",
",",
"hparams",
",",
"reverse",
"=",
"reverse",
")",
"objective",
"+=",
"curr_obj",
"return",
"x",
",",
"objective"
] |
'hparams.depth' steps of generative flow.
Args:
name: variable scope for the revnet block.
x: 4-D Tensor, shape=(NHWC).
hparams: HParams.
reverse: bool, forward or backward pass.
Returns:
x: 4-D Tensor, shape=(NHWC).
objective: float.
|
[
"hparams",
".",
"depth",
"steps",
"of",
"generative",
"flow",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L1196-L1218
|
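A usage sketch chaining hparams.depth steps of the flow (same assumptions as above: TF 1.x and illustrative hparams values):

import tensorflow as tf
from tensor2tensor.models.research import glow_ops

hparams = tf.contrib.training.HParams(
    depth=4, coupling="additive", coupling_width=512,
    activation="relu", coupling_dropout=0.0)
x = tf.random_normal([16, 8, 8, 4])
z, fwd_objective = glow_ops.revnet("flow", x, hparams, reverse=False)
# The reverse pass walks the same revnet_steps in the opposite order.
x_back, _ = glow_ops.revnet("flow", z, hparams, reverse=True)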
21,827
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/glow_ops.py
|
top_prior
|
def top_prior(name, z_shape, learn_prior="normal", temperature=1.0):
"""Unconditional prior distribution.
Args:
name: variable scope
z_shape: Shape of the mean / scale of the prior distribution.
learn_prior: Possible options are "normal" and "single_conv".
If set to "single_conv", the gaussian is parametrized by a
single convolutional layer whose input is an array of zeros
and initialized such that the mean and std are zero and one.
If set to "normal", the prior is just a Gaussian with zero
mean and unit variance.
temperature: Temperature with which to sample from the Gaussian.
Returns:
objective: 1-D Tensor shape=(batch_size,) summed across spatial components.
Raises:
ValueError: If learn_prior not in "normal" or "single_conv"
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
h = tf.zeros(z_shape, dtype=tf.float32)
if learn_prior == "normal":
prior_dist = tfp.distributions.Normal(h, tf.exp(h))
elif learn_prior == "single_conv":
prior_dist = single_conv_dist("top_learn_prior", h)
else:
raise ValueError("Expected learn_prior to be normal or single_conv "
"got %s" % learn_prior)
return TemperedNormal(prior_dist.loc, prior_dist.scale, temperature)
|
python
|
def top_prior(name, z_shape, learn_prior="normal", temperature=1.0):
"""Unconditional prior distribution.
Args:
name: variable scope
z_shape: Shape of the mean / scale of the prior distribution.
learn_prior: Possible options are "normal" and "single_conv".
If set to "single_conv", the gaussian is parametrized by a
single convolutional layer whose input is an array of zeros
and initialized such that the mean and std are zero and one.
If set to "normal", the prior is just a Gaussian with zero
mean and unit variance.
temperature: Temperature with which to sample from the Gaussian.
Returns:
objective: 1-D Tensor shape=(batch_size,) summed across spatial components.
Raises:
ValueError: If learn_prior not in "normal" or "single_conv"
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
h = tf.zeros(z_shape, dtype=tf.float32)
if learn_prior == "normal":
prior_dist = tfp.distributions.Normal(h, tf.exp(h))
elif learn_prior == "single_conv":
prior_dist = single_conv_dist("top_learn_prior", h)
else:
raise ValueError("Expected learn_prior to be normal or single_conv "
"got %s" % learn_prior)
return TemperedNormal(prior_dist.loc, prior_dist.scale, temperature)
|
[
"def",
"top_prior",
"(",
"name",
",",
"z_shape",
",",
"learn_prior",
"=",
"\"normal\"",
",",
"temperature",
"=",
"1.0",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"h",
"=",
"tf",
".",
"zeros",
"(",
"z_shape",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"if",
"learn_prior",
"==",
"\"normal\"",
":",
"prior_dist",
"=",
"tfp",
".",
"distributions",
".",
"Normal",
"(",
"h",
",",
"tf",
".",
"exp",
"(",
"h",
")",
")",
"elif",
"learn_prior",
"==",
"\"single_conv\"",
":",
"prior_dist",
"=",
"single_conv_dist",
"(",
"\"top_learn_prior\"",
",",
"h",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Expected learn_prior to be normal or single_conv \"",
"\"got %s\"",
"%",
"learn_prior",
")",
"return",
"TemperedNormal",
"(",
"prior_dist",
".",
"loc",
",",
"prior_dist",
".",
"scale",
",",
"temperature",
")"
] |
Unconditional prior distribution.
Args:
name: variable scope
z_shape: Shape of the mean / scale of the prior distribution.
learn_prior: Possible options are "normal" and "single_conv".
If set to "single_conv", the gaussian is parametrized by a
single convolutional layer whose input is an array of zeros
and initialized such that the mean and std are zero and one.
If set to "normal", the prior is just a Gaussian with zero
mean and unit variance.
temperature: Temperature with which to sample from the Gaussian.
Returns:
objective: 1-D Tensor shape=(batch_size,) summed across spatial components.
Raises:
ValueError: If learn_prior not in "normal" or "single_conv"
|
[
"Unconditional",
"prior",
"distribution",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L1249-L1276
|
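A sketch of sampling from the prior; I assume TemperedNormal (defined elsewhere in glow_ops.py, not shown in this record) exposes the usual tfp sample()/log_prob() interface:

import tensorflow as tf
from tensor2tensor.models.research import glow_ops

dist = glow_ops.top_prior(
    "prior", z_shape=[16, 4, 4, 48], learn_prior="normal", temperature=0.7)
z_sample = dist.sample()         # draw from the temperature-scaled Gaussian
log_p = dist.log_prob(z_sample)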
21,828
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/quantization.py
|
bfloat16_activations_var_getter
|
def bfloat16_activations_var_getter(getter, *args, **kwargs):
"""A custom getter function for float32 parameters and bfloat16 activations.
Args:
getter: custom getter
*args: arguments
**kwargs: keyword arguments
Returns:
variables with the correct dtype.
Raises:
KeyError: if "dtype" is not provided as a kwarg.
"""
requested_dtype = kwargs["dtype"]
if requested_dtype == tf.bfloat16:
kwargs["dtype"] = tf.float32
var = getter(*args, **kwargs)
# This if statement is needed to guard the cast, because batch norm
# assigns directly to the return value of this custom getter. The cast
# makes the return value not a variable so it cannot be assigned. Batch
# norm variables are always in fp32 so this if statement is never
# triggered for them.
if var.dtype.base_dtype != requested_dtype:
var = tf.cast(var, requested_dtype)
return var
|
python
|
def bfloat16_activations_var_getter(getter, *args, **kwargs):
"""A custom getter function for float32 parameters and bfloat16 activations.
Args:
getter: custom getter
*args: arguments
**kwargs: keyword arguments
Returns:
variables with the correct dtype.
Raises:
KeyError: if "dtype" is not provided as a kwarg.
"""
requested_dtype = kwargs["dtype"]
if requested_dtype == tf.bfloat16:
kwargs["dtype"] = tf.float32
var = getter(*args, **kwargs)
# This if statement is needed to guard the cast, because batch norm
# assigns directly to the return value of this custom getter. The cast
# makes the return value not a variable so it cannot be assigned. Batch
# norm variables are always in fp32 so this if statement is never
# triggered for them.
if var.dtype.base_dtype != requested_dtype:
var = tf.cast(var, requested_dtype)
return var
|
[
"def",
"bfloat16_activations_var_getter",
"(",
"getter",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"requested_dtype",
"=",
"kwargs",
"[",
"\"dtype\"",
"]",
"if",
"requested_dtype",
"==",
"tf",
".",
"bfloat16",
":",
"kwargs",
"[",
"\"dtype\"",
"]",
"=",
"tf",
".",
"float32",
"var",
"=",
"getter",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# This if statement is needed to guard the cast, because batch norm",
"# assigns directly to the return value of this custom getter. The cast",
"# makes the return value not a variable so it cannot be assigned. Batch",
"# norm variables are always in fp32 so this if statement is never",
"# triggered for them.",
"if",
"var",
".",
"dtype",
".",
"base_dtype",
"!=",
"requested_dtype",
":",
"var",
"=",
"tf",
".",
"cast",
"(",
"var",
",",
"requested_dtype",
")",
"return",
"var"
] |
A custom getter function for float32 parameters and bfloat16 activations.
Args:
getter: custom getter
*args: arguments
**kwargs: keyword arguments
Returns:
variables with the correct dtype.
Raises:
KeyError: if "dtype" is not provided as a kwarg.
|
[
"A",
"custom",
"getter",
"function",
"for",
"float32",
"parameters",
"and",
"bfloat16",
"activations",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/quantization.py#L25-L48
|
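A sketch of wiring the getter into a variable scope (TF 1.x assumed):

import tensorflow as tf
from tensor2tensor.utils import quantization

with tf.variable_scope(
    "body", custom_getter=quantization.bfloat16_activations_var_getter):
  w = tf.get_variable("w", [256, 256], dtype=tf.bfloat16)
# The variable is created and stored in float32; `w` here is the value cast
# back to bfloat16, so downstream activations run in bfloat16.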
21,829
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/quantization.py
|
float16_activations_var_getter
|
def float16_activations_var_getter(getter, *args, **kwargs):
"""A custom getter function for float32 parameters and float16 activations.
This function ensures the following:
1. All variables requested with type fp16 are stored as type fp32.
2. All variables requested with type fp32 are returned as type fp16.
See https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/
#training_tensorflow for more information on this strategy.
Args:
getter: custom getter
*args: arguments
**kwargs: keyword arguments
Returns:
variables with the correct dtype.
Raises:
KeyError: if "dtype" is not provided as a kwarg.
"""
requested_dtype = kwargs["dtype"]
if requested_dtype == tf.float16:
kwargs["dtype"] = tf.float32
if requested_dtype == tf.float32:
requested_dtype = tf.float16
var = getter(*args, **kwargs)
# This if statement is needed to guard the cast, because batch norm
# assigns directly to the return value of this custom getter. The cast
# makes the return value not a variable so it cannot be assigned. Batch
# norm variables are always in fp32 so this if statement is never
# triggered for them.
if var.dtype.base_dtype != requested_dtype:
var = tf.cast(var, requested_dtype)
return var
|
python
|
def float16_activations_var_getter(getter, *args, **kwargs):
"""A custom getter function for float32 parameters and float16 activations.
This function ensures the following:
1. All variables requested with type fp16 are stored as type fp32.
2. All variables requested with type fp32 are returned as type fp16.
See https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/
#training_tensorflow for more information on this strategy.
Args:
getter: custom getter
*args: arguments
**kwargs: keyword arguments
Returns:
variables with the correct dtype.
Raises:
KeyError: if "dtype" is not provided as a kwarg.
"""
requested_dtype = kwargs["dtype"]
if requested_dtype == tf.float16:
kwargs["dtype"] = tf.float32
if requested_dtype == tf.float32:
requested_dtype = tf.float16
var = getter(*args, **kwargs)
# This if statement is needed to guard the cast, because batch norm
# assigns directly to the return value of this custom getter. The cast
# makes the return value not a variable so it cannot be assigned. Batch
# norm variables are always in fp32 so this if statement is never
# triggered for them.
if var.dtype.base_dtype != requested_dtype:
var = tf.cast(var, requested_dtype)
return var
|
[
"def",
"float16_activations_var_getter",
"(",
"getter",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"requested_dtype",
"=",
"kwargs",
"[",
"\"dtype\"",
"]",
"if",
"requested_dtype",
"==",
"tf",
".",
"float16",
":",
"kwargs",
"[",
"\"dtype\"",
"]",
"=",
"tf",
".",
"float32",
"if",
"requested_dtype",
"==",
"tf",
".",
"float32",
":",
"requested_dtype",
"=",
"tf",
".",
"float16",
"var",
"=",
"getter",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# This if statement is needed to guard the cast, because batch norm",
"# assigns directly to the return value of this custom getter. The cast",
"# makes the return value not a variable so it cannot be assigned. Batch",
"# norm variables are always in fp32 so this if statement is never",
"# triggered for them.",
"if",
"var",
".",
"dtype",
".",
"base_dtype",
"!=",
"requested_dtype",
":",
"var",
"=",
"tf",
".",
"cast",
"(",
"var",
",",
"requested_dtype",
")",
"return",
"var"
] |
A custom getter function for float32 parameters and float16 activations.
This function ensures the following:
1. All variables requested with type fp16 are stored as type fp32.
2. All variables requested with type fp32 are returned as type fp16.
See https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/
#training_tensorflow for more information on this strategy.
Args:
getter: custom getter
*args: arguments
**kwargs: keyword arguments
Returns:
variables with the correct dtype.
Raises:
KeyError: if "dtype" is not provided as a kwarg.
|
[
"A",
"custom",
"getter",
"function",
"for",
"float32",
"parameters",
"and",
"float16",
"activations",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/quantization.py#L51-L86
|
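The float16 variant is wired the same way; per the docstring, fp16 requests get fp32 master storage while fp32 requests come back as fp16 activations (TF 1.x assumed):

import tensorflow as tf
from tensor2tensor.utils import quantization

with tf.variable_scope(
    "body", custom_getter=quantization.float16_activations_var_getter):
  w = tf.get_variable("w", [256, 256], dtype=tf.float32)
# `w` is returned cast to float16 even though it was requested as float32.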
21,830
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/quantization.py
|
simulated_quantize
|
def simulated_quantize(x, num_bits, noise):
"""Simulate quantization to num_bits bits, with externally-stored scale.
num_bits is the number of bits used to store each value.
noise is a float32 Tensor containing values in [0, 1).
Each value in noise should take different values across
different steps, approximating a uniform distribution over [0, 1).
In the case of replicated TPU training, noise should be identical
across replicas in order to keep the parameters identical across replicas.
The natural choice for noise would be tf.random_uniform(),
but this is not possible for TPU, since there is currently no way to seed
the different cores to produce identical values across replicas. Instead we
use noise_from_step_num() (see below).
The quantization scheme is as follows:
Compute the maximum absolute value by row (call this max_abs).
Store this either in an auxiliary variable or in an extra column.
Divide the parameters by (max_abs / (2^(num_bits-1)-1)). This gives a
float32 value in the range [-(2^(num_bits-1)-1), 2^(num_bits-1)-1].
Unbiased randomized roundoff by adding noise and rounding down.
This produces a signed integer with num_bits bits which can then be stored.
Args:
x: a float32 Tensor
num_bits: an integer between 1 and 22
noise: a float Tensor broadcastable to the shape of x.
Returns:
a float32 Tensor
"""
shape = x.get_shape().as_list()
if not (len(shape) >= 2 and shape[-1] > 1):
return x
max_abs = tf.reduce_max(tf.abs(x), -1, keepdims=True) + 1e-9
max_int = 2 ** (num_bits - 1) - 1
scale = max_abs / max_int
x /= scale
x = tf.floor(x + noise)
# dequantize before storing (since this is a simulation)
x *= scale
return x
|
python
|
def simulated_quantize(x, num_bits, noise):
"""Simulate quantization to num_bits bits, with externally-stored scale.
num_bits is the number of bits used to store each value.
noise is a float32 Tensor containing values in [0, 1).
Each value in noise should take different values across
different steps, approximating a uniform distribution over [0, 1).
In the case of replicated TPU training, noise should be identical
across replicas in order to keep the parameters identical across replicas.
The natural choice for noise would be tf.random_uniform(),
but this is not possible for TPU, since there is currently no way to seed
the different cores to produce identical values across replicas. Instead we
use noise_from_step_num() (see below).
The quantization scheme is as follows:
Compute the maximum absolute value by row (call this max_abs).
Store this either in an auxiliary variable or in an extra column.
Divide the parameters by (max_abs / (2^(num_bits-1)-1)). This gives a
float32 value in the range [-(2^(num_bits-1)-1), 2^(num_bits-1)-1].
Unbiased randomized roundoff by adding noise and rounding down.
This produces a signed integer with num_bits bits which can then be stored.
Args:
x: a float32 Tensor
num_bits: an integer between 1 and 22
noise: a float Tensor broadcastable to the shape of x.
Returns:
a float32 Tensor
"""
shape = x.get_shape().as_list()
if not (len(shape) >= 2 and shape[-1] > 1):
return x
max_abs = tf.reduce_max(tf.abs(x), -1, keepdims=True) + 1e-9
max_int = 2 ** (num_bits - 1) - 1
scale = max_abs / max_int
x /= scale
x = tf.floor(x + noise)
# dequantize before storing (since this is a simulation)
x *= scale
return x
|
[
"def",
"simulated_quantize",
"(",
"x",
",",
"num_bits",
",",
"noise",
")",
":",
"shape",
"=",
"x",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"if",
"not",
"(",
"len",
"(",
"shape",
")",
">=",
"2",
"and",
"shape",
"[",
"-",
"1",
"]",
">",
"1",
")",
":",
"return",
"x",
"max_abs",
"=",
"tf",
".",
"reduce_max",
"(",
"tf",
".",
"abs",
"(",
"x",
")",
",",
"-",
"1",
",",
"keepdims",
"=",
"True",
")",
"+",
"1e-9",
"max_int",
"=",
"2",
"**",
"(",
"num_bits",
"-",
"1",
")",
"-",
"1",
"scale",
"=",
"max_abs",
"/",
"max_int",
"x",
"/=",
"scale",
"x",
"=",
"tf",
".",
"floor",
"(",
"x",
"+",
"noise",
")",
"# dequantize before storing (since this is a simulation)",
"x",
"*=",
"scale",
"return",
"x"
] |
Simulate quantization to num_bits bits, with externally-stored scale.
num_bits is the number of bits used to store each value.
noise is a float32 Tensor containing values in [0, 1).
Each value in noise should take different values across
different steps, approximating a uniform distribution over [0, 1).
In the case of replicated TPU training, noise should be identical
across replicas in order to keep the parameters identical across replicas.
The natural choice for noise would be tf.random_uniform(),
but this is not possible for TPU, since there is currently no way to seed
the different cores to produce identical values across replicas. Instead we
use noise_from_step_num() (see below).
The quantization scheme is as follows:
Compute the maximum absolute value by row (call this max_abs).
Store this either in an auxiliary variable or in an extra column.
Divide the parameters by (max_abs / (2^(num_bits-1)-1)). This gives a
float32 value in the range [-(2^(num_bits-1)-1), 2^(num_bits-1)-1].
Unbiased randomized roundoff by adding noise and rounding down.
This produces a signed integer with num_bits bits which can then be stored.
Args:
x: a float32 Tensor
num_bits: an integer between 1 and 22
noise: a float Tensor broadcastable to the shape of x.
Returns:
a float32 Tensor
|
[
"Simulate",
"quantization",
"to",
"num_bits",
"bits",
"with",
"externally",
"-",
"stored",
"scale",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/quantization.py#L89-L134
|
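A sketch of 8-bit simulated quantization; off-TPU a fresh uniform noise tensor is fine, while on TPU the docstring recommends noise_from_step_num():

import tensorflow as tf
from tensor2tensor.utils import quantization

x = tf.random_normal([128, 512])
noise = tf.random_uniform([])   # scalar noise broadcasts over x
x_q = quantization.simulated_quantize(x, num_bits=8, noise=noise)
# x_q keeps x's shape and dtype, but each row is snapped to roughly 2^8
# levels and then dequantized again (since this is a simulation).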
21,831
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/quantization.py
|
_randomized_roundoff_to_bfloat16
|
def _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2):
"""Round-off x to cand1 or to cand2 in an unbiased way.
Cand1 and cand2 are the same shape as x.
For every element of x, the corresponding elements of cand1 and cand2 should
be the two closest bfloat16 values to x. Order does not matter.
cand1 and cand2 must differ from each other.
Args:
x: A float32 Tensor.
noise: A Tensor broadcastable to the shape of x containing
random uniform values in [0.0, 1.0].
cand1: A bfloat16 Tensor the same shape as x.
cand2: A bfloat16 Tensor the same shape as x.
Returns:
A bfloat16 Tensor.
"""
cand1_f = tf.to_float(cand1)
cand2_f = tf.to_float(cand2)
step_size = cand2_f - cand1_f
fpart = (x - cand1_f) / step_size
ret = tf.where(tf.greater(fpart, noise), cand2, cand1)
return ret
|
python
|
def _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2):
"""Round-off x to cand1 or to cand2 in an unbiased way.
Cand1 and cand2 are the same shape as x.
For every element of x, the corresponding elements of cand1 and cand2 should
be the two closest bfloat16 values to x. Order does not matter.
cand1 and cand2 must differ from each other.
Args:
x: A float32 Tensor.
noise: A Tensor broadcastable to the shape of x containing
random uniform values in [0.0, 1.0].
cand1: A bfloat16 Tensor the same shape as x.
cand2: A bfloat16 Tensor the same shape as x.
Returns:
A bfloat16 Tensor.
"""
cand1_f = tf.to_float(cand1)
cand2_f = tf.to_float(cand2)
step_size = cand2_f - cand1_f
fpart = (x - cand1_f) / step_size
ret = tf.where(tf.greater(fpart, noise), cand2, cand1)
return ret
|
[
"def",
"_randomized_roundoff_to_bfloat16",
"(",
"x",
",",
"noise",
",",
"cand1",
",",
"cand2",
")",
":",
"cand1_f",
"=",
"tf",
".",
"to_float",
"(",
"cand1",
")",
"cand2_f",
"=",
"tf",
".",
"to_float",
"(",
"cand2",
")",
"step_size",
"=",
"cand2_f",
"-",
"cand1_f",
"fpart",
"=",
"(",
"x",
"-",
"cand1_f",
")",
"/",
"step_size",
"ret",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"greater",
"(",
"fpart",
",",
"noise",
")",
",",
"cand2",
",",
"cand1",
")",
"return",
"ret"
] |
Round-off x to cand1 or to cand2 in an unbiased way.
Cand1 and cand2 are the same shape as x.
For every element of x, the corresponding elements of cand1 and cand2 should
be the two closest bfloat16 values to x. Order does not matter.
cand1 and cand2 must differ from each other.
Args:
x: A float32 Tensor.
noise: A Tensor broadcastable to the shape of x containing
random uniform values in [0.0, 1.0].
cand1: A bfloat16 Tensor the same shape as x.
cand2: A bfloat16 Tensor the same shape as x.
Returns:
A bfloat16 Tensor.
|
[
"Round",
"-",
"off",
"x",
"to",
"cand1",
"or",
"to",
"cand2",
"in",
"an",
"unbiased",
"way",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/quantization.py#L160-L183
|
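A sketch illustrating the unbiasedness property; the candidate construction below mirrors _to_bfloat16_unbiased (the 1.005/0.995 factors are that function's heuristic, not something this helper requires):

import tensorflow as tf
from tensor2tensor.utils import quantization

x = tf.fill([100000], 1.2345)
cand1 = tf.to_bfloat16(x)
cand1_f = tf.to_float(cand1)
cand2 = tf.to_bfloat16(
    tf.where(tf.greater(x, cand1_f), cand1_f * 1.005, cand1_f * 0.995))
noise = tf.random_uniform([100000])
y = quantization._randomized_roundoff_to_bfloat16(x, noise, cand1, cand2)
mean_y = tf.reduce_mean(tf.to_float(y))   # approximately 1.2345 in expectation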
21,832
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/quantization.py
|
_to_bfloat16_unbiased
|
def _to_bfloat16_unbiased(x, noise):
"""Convert a float32 to a bfloat16 using randomized roundoff.
Args:
x: A float32 Tensor.
noise: a float32 Tensor with values in [0, 1), broadcastable to tf.shape(x)
Returns:
A float32 Tensor.
"""
x_sign = tf.sign(x)
# Make sure x is positive. If it is zero, the two candidates are identical.
x = x * x_sign + 1e-30
cand1 = tf.to_bfloat16(x)
cand1_f = tf.to_float(cand1)
# This relies on the fact that for a positive bfloat16 b,
# b * 1.005 gives you the next higher bfloat16 and b*0.995 gives you the
# next lower one. Both 1.005 and 0.995 are ballpark estimates.
cand2 = tf.to_bfloat16(
tf.where(tf.greater(x, cand1_f), cand1_f * 1.005, cand1_f * 0.995))
ret = _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2)
return ret * tf.to_bfloat16(x_sign)
|
python
|
def _to_bfloat16_unbiased(x, noise):
"""Convert a float32 to a bfloat16 using randomized roundoff.
Args:
x: A float32 Tensor.
noise: a float32 Tensor with values in [0, 1), broadcastable to tf.shape(x)
Returns:
A float32 Tensor.
"""
x_sign = tf.sign(x)
# Make sure x is positive. If it is zero, the two candidates are identical.
x = x * x_sign + 1e-30
cand1 = tf.to_bfloat16(x)
cand1_f = tf.to_float(cand1)
# This relies on the fact that for a positive bfloat16 b,
# b * 1.005 gives you the next higher bfloat16 and b*0.995 gives you the
# next lower one. Both 1.005 and 0.995 are ballpark estimates.
cand2 = tf.to_bfloat16(
tf.where(tf.greater(x, cand1_f), cand1_f * 1.005, cand1_f * 0.995))
ret = _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2)
return ret * tf.to_bfloat16(x_sign)
|
[
"def",
"_to_bfloat16_unbiased",
"(",
"x",
",",
"noise",
")",
":",
"x_sign",
"=",
"tf",
".",
"sign",
"(",
"x",
")",
"# Make sure x is positive. If it is zero, the two candidates are identical.",
"x",
"=",
"x",
"*",
"x_sign",
"+",
"1e-30",
"cand1",
"=",
"tf",
".",
"to_bfloat16",
"(",
"x",
")",
"cand1_f",
"=",
"tf",
".",
"to_float",
"(",
"cand1",
")",
"# This relies on the fact that for a positive bfloat16 b,",
"# b * 1.005 gives you the next higher bfloat16 and b*0.995 gives you the",
"# next lower one. Both 1.005 and 0.995 are ballpark estimation.",
"cand2",
"=",
"tf",
".",
"to_bfloat16",
"(",
"tf",
".",
"where",
"(",
"tf",
".",
"greater",
"(",
"x",
",",
"cand1_f",
")",
",",
"cand1_f",
"*",
"1.005",
",",
"cand1_f",
"*",
"0.995",
")",
")",
"ret",
"=",
"_randomized_roundoff_to_bfloat16",
"(",
"x",
",",
"noise",
",",
"cand1",
",",
"cand2",
")",
"return",
"ret",
"*",
"tf",
".",
"to_bfloat16",
"(",
"x_sign",
")"
] |
Convert a float32 to a bfloat16 using randomized roundoff.
Args:
x: A float32 Tensor.
noise: a float32 Tensor with values in [0, 1), broadcastable to tf.shape(x)
Returns:
A float32 Tensor.
|
[
"Convert",
"a",
"float32",
"to",
"a",
"bfloat16",
"using",
"randomized",
"roundoff",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/quantization.py#L186-L206
|
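The wrapper is simpler to call directly; a sketch of the same expectation check:

import tensorflow as tf
from tensor2tensor.utils import quantization

x = tf.fill([100000], 1.2345)
noise = tf.random_uniform([100000])
y = quantization._to_bfloat16_unbiased(x, noise)
# Averaging many randomly rounded copies recovers x despite bfloat16's
# coarse 8-bit mantissa.
approx_x = tf.reduce_mean(tf.to_float(y))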
21,833
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/quantization.py
|
ParameterEncoding.custom_getter
|
def custom_getter(self, activation_dtype=tf.bfloat16):
"""A custom getter that uses the encoding for bfloat16 and float32 vars.
When a bfloat16 or float32 variable is requested, an encoded bfloat16
variable is created, which is then decoded and cast to a bfloat16
activation.
Args:
activation_dtype: a dtype to which to convert the decoded value.
Returns:
a function.
"""
def getter_fn(getter, *args, **kwargs):
requested_dtype = kwargs["dtype"]
if requested_dtype in (tf.bfloat16, tf.float32):
kwargs["dtype"] = tf.bfloat16
kwargs["initializer"] = _EncodingInitializer(
kwargs["initializer"], self)
ret = self._decode_with_identity_gradient(getter(*args, **kwargs))
return tf.cast(ret, activation_dtype)
return getter(*args, **kwargs)
return getter_fn
|
python
|
def custom_getter(self, activation_dtype=tf.bfloat16):
"""A custom getter that uses the encoding for bfloat16 and float32 vars.
When a bfloat16 or float32 variable is requested, an encoded bfloat16
variable is created, which is then decoded and cast to a bfloat16
activation.
Args:
activation_dtype: a dtype to which to convert the decoded value.
Returns:
a function.
"""
def getter_fn(getter, *args, **kwargs):
requested_dtype = kwargs["dtype"]
if requested_dtype in (tf.bfloat16, tf.float32):
kwargs["dtype"] = tf.bfloat16
kwargs["initializer"] = _EncodingInitializer(
kwargs["initializer"], self)
ret = self._decode_with_identity_gradient(getter(*args, **kwargs))
return tf.cast(ret, activation_dtype)
return getter(*args, **kwargs)
return getter_fn
|
[
"def",
"custom_getter",
"(",
"self",
",",
"activation_dtype",
"=",
"tf",
".",
"bfloat16",
")",
":",
"def",
"getter_fn",
"(",
"getter",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"requested_dtype",
"=",
"kwargs",
"[",
"\"dtype\"",
"]",
"if",
"requested_dtype",
"in",
"(",
"tf",
".",
"bfloat16",
",",
"tf",
".",
"float32",
")",
":",
"kwargs",
"[",
"\"dtype\"",
"]",
"=",
"tf",
".",
"bfloat16",
"kwargs",
"[",
"\"initializer\"",
"]",
"=",
"_EncodingInitializer",
"(",
"kwargs",
"[",
"\"initializer\"",
"]",
",",
"self",
")",
"ret",
"=",
"self",
".",
"_decode_with_identity_gradient",
"(",
"getter",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"tf",
".",
"cast",
"(",
"ret",
",",
"activation_dtype",
")",
"return",
"getter",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"getter_fn"
] |
A custom getter that uses the encoding for bfloat16 and float32 vars.
When a bfloat16 or float32 variable is requested, an encoded bfloat16
variable is created, which is then decoded and cast to a bfloat16
activation.
Args:
activation_dtype: a dtype to which to convert the decoded value.
Returns:
a function.
|
[
"A",
"custom",
"getter",
"that",
"uses",
"the",
"encoding",
"for",
"bfloat16",
"and",
"float32",
"vars",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/quantization.py#L246-L268
|
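A wiring sketch; EighthPowerEncoding is assumed to be a concrete ParameterEncoding subclass defined in the same file at this commit, so treat that class name as an assumption:

import tensorflow as tf
from tensor2tensor.utils import quantization

encoding = quantization.EighthPowerEncoding()   # assumed concrete subclass
getter = encoding.custom_getter(activation_dtype=tf.bfloat16)
with tf.variable_scope("model", custom_getter=getter):
  w = tf.get_variable("w", [1024, 1024], dtype=tf.float32)
# Storage is an encoded bfloat16 variable; `w` is the decoded bfloat16 value.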
21,834
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/video_metrics.py
|
load_videos
|
def load_videos(template, video_length, frame_shape):
"""Loads videos from files.
Args:
template: template string for listing the image files.
video_length: length of the video.
frame_shape: shape of each frame.
Returns:
dataset: the tf dataset frame by frame.
dataset_len: number of items, i.e. the number of image files.
Raises:
ValueError: if no files found.
"""
filenames = tf.gfile.Glob(template)
if not filenames:
raise ValueError("no files found.")
filenames = sorted(filenames)
dataset_len = len(filenames)
filenames = tf.constant(filenames)
dataset = tf.data.Dataset.from_tensor_slices(filenames)
dataset = dataset.apply(tf.data.experimental.map_and_batch(
lambda filename: load_image_map_function(filename, frame_shape),
video_length, drop_remainder=True))
return dataset, dataset_len
|
python
|
def load_videos(template, video_length, frame_shape):
"""Loads videos from files.
Args:
template: template string for listing the image files.
video_length: length of the video.
frame_shape: shape of each frame.
Returns:
dataset: the tf dataset frame by frame.
dataset_len: number of items, i.e. the number of image files.
Raises:
ValueError: if no files found.
"""
filenames = tf.gfile.Glob(template)
if not filenames:
raise ValueError("no files found.")
filenames = sorted(filenames)
dataset_len = len(filenames)
filenames = tf.constant(filenames)
dataset = tf.data.Dataset.from_tensor_slices(filenames)
dataset = dataset.apply(tf.data.experimental.map_and_batch(
lambda filename: load_image_map_function(filename, frame_shape),
video_length, drop_remainder=True))
return dataset, dataset_len
|
[
"def",
"load_videos",
"(",
"template",
",",
"video_length",
",",
"frame_shape",
")",
":",
"filenames",
"=",
"tf",
".",
"gfile",
".",
"Glob",
"(",
"template",
")",
"if",
"not",
"filenames",
":",
"raise",
"ValueError",
"(",
"\"no files found.\"",
")",
"filenames",
"=",
"sorted",
"(",
"filenames",
")",
"dataset_len",
"=",
"len",
"(",
"filenames",
")",
"filenames",
"=",
"tf",
".",
"constant",
"(",
"filenames",
")",
"dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"filenames",
")",
"dataset",
"=",
"dataset",
".",
"apply",
"(",
"tf",
".",
"data",
".",
"experimental",
".",
"map_and_batch",
"(",
"lambda",
"filename",
":",
"load_image_map_function",
"(",
"filename",
",",
"frame_shape",
")",
",",
"video_length",
",",
"drop_remainder",
"=",
"True",
")",
")",
"return",
"dataset",
",",
"dataset_len"
] |
Loads videos from files.
Args:
template: template string for listing the image files.
video_length: length of the video.
frame_shape: shape of each frame.
Returns:
dataset: the tf dataset frame by frame.
dataset_len: number of items, i.e. the number of image files.
Raises:
ValueError: if no files found.
|
[
"Loads",
"videos",
"from",
"files",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/video_metrics.py#L38-L63
|
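A usage sketch; the glob pattern below is hypothetical, and TF 1.x iteration is shown:

import tensorflow as tf
from tensor2tensor.utils import video_metrics

dataset, num_files = video_metrics.load_videos(
    template="/tmp/decode/*_targets.png",   # hypothetical frame files
    video_length=16, frame_shape=[64, 64, 3])
# Each dataset element batches video_length frames into one video.
video = dataset.make_one_shot_iterator().get_next()  # shape (16, 64, 64, 3)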
21,835
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/video_metrics.py
|
psnr_and_ssim
|
def psnr_and_ssim(output, target):
"""Compute the PSNR and SSIM.
Args:
output: 4-D Tensor, shape=(num_frames, height, width, num_channels)
target: 4-D Tensor, shape=(num_frames, height, width, num_channels)
Returns:
psnr: 1-D Tensor, shape=(num_frames,)
ssim: 1-D Tensor, shape=(num_frames,)
"""
output = tf.cast(output, dtype=tf.int32)
target = tf.cast(target, dtype=tf.int32)
psnr = tf.image.psnr(output, target, max_val=255)
ssim = tf.image.ssim(output, target, max_val=255)
return psnr, ssim
|
python
|
def psnr_and_ssim(output, target):
"""Compute the PSNR and SSIM.
Args:
output: 4-D Tensor, shape=(num_frames, height, width, num_channels)
target: 4-D Tensor, shape=(num_frames, height, width, num_channels)
Returns:
psnr: 1-D Tensor, shape=(num_frames,)
ssim: 1-D Tensor, shape=(num_frames,)
"""
output = tf.cast(output, dtype=tf.int32)
target = tf.cast(target, dtype=tf.int32)
psnr = tf.image.psnr(output, target, max_val=255)
ssim = tf.image.ssim(output, target, max_val=255)
return psnr, ssim
|
[
"def",
"psnr_and_ssim",
"(",
"output",
",",
"target",
")",
":",
"output",
"=",
"tf",
".",
"cast",
"(",
"output",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"target",
"=",
"tf",
".",
"cast",
"(",
"target",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"psnr",
"=",
"tf",
".",
"image",
".",
"psnr",
"(",
"output",
",",
"target",
",",
"max_val",
"=",
"255",
")",
"ssim",
"=",
"tf",
".",
"image",
".",
"ssim",
"(",
"output",
",",
"target",
",",
"max_val",
"=",
"255",
")",
"return",
"psnr",
",",
"ssim"
] |
Compute the PSNR and SSIM.
Args:
output: 4-D Tensor, shape=(num_frames, height, width, num_channels)
target: 4-D Tensor, shape=(num_frames, height, width, num_channels)
Returns:
psnr: 1-D Tensor, shape=(num_frames,)
ssim: 1-D Tensor, shape=(num_frames,)
|
[
"Compute",
"the",
"PSNR",
"and",
"SSIM",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/video_metrics.py#L93-L107
|
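A sketch on random frames, assuming pixel values in [0, 255] as implied by max_val=255:

import tensorflow as tf
from tensor2tensor.utils import video_metrics

output = tf.random_uniform([16, 64, 64, 3], maxval=256, dtype=tf.int32)
target = tf.random_uniform([16, 64, 64, 3], maxval=256, dtype=tf.int32)
psnr, ssim = video_metrics.psnr_and_ssim(output, target)  # each of shape (16,)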
21,836
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/video_metrics.py
|
get_zipped_dataset_from_predictions
|
def get_zipped_dataset_from_predictions(predictions):
"""Creates dataset from in-memory predictions."""
targets = stack_data_given_key(predictions, "targets")
outputs = stack_data_given_key(predictions, "outputs")
num_videos, num_steps = targets.shape[:2]
# Truncate output time-steps to match target time-steps
outputs = outputs[:, :num_steps]
targets_placeholder = tf.placeholder(targets.dtype, targets.shape)
outputs_placeholder = tf.placeholder(outputs.dtype, outputs.shape)
dataset = tf.data.Dataset.from_tensor_slices(
(targets_placeholder, outputs_placeholder))
iterator = dataset.make_initializable_iterator()
feed_dict = {targets_placeholder: targets,
outputs_placeholder: outputs}
return iterator, feed_dict, num_videos
|
python
|
def get_zipped_dataset_from_predictions(predictions):
"""Creates dataset from in-memory predictions."""
targets = stack_data_given_key(predictions, "targets")
outputs = stack_data_given_key(predictions, "outputs")
num_videos, num_steps = targets.shape[:2]
# Truncate output time-steps to match target time-steps
outputs = outputs[:, :num_steps]
targets_placeholder = tf.placeholder(targets.dtype, targets.shape)
outputs_placeholder = tf.placeholder(outputs.dtype, outputs.shape)
dataset = tf.data.Dataset.from_tensor_slices(
(targets_placeholder, outputs_placeholder))
iterator = dataset.make_initializable_iterator()
feed_dict = {targets_placeholder: targets,
outputs_placeholder: outputs}
return iterator, feed_dict, num_videos
|
[
"def",
"get_zipped_dataset_from_predictions",
"(",
"predictions",
")",
":",
"targets",
"=",
"stack_data_given_key",
"(",
"predictions",
",",
"\"targets\"",
")",
"outputs",
"=",
"stack_data_given_key",
"(",
"predictions",
",",
"\"outputs\"",
")",
"num_videos",
",",
"num_steps",
"=",
"targets",
".",
"shape",
"[",
":",
"2",
"]",
"# Truncate output time-steps to match target time-steps",
"outputs",
"=",
"outputs",
"[",
":",
",",
":",
"num_steps",
"]",
"targets_placeholder",
"=",
"tf",
".",
"placeholder",
"(",
"targets",
".",
"dtype",
",",
"targets",
".",
"shape",
")",
"outputs_placeholder",
"=",
"tf",
".",
"placeholder",
"(",
"outputs",
".",
"dtype",
",",
"outputs",
".",
"shape",
")",
"dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"(",
"targets_placeholder",
",",
"outputs_placeholder",
")",
")",
"iterator",
"=",
"dataset",
".",
"make_initializable_iterator",
"(",
")",
"feed_dict",
"=",
"{",
"targets_placeholder",
":",
"targets",
",",
"outputs_placeholder",
":",
"outputs",
"}",
"return",
"iterator",
",",
"feed_dict",
",",
"num_videos"
] |
Creates dataset from in-memory predictions.
|
[
"Creates",
"dataset",
"from",
"in",
"-",
"memory",
"predictions",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/video_metrics.py#L116-L132
|
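A sketch with synthetic in-memory predictions; the per-sample dict layout with "targets"/"outputs" keys is an assumption inferred from stack_data_given_key, which is not shown in this record:

import numpy as np
from tensor2tensor.utils import video_metrics

single_decode = [{"targets": np.zeros((16, 64, 64, 3), np.uint8),
                  "outputs": np.zeros((20, 64, 64, 3), np.uint8)}
                 for _ in range(8)]
iterator, feed_dict, num_videos = (
    video_metrics.get_zipped_dataset_from_predictions(single_decode))
# num_videos == 8; the 20-step outputs are truncated to the 16 target steps.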
21,837
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/video_metrics.py
|
reduce_to_best_decode
|
def reduce_to_best_decode(metrics, reduce_func):
"""Extracts the best-decode from the metrics according to reduce_func.
Args:
metrics: 3-D numpy array, shape=(num_decodes, num_samples, num_frames)
reduce_func: callable, np.argmax or np.argmin.
Returns:
best_metrics: 2-D numpy array, shape=(num_samples, num_frames).
best_decode_ind: 1-D numpy array, shape=(num_samples,)
"""
num_videos = metrics.shape[1]
# Take mean of the metric across the frames to approximate the video
# closest to the ground truth.
mean_across_frames = np.mean(metrics, axis=-1)
# For every sample, use the decode that has a maximum mean-metric.
best_decode_ind = reduce_func(mean_across_frames, axis=0)
best_metrics = metrics[best_decode_ind, np.arange(num_videos), :]
return best_metrics, best_decode_ind
|
python
|
def reduce_to_best_decode(metrics, reduce_func):
"""Extracts the best-decode from the metrics according to reduce_func.
Args:
metrics: 3-D numpy array, shape=(num_decodes, num_samples, num_frames)
reduce_func: callable, np.argmax or np.argmin.
Returns:
best_metrics: 2-D numpy array, shape=(num_samples, num_frames).
best_decode_ind: 1-D numpy array, shape=(num_samples,)
"""
num_videos = metrics.shape[1]
# Take mean of the metric across the frames to approximate the video
# closest to the ground truth.
mean_across_frames = np.mean(metrics, axis=-1)
# For every sample, use the decode that has a maximum mean-metric.
best_decode_ind = reduce_func(mean_across_frames, axis=0)
best_metrics = metrics[best_decode_ind, np.arange(num_videos), :]
return best_metrics, best_decode_ind
|
[
"def",
"reduce_to_best_decode",
"(",
"metrics",
",",
"reduce_func",
")",
":",
"num_videos",
"=",
"metrics",
".",
"shape",
"[",
"1",
"]",
"# Take mean of the metric across the frames to approximate the video",
"# closest to the ground truth.",
"mean_across_frames",
"=",
"np",
".",
"mean",
"(",
"metrics",
",",
"axis",
"=",
"-",
"1",
")",
"# For every sample, use the decode that has a maximum mean-metric.",
"best_decode_ind",
"=",
"reduce_func",
"(",
"mean_across_frames",
",",
"axis",
"=",
"0",
")",
"best_metrics",
"=",
"metrics",
"[",
"best_decode_ind",
",",
"np",
".",
"arange",
"(",
"num_videos",
")",
",",
":",
"]",
"return",
"best_metrics",
",",
"best_decode_ind"
] |
Extracts the best-decode from the metrics according to reduce_func.
Args:
metrics: 3-D numpy array, shape=(num_decodes, num_samples, num_frames)
reduce_func: callable, np.argmax or np.argmin.
Returns:
best_metrics: 2-D numpy array, shape=(num_samples, num_frames).
best_decode_ind: 1-D numpy array, shape=(num_samples,)
|
[
"Extracts",
"the",
"best",
"-",
"decode",
"from",
"the",
"metrics",
"according",
"to",
"reduce_func",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/video_metrics.py#L167-L185
|
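A pure-numpy sketch; np.argmax picks, per sample, the decode with the highest mean metric:

import numpy as np
from tensor2tensor.utils import video_metrics

metrics = np.random.rand(5, 10, 16)   # (num_decodes, num_samples, num_frames)
best, best_ind = video_metrics.reduce_to_best_decode(metrics, np.argmax)
assert best.shape == (10, 16) and best_ind.shape == (10,)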
21,838
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/video_metrics.py
|
compute_all_metrics_statistics
|
def compute_all_metrics_statistics(all_results):
"""Computes statistics of metrics across multiple decodings.
Args:
all_results: dict of 3-D numpy arrays.
Each array has shape=(num_decodes, num_samples, num_frames).
Returns:
statistics: dict of 1-D numpy arrays, shape=(num_frames).
First the statistic (max/mean/std) is computed across the
decodes, then the mean is taken across num_samples.
decode_inds: dict of 1-D numpy arrays, shape=(num_samples,)
Each element represents the index of the decode corresponding
to the best statistic.
"""
statistics = {}
decode_inds = {}
all_metrics = all_results.keys()
for key in all_metrics:
values = all_results[key]
statistics[key + "_MEAN"] = np.mean(values, axis=0)
statistics[key + "_STD"] = np.std(values, axis=0)
min_stats, min_decode_ind = reduce_to_best_decode(values, np.argmin)
statistics[key + "_MIN"] = min_stats
decode_inds[key + "_MIN_DECODE"] = min_decode_ind
max_stats, max_decode_ind = reduce_to_best_decode(values, np.argmax)
statistics[key + "_MAX"] = max_stats
decode_inds[key + "_MAX_DECODE"] = max_decode_ind
# Computes mean of each statistic across the dataset.
for key in statistics:
statistics[key] = np.mean(statistics[key], axis=0)
return statistics, decode_inds
|
python
|
def compute_all_metrics_statistics(all_results):
"""Computes statistics of metrics across multiple decodings.
Args:
all_results: dict of 3-D numpy arrays.
Each array has shape=(num_decodes, num_samples, num_frames).
Returns:
statistics: dict of 1-D numpy arrays, shape=(num_frames).
First the statistic (max/mean/std) is computed across the
decodes, then the mean is taken across num_samples.
decode_inds: dict of 1-D numpy arrays, shape=(num_samples,)
Each element represents the index of the decode corresponding
to the best statistic.
"""
statistics = {}
decode_inds = {}
all_metrics = all_results.keys()
for key in all_metrics:
values = all_results[key]
statistics[key + "_MEAN"] = np.mean(values, axis=0)
statistics[key + "_STD"] = np.std(values, axis=0)
min_stats, min_decode_ind = reduce_to_best_decode(values, np.argmin)
statistics[key + "_MIN"] = min_stats
decode_inds[key + "_MIN_DECODE"] = min_decode_ind
max_stats, max_decode_ind = reduce_to_best_decode(values, np.argmax)
statistics[key + "_MAX"] = max_stats
decode_inds[key + "_MAX_DECODE"] = max_decode_ind
# Computes mean of each statistic across the dataset.
for key in statistics:
statistics[key] = np.mean(statistics[key], axis=0)
return statistics, decode_inds
|
[
"def",
"compute_all_metrics_statistics",
"(",
"all_results",
")",
":",
"statistics",
"=",
"{",
"}",
"decode_inds",
"=",
"{",
"}",
"all_metrics",
"=",
"all_results",
".",
"keys",
"(",
")",
"for",
"key",
"in",
"all_metrics",
":",
"values",
"=",
"all_results",
"[",
"key",
"]",
"statistics",
"[",
"key",
"+",
"\"_MEAN\"",
"]",
"=",
"np",
".",
"mean",
"(",
"values",
",",
"axis",
"=",
"0",
")",
"statistics",
"[",
"key",
"+",
"\"_STD\"",
"]",
"=",
"np",
".",
"std",
"(",
"values",
",",
"axis",
"=",
"0",
")",
"min_stats",
",",
"min_decode_ind",
"=",
"reduce_to_best_decode",
"(",
"values",
",",
"np",
".",
"argmin",
")",
"statistics",
"[",
"key",
"+",
"\"_MIN\"",
"]",
"=",
"min_stats",
"decode_inds",
"[",
"key",
"+",
"\"_MIN_DECODE\"",
"]",
"=",
"min_decode_ind",
"max_stats",
",",
"max_decode_ind",
"=",
"reduce_to_best_decode",
"(",
"values",
",",
"np",
".",
"argmax",
")",
"statistics",
"[",
"key",
"+",
"\"_MAX\"",
"]",
"=",
"max_stats",
"decode_inds",
"[",
"key",
"+",
"\"_MAX_DECODE\"",
"]",
"=",
"max_decode_ind",
"# Computes mean of each statistic across the dataset.",
"for",
"key",
"in",
"statistics",
":",
"statistics",
"[",
"key",
"]",
"=",
"np",
".",
"mean",
"(",
"statistics",
"[",
"key",
"]",
",",
"axis",
"=",
"0",
")",
"return",
"statistics",
",",
"decode_inds"
] |
Computes statistics of metrics across multiple decodings.
Args:
all_results: dict of 3-D numpy arrays.
Each array has shape=(num_decodes, num_samples, num_frames).
Returns:
statistics: dict of 1-D numpy arrays, shape=(num_frames).
First the statistic (max/mean/std) is computed across the
decodes, then the mean is taken across num_samples.
decode_inds: dict of 1-D numpy arrays, shape=(num_samples,)
Each element represents the index of the decode corresponding
to the best statistic.
|
[
"Computes",
"statistics",
"of",
"metrics",
"across",
"multiple",
"decodings",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/video_metrics.py#L188-L220
|
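A numpy sketch of the resulting dicts; the key suffixes follow the naming scheme in the code:

import numpy as np
from tensor2tensor.utils import video_metrics

all_results = {"PSNR": np.random.rand(5, 10, 16),
               "SSIM": np.random.rand(5, 10, 16)}
stats, decode_inds = video_metrics.compute_all_metrics_statistics(all_results)
# stats["PSNR_MEAN"], stats["PSNR_STD"], stats["PSNR_MIN"], stats["PSNR_MAX"]
# are 1-D arrays over the 16 frames; decode_inds["PSNR_MAX_DECODE"] holds one
# best-decode index per sample.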
21,839
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/video_metrics.py
|
compute_video_metrics_from_predictions
|
def compute_video_metrics_from_predictions(predictions, decode_hparams):
"""Computes metrics from predictions.
Args:
predictions: list of list of dicts.
outer length: num_decodes, inner length: num_samples.
decode_hparams: Decode hparams, an instance of HParams.
Returns:
statistics: dict of Tensors, key being the metric with each Tensor
having the shape (num_samples, num_frames).
"""
all_results = {}
ssim_all_decodes, psnr_all_decodes = [], []
for single_decode in predictions:
args = get_zipped_dataset_from_predictions(single_decode)
psnr_single, ssim_single = compute_one_decoding_video_metrics(*args)
psnr_all_decodes.append(psnr_single)
ssim_all_decodes.append(ssim_single)
psnr_all_decodes = np.array(psnr_all_decodes)
ssim_all_decodes = np.array(ssim_all_decodes)
all_results.update({"PSNR": psnr_all_decodes, "SSIM": ssim_all_decodes})
return compute_all_metrics_statistics(all_results)
|
python
|
def compute_video_metrics_from_predictions(predictions, decode_hparams):
"""Computes metrics from predictions.
Args:
predictions: list of list of dicts.
outer length: num_decodes, inner length: num_samples.
decode_hparams: Decode hparams, an instance of HParams.
Returns:
statistics: dict of Tensors, key being the metric with each Tensor
having the shape (num_samples, num_frames).
"""
all_results = {}
ssim_all_decodes, psnr_all_decodes = [], []
for single_decode in predictions:
args = get_zipped_dataset_from_predictions(single_decode)
psnr_single, ssim_single = compute_one_decoding_video_metrics(*args)
psnr_all_decodes.append(psnr_single)
ssim_all_decodes.append(ssim_single)
psnr_all_decodes = np.array(psnr_all_decodes)
ssim_all_decodes = np.array(ssim_all_decodes)
all_results.update({"PSNR": psnr_all_decodes, "SSIM": ssim_all_decodes})
return compute_all_metrics_statistics(all_results)
|
[
"def",
"compute_video_metrics_from_predictions",
"(",
"predictions",
",",
"decode_hparams",
")",
":",
"all_results",
"=",
"{",
"}",
"ssim_all_decodes",
",",
"psnr_all_decodes",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"single_decode",
"in",
"predictions",
":",
"args",
"=",
"get_zipped_dataset_from_predictions",
"(",
"single_decode",
")",
"psnr_single",
",",
"ssim_single",
"=",
"compute_one_decoding_video_metrics",
"(",
"*",
"args",
")",
"psnr_all_decodes",
".",
"append",
"(",
"psnr_single",
")",
"ssim_all_decodes",
".",
"append",
"(",
"ssim_single",
")",
"psnr_all_decodes",
"=",
"np",
".",
"array",
"(",
"psnr_all_decodes",
")",
"ssim_all_decodes",
"=",
"np",
".",
"array",
"(",
"ssim_all_decodes",
")",
"all_results",
".",
"update",
"(",
"{",
"\"PSNR\"",
":",
"psnr_all_decodes",
",",
"\"SSIM\"",
":",
"ssim_all_decodes",
"}",
")",
"return",
"compute_all_metrics_statistics",
"(",
"all_results",
")"
] |
Computes metrics from predictions.
Args:
predictions: list of list of dicts.
outer length: num_decodes, inner length: num_samples.
decode_hparams: Decode hparams, an instance of HParams.
Returns:
statistics: dict of Tensors, key being the metric with each Tensor
having the shape (num_samples, num_frames).
|
[
"Computes",
"metrics",
"from",
"predictions",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/video_metrics.py#L223-L246
|
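An end-to-end sketch; this runs TF sessions internally via compute_one_decoding_video_metrics (not shown here), and the prediction layout is assumed as in the earlier sketch:

import numpy as np
from tensor2tensor.utils import video_metrics

one_decode = [{"targets": np.zeros((16, 64, 64, 3), np.uint8),
               "outputs": np.zeros((16, 64, 64, 3), np.uint8)}
              for _ in range(4)]
predictions = [one_decode] * 3        # outer: num_decodes, inner: num_samples
stats, decode_inds = video_metrics.compute_video_metrics_from_predictions(
    predictions, decode_hparams=None)  # decode_hparams is unused in the body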
21,840
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/video_metrics.py
|
compute_and_save_video_metrics
|
def compute_and_save_video_metrics(
output_dirs, problem_name, video_length, frame_shape):
"""Compute and saves the video metrics."""
statistics, all_results = compute_video_metrics_from_png_files(
output_dirs, problem_name, video_length, frame_shape)
for results, output_dir in zip(all_results, output_dirs):
save_results(results, output_dir, problem_name)
parent_dir = os.path.join(output_dirs[0], os.pardir)
final_dir = os.path.join(parent_dir, "decode")
tf.gfile.MakeDirs(parent_dir)
save_results(statistics, final_dir, problem_name)
|
python
|
def compute_and_save_video_metrics(
output_dirs, problem_name, video_length, frame_shape):
"""Compute and saves the video metrics."""
statistics, all_results = compute_video_metrics_from_png_files(
output_dirs, problem_name, video_length, frame_shape)
for results, output_dir in zip(all_results, output_dirs):
save_results(results, output_dir, problem_name)
parent_dir = os.path.join(output_dirs[0], os.pardir)
final_dir = os.path.join(parent_dir, "decode")
tf.gfile.MakeDirs(parent_dir)
save_results(statistics, final_dir, problem_name)
|
[
"def",
"compute_and_save_video_metrics",
"(",
"output_dirs",
",",
"problem_name",
",",
"video_length",
",",
"frame_shape",
")",
":",
"statistics",
",",
"all_results",
"=",
"compute_video_metrics_from_png_files",
"(",
"output_dirs",
",",
"problem_name",
",",
"video_length",
",",
"frame_shape",
")",
"for",
"results",
",",
"output_dir",
"in",
"zip",
"(",
"all_results",
",",
"output_dirs",
")",
":",
"save_results",
"(",
"results",
",",
"output_dir",
",",
"problem_name",
")",
"parent_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dirs",
"[",
"0",
"]",
",",
"os",
".",
"pardir",
")",
"final_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"parent_dir",
",",
"\"decode\"",
")",
"tf",
".",
"gfile",
".",
"MakeDirs",
"(",
"parent_dir",
")",
"save_results",
"(",
"statistics",
",",
"final_dir",
",",
"problem_name",
")"
] |
Computes and saves the video metrics.
|
[
"Compute",
"and",
"saves",
"the",
"video",
"metrics",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/video_metrics.py#L282-L294
|
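A call sketch; the directories and problem name are hypothetical, and each directory must already hold decoded PNG frames matching the problem's naming scheme:

from tensor2tensor.utils import video_metrics

video_metrics.compute_and_save_video_metrics(
    output_dirs=["/tmp/decodes/run_00", "/tmp/decodes/run_01"],  # hypothetical
    problem_name="my_video_problem",                             # hypothetical
    video_length=16, frame_shape=[64, 64, 3])
# Per-decode results are saved into each output dir; aggregate statistics go
# to a "decode" directory under the shared parent.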
21,841
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
basic_lstm
|
def basic_lstm(inputs, state, num_units, name=None):
"""Basic LSTM."""
input_shape = common_layers.shape_list(inputs)
# reuse parameters across time-steps.
cell = tf.nn.rnn_cell.BasicLSTMCell(
num_units, name=name, reuse=tf.AUTO_REUSE)
if state is None:
state = cell.zero_state(input_shape[0], tf.float32)
outputs, new_state = cell(inputs, state)
return outputs, new_state
|
python
|
def basic_lstm(inputs, state, num_units, name=None):
"""Basic LSTM."""
input_shape = common_layers.shape_list(inputs)
# reuse parameters across time-steps.
cell = tf.nn.rnn_cell.BasicLSTMCell(
num_units, name=name, reuse=tf.AUTO_REUSE)
if state is None:
state = cell.zero_state(input_shape[0], tf.float32)
outputs, new_state = cell(inputs, state)
return outputs, new_state
|
[
"def",
"basic_lstm",
"(",
"inputs",
",",
"state",
",",
"num_units",
",",
"name",
"=",
"None",
")",
":",
"input_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"inputs",
")",
"# reuse parameters across time-steps.",
"cell",
"=",
"tf",
".",
"nn",
".",
"rnn_cell",
".",
"BasicLSTMCell",
"(",
"num_units",
",",
"name",
"=",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
"if",
"state",
"is",
"None",
":",
"state",
"=",
"cell",
".",
"zero_state",
"(",
"input_shape",
"[",
"0",
"]",
",",
"tf",
".",
"float32",
")",
"outputs",
",",
"new_state",
"=",
"cell",
"(",
"inputs",
",",
"state",
")",
"return",
"outputs",
",",
"new_state"
] |
Basic LSTM.
|
[
"Basic",
"LSTM",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L68-L77
|
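An unrolling sketch; because the cell is built with reuse=tf.AUTO_REUSE, calling it repeatedly under the same name shares one set of weights across time-steps (TF 1.x assumed):

import tensorflow as tf
from tensor2tensor.layers import common_video

inputs = tf.random_normal([8, 32])    # (batch, features) at one time-step
state = None                          # first call builds the zero state
for _ in range(10):
  out, state = common_video.basic_lstm(inputs, state, num_units=64, name="lstm")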
21,842
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
lstm_cell
|
def lstm_cell(inputs,
state,
num_units,
use_peepholes=False,
cell_clip=0.0,
initializer=None,
num_proj=None,
num_unit_shards=None,
num_proj_shards=None,
reuse=None,
name=None):
"""Full LSTM cell."""
input_shape = common_layers.shape_list(inputs)
cell = tf.nn.rnn_cell.LSTMCell(num_units,
use_peepholes=use_peepholes,
cell_clip=cell_clip,
initializer=initializer,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
reuse=reuse,
name=name,
state_is_tuple=False)
if state is None:
state = cell.zero_state(input_shape[0], tf.float32)
outputs, new_state = cell(inputs, state)
return outputs, new_state
|
python
|
def lstm_cell(inputs,
state,
num_units,
use_peepholes=False,
cell_clip=0.0,
initializer=None,
num_proj=None,
num_unit_shards=None,
num_proj_shards=None,
reuse=None,
name=None):
"""Full LSTM cell."""
input_shape = common_layers.shape_list(inputs)
cell = tf.nn.rnn_cell.LSTMCell(num_units,
use_peepholes=use_peepholes,
cell_clip=cell_clip,
initializer=initializer,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
reuse=reuse,
name=name,
state_is_tuple=False)
if state is None:
state = cell.zero_state(input_shape[0], tf.float32)
outputs, new_state = cell(inputs, state)
return outputs, new_state
|
[
"def",
"lstm_cell",
"(",
"inputs",
",",
"state",
",",
"num_units",
",",
"use_peepholes",
"=",
"False",
",",
"cell_clip",
"=",
"0.0",
",",
"initializer",
"=",
"None",
",",
"num_proj",
"=",
"None",
",",
"num_unit_shards",
"=",
"None",
",",
"num_proj_shards",
"=",
"None",
",",
"reuse",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"input_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"inputs",
")",
"cell",
"=",
"tf",
".",
"nn",
".",
"rnn_cell",
".",
"LSTMCell",
"(",
"num_units",
",",
"use_peepholes",
"=",
"use_peepholes",
",",
"cell_clip",
"=",
"cell_clip",
",",
"initializer",
"=",
"initializer",
",",
"num_proj",
"=",
"num_proj",
",",
"num_unit_shards",
"=",
"num_unit_shards",
",",
"num_proj_shards",
"=",
"num_proj_shards",
",",
"reuse",
"=",
"reuse",
",",
"name",
"=",
"name",
",",
"state_is_tuple",
"=",
"False",
")",
"if",
"state",
"is",
"None",
":",
"state",
"=",
"cell",
".",
"zero_state",
"(",
"input_shape",
"[",
"0",
"]",
",",
"tf",
".",
"float32",
")",
"outputs",
",",
"new_state",
"=",
"cell",
"(",
"inputs",
",",
"state",
")",
"return",
"outputs",
",",
"new_state"
] |
Full LSTM cell.
|
[
"Full",
"LSTM",
"cell",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L80-L106
|
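The full-featured variant exposes peepholes, cell clipping and projection; note state_is_tuple=False, so the state is a single concatenated tensor (TF 1.x assumed):

import tensorflow as tf
from tensor2tensor.layers import common_video

out, state = common_video.lstm_cell(
    tf.random_normal([8, 32]), state=None, num_units=64,
    use_peepholes=True, name="full_lstm")
# state has shape (8, 128): cell and hidden states concatenated.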
21,843
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
conv_lstm_2d
|
def conv_lstm_2d(inputs, state, output_channels,
kernel_size=5, name=None, spatial_dims=None):
"""2D Convolutional LSTM."""
input_shape = common_layers.shape_list(inputs)
batch_size, input_channels = input_shape[0], input_shape[-1]
if spatial_dims is None:
input_shape = input_shape[1:]
else:
input_shape = spatial_dims + [input_channels]
cell = tf.contrib.rnn.ConvLSTMCell(
2, input_shape, output_channels,
[kernel_size, kernel_size], name=name)
if state is None:
state = cell.zero_state(batch_size, tf.float32)
outputs, new_state = cell(inputs, state)
return outputs, new_state
|
python
|
def conv_lstm_2d(inputs, state, output_channels,
kernel_size=5, name=None, spatial_dims=None):
"""2D Convolutional LSTM."""
input_shape = common_layers.shape_list(inputs)
batch_size, input_channels = input_shape[0], input_shape[-1]
if spatial_dims is None:
input_shape = input_shape[1:]
else:
input_shape = spatial_dims + [input_channels]
cell = tf.contrib.rnn.ConvLSTMCell(
2, input_shape, output_channels,
[kernel_size, kernel_size], name=name)
if state is None:
state = cell.zero_state(batch_size, tf.float32)
outputs, new_state = cell(inputs, state)
return outputs, new_state
|
[
"def",
"conv_lstm_2d",
"(",
"inputs",
",",
"state",
",",
"output_channels",
",",
"kernel_size",
"=",
"5",
",",
"name",
"=",
"None",
",",
"spatial_dims",
"=",
"None",
")",
":",
"input_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"inputs",
")",
"batch_size",
",",
"input_channels",
"=",
"input_shape",
"[",
"0",
"]",
",",
"input_shape",
"[",
"-",
"1",
"]",
"if",
"spatial_dims",
"is",
"None",
":",
"input_shape",
"=",
"input_shape",
"[",
"1",
":",
"]",
"else",
":",
"input_shape",
"=",
"spatial_dims",
"+",
"[",
"input_channels",
"]",
"cell",
"=",
"tf",
".",
"contrib",
".",
"rnn",
".",
"ConvLSTMCell",
"(",
"2",
",",
"input_shape",
",",
"output_channels",
",",
"[",
"kernel_size",
",",
"kernel_size",
"]",
",",
"name",
"=",
"name",
")",
"if",
"state",
"is",
"None",
":",
"state",
"=",
"cell",
".",
"zero_state",
"(",
"batch_size",
",",
"tf",
".",
"float32",
")",
"outputs",
",",
"new_state",
"=",
"cell",
"(",
"inputs",
",",
"state",
")",
"return",
"outputs",
",",
"new_state"
] |
2D Convolutional LSTM.
|
[
"2D",
"Convolutional",
"LSTM",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L109-L125
|
21,844
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
scheduled_sample_count
|
def scheduled_sample_count(ground_truth_x,
generated_x,
batch_size,
scheduled_sample_var):
"""Sample batch with specified mix of groundtruth and generated data points.
Args:
ground_truth_x: tensor of ground-truth data points.
generated_x: tensor of generated data points.
batch_size: batch size
scheduled_sample_var: number of ground-truth examples to include in batch.
Returns:
New batch with num_ground_truth sampled from ground_truth_x and the rest
from generated_x.
"""
num_ground_truth = scheduled_sample_var
idx = tf.random_shuffle(tf.range(batch_size))
ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size))
ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
generated_examps = tf.gather(generated_x, generated_idx)
output = tf.dynamic_stitch([ground_truth_idx, generated_idx],
[ground_truth_examps, generated_examps])
# if batch size is known set it.
if isinstance(batch_size, int):
output.set_shape([batch_size] + common_layers.shape_list(output)[1:])
return output
|
python
|
def scheduled_sample_count(ground_truth_x,
generated_x,
batch_size,
scheduled_sample_var):
"""Sample batch with specified mix of groundtruth and generated data points.
Args:
ground_truth_x: tensor of ground-truth data points.
generated_x: tensor of generated data points.
batch_size: batch size
scheduled_sample_var: number of ground-truth examples to include in batch.
Returns:
New batch with num_ground_truth sampled from ground_truth_x and the rest
from generated_x.
"""
num_ground_truth = scheduled_sample_var
idx = tf.random_shuffle(tf.range(batch_size))
ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size))
ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
generated_examps = tf.gather(generated_x, generated_idx)
output = tf.dynamic_stitch([ground_truth_idx, generated_idx],
[ground_truth_examps, generated_examps])
# if batch size is known set it.
if isinstance(batch_size, int):
output.set_shape([batch_size] + common_layers.shape_list(output)[1:])
return output
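
# A minimal sketch, assuming TF 1.x; with batch_size=4 and
# scheduled_sample_var=3, three batch positions come from ground truth and
# one from the model's own prediction, stitched back into original order.
import tensorflow as tf

ground_truth = tf.ones([4, 64, 64, 3])
generated = tf.zeros([4, 64, 64, 3])
mixed = scheduled_sample_count(ground_truth, generated,
                               batch_size=4, scheduled_sample_var=3)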
|
[
"def",
"scheduled_sample_count",
"(",
"ground_truth_x",
",",
"generated_x",
",",
"batch_size",
",",
"scheduled_sample_var",
")",
":",
"num_ground_truth",
"=",
"scheduled_sample_var",
"idx",
"=",
"tf",
".",
"random_shuffle",
"(",
"tf",
".",
"range",
"(",
"batch_size",
")",
")",
"ground_truth_idx",
"=",
"tf",
".",
"gather",
"(",
"idx",
",",
"tf",
".",
"range",
"(",
"num_ground_truth",
")",
")",
"generated_idx",
"=",
"tf",
".",
"gather",
"(",
"idx",
",",
"tf",
".",
"range",
"(",
"num_ground_truth",
",",
"batch_size",
")",
")",
"ground_truth_examps",
"=",
"tf",
".",
"gather",
"(",
"ground_truth_x",
",",
"ground_truth_idx",
")",
"generated_examps",
"=",
"tf",
".",
"gather",
"(",
"generated_x",
",",
"generated_idx",
")",
"output",
"=",
"tf",
".",
"dynamic_stitch",
"(",
"[",
"ground_truth_idx",
",",
"generated_idx",
"]",
",",
"[",
"ground_truth_examps",
",",
"generated_examps",
"]",
")",
"# if batch size is known set it.",
"if",
"isinstance",
"(",
"batch_size",
",",
"int",
")",
":",
"output",
".",
"set_shape",
"(",
"[",
"batch_size",
"]",
"+",
"common_layers",
".",
"shape_list",
"(",
"output",
")",
"[",
"1",
":",
"]",
")",
"return",
"output"
] |
Sample batch with specified mix of groundtruth and generated data points.
Args:
ground_truth_x: tensor of ground-truth data points.
generated_x: tensor of generated data points.
batch_size: batch size
scheduled_sample_var: number of ground-truth examples to include in batch.
Returns:
New batch with num_ground_truth sampled from ground_truth_x and the rest
from generated_x.
|
[
"Sample",
"batch",
"with",
"specified",
"mix",
"of",
"groundtruth",
"and",
"generated",
"data",
"points",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L128-L156
|
21,845
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
inject_additional_input
|
def inject_additional_input(layer, inputs, name, mode="concat"):
"""Injects the additional input into the layer.
Args:
layer: layer that the input should be injected to.
inputs: inputs to be injected.
name: TF scope name.
mode: how the info should be added to the layer:
"concat" concatenates it as additional channels.
"multiplicative" broadcasts the inputs and multiplies them with the channels.
"multi_additive" broadcasts the inputs, then multiplies and adds them to
the channels.
Returns:
updated layer.
Raises:
ValueError: in case of unknown mode.
"""
layer_shape = common_layers.shape_list(layer)
input_shape = common_layers.shape_list(inputs)
zeros_mask = tf.zeros(layer_shape, dtype=tf.float32)
if mode == "concat":
emb = encode_to_shape(inputs, layer_shape, name)
layer = tf.concat(values=[layer, emb], axis=-1)
elif mode == "multiplicative":
filters = layer_shape[-1]
input_reshaped = tf.reshape(inputs, [-1, 1, 1, input_shape[-1]])
input_mask = tf.layers.dense(input_reshaped, filters, name=name)
input_broad = input_mask + zeros_mask
layer *= input_broad
elif mode == "multi_additive":
filters = layer_shape[-1]
input_reshaped = tf.reshape(inputs, [-1, 1, 1, input_shape[-1]])
input_mul = tf.layers.dense(input_reshaped, filters, name=name + "_mul")
layer *= tf.nn.sigmoid(input_mul)
input_add = tf.layers.dense(input_reshaped, filters, name=name + "_add")
layer += input_add
else:
raise ValueError("Unknown injection mode: %s" % mode)
return layer
|
python
|
def inject_additional_input(layer, inputs, name, mode="concat"):
"""Injects the additional input into the layer.
Args:
layer: layer that the input should be injected to.
inputs: inputs to be injected.
name: TF scope name.
mode: how the infor should be added to the layer:
"concat" concats as additional channels.
"multiplicative" broadcasts inputs and multiply them to the channels.
"multi_additive" broadcasts inputs and multiply and add to the channels.
Returns:
updated layer.
Raises:
ValueError: in case of unknown mode.
"""
layer_shape = common_layers.shape_list(layer)
input_shape = common_layers.shape_list(inputs)
zeros_mask = tf.zeros(layer_shape, dtype=tf.float32)
if mode == "concat":
emb = encode_to_shape(inputs, layer_shape, name)
layer = tf.concat(values=[layer, emb], axis=-1)
elif mode == "multiplicative":
filters = layer_shape[-1]
input_reshaped = tf.reshape(inputs, [-1, 1, 1, input_shape[-1]])
input_mask = tf.layers.dense(input_reshaped, filters, name=name)
input_broad = input_mask + zeros_mask
layer *= input_broad
elif mode == "multi_additive":
filters = layer_shape[-1]
input_reshaped = tf.reshape(inputs, [-1, 1, 1, input_shape[-1]])
input_mul = tf.layers.dense(input_reshaped, filters, name=name + "_mul")
layer *= tf.nn.sigmoid(input_mul)
input_add = tf.layers.dense(input_reshaped, filters, name=name + "_add")
layer += input_add
else:
raise ValueError("Unknown injection mode: %s" % mode)
return layer
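
# A minimal sketch, assuming TF 1.x; injects a one-hot action vector into a
# feature map with the "multi_additive" mode (gating plus bias). The output
# shape is unchanged; "concat" mode would instead add extra channels.
import tensorflow as tf

features = tf.zeros([4, 8, 8, 32])    # (batch, height, width, channels)
action = tf.one_hot([0, 1, 2, 3], 5)  # (batch, num_actions)
features = inject_additional_input(features, action, name="action_inject",
                                   mode="multi_additive")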
|
[
"def",
"inject_additional_input",
"(",
"layer",
",",
"inputs",
",",
"name",
",",
"mode",
"=",
"\"concat\"",
")",
":",
"layer_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"layer",
")",
"input_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"inputs",
")",
"zeros_mask",
"=",
"tf",
".",
"zeros",
"(",
"layer_shape",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"if",
"mode",
"==",
"\"concat\"",
":",
"emb",
"=",
"encode_to_shape",
"(",
"inputs",
",",
"layer_shape",
",",
"name",
")",
"layer",
"=",
"tf",
".",
"concat",
"(",
"values",
"=",
"[",
"layer",
",",
"emb",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"elif",
"mode",
"==",
"\"multiplicative\"",
":",
"filters",
"=",
"layer_shape",
"[",
"-",
"1",
"]",
"input_reshaped",
"=",
"tf",
".",
"reshape",
"(",
"inputs",
",",
"[",
"-",
"1",
",",
"1",
",",
"1",
",",
"input_shape",
"[",
"-",
"1",
"]",
"]",
")",
"input_mask",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"input_reshaped",
",",
"filters",
",",
"name",
"=",
"name",
")",
"input_broad",
"=",
"input_mask",
"+",
"zeros_mask",
"layer",
"*=",
"input_broad",
"elif",
"mode",
"==",
"\"multi_additive\"",
":",
"filters",
"=",
"layer_shape",
"[",
"-",
"1",
"]",
"input_reshaped",
"=",
"tf",
".",
"reshape",
"(",
"inputs",
",",
"[",
"-",
"1",
",",
"1",
",",
"1",
",",
"input_shape",
"[",
"-",
"1",
"]",
"]",
")",
"input_mul",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"input_reshaped",
",",
"filters",
",",
"name",
"=",
"name",
"+",
"\"_mul\"",
")",
"layer",
"*=",
"tf",
".",
"nn",
".",
"sigmoid",
"(",
"input_mul",
")",
"input_add",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"input_reshaped",
",",
"filters",
",",
"name",
"=",
"name",
"+",
"\"_add\"",
")",
"layer",
"+=",
"input_add",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown injection mode: %s\"",
"%",
"mode",
")",
"return",
"layer"
] |
Injects the additional input into the layer.
Args:
layer: layer that the input should be injected to.
inputs: inputs to be injected.
name: TF scope name.
mode: how the info should be added to the layer:
"concat" concatenates it as additional channels.
"multiplicative" broadcasts the inputs and multiplies them with the channels.
"multi_additive" broadcasts the inputs, then multiplies and adds them to
the channels.
Returns:
updated layer.
Raises:
ValueError: in case of unknown mode.
|
[
"Injects",
"the",
"additional",
"input",
"into",
"the",
"layer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L159-L199
|
21,846
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
scheduled_sample_prob
|
def scheduled_sample_prob(ground_truth_x,
generated_x,
batch_size,
scheduled_sample_var):
"""Probability based scheduled sampling.
Args:
ground_truth_x: tensor of ground-truth data points.
generated_x: tensor of generated data points.
batch_size: batch size
scheduled_sample_var: probability of choosing from ground_truth.
Returns:
New batch with randomly selected data points.
"""
probability_threshold = scheduled_sample_var
probability_of_generated = tf.random_uniform([batch_size])
return tf.where(probability_of_generated > probability_threshold,
generated_x, ground_truth_x)
|
python
|
def scheduled_sample_prob(ground_truth_x,
generated_x,
batch_size,
scheduled_sample_var):
"""Probability based scheduled sampling.
Args:
ground_truth_x: tensor of ground-truth data points.
generated_x: tensor of generated data points.
batch_size: batch size
scheduled_sample_var: probability of choosing from ground_truth.
Returns:
New batch with randomly selected data points.
"""
probability_threshold = scheduled_sample_var
probability_of_generated = tf.random_uniform([batch_size])
return tf.where(probability_of_generated > probability_threshold,
generated_x, ground_truth_x)
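
# A minimal sketch, assuming TF 1.x; each batch element independently comes
# from ground truth with probability scheduled_sample_var (a rank-1 condition
# in tf.where selects whole rows), so annealing it towards 0 shifts training
# from teacher forcing to the model's own predictions.
import tensorflow as tf

mixed = scheduled_sample_prob(tf.ones([4, 64, 64, 3]),
                              tf.zeros([4, 64, 64, 3]),
                              batch_size=4,
                              scheduled_sample_var=tf.constant(0.75))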
|
[
"def",
"scheduled_sample_prob",
"(",
"ground_truth_x",
",",
"generated_x",
",",
"batch_size",
",",
"scheduled_sample_var",
")",
":",
"probability_threshold",
"=",
"scheduled_sample_var",
"probability_of_generated",
"=",
"tf",
".",
"random_uniform",
"(",
"[",
"batch_size",
"]",
")",
"return",
"tf",
".",
"where",
"(",
"probability_of_generated",
">",
"probability_threshold",
",",
"generated_x",
",",
"ground_truth_x",
")"
] |
Probability based scheduled sampling.
Args:
ground_truth_x: tensor of ground-truth data points.
generated_x: tensor of generated data points.
batch_size: batch size
scheduled_sample_var: probability of choosing from ground_truth.
Returns:
New batch with randomly selected data points.
|
[
"Probability",
"based",
"scheduled",
"sampling",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L202-L219
|
21,847
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
dna_transformation
|
def dna_transformation(prev_image, dna_input, dna_kernel_size, relu_shift):
"""Apply dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
dna_input: hidden layer to be used for computing DNA transformation.
dna_kernel_size: dna kernel size.
relu_shift: shift for ReLU function.
Returns:
Image transformed by the predicted DNA kernels.
"""
# Construct translated images.
prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
image_height = int(prev_image.get_shape()[1])
image_width = int(prev_image.get_shape()[2])
inputs = []
for xkern in range(dna_kernel_size):
for ykern in range(dna_kernel_size):
inputs.append(
tf.expand_dims(
tf.slice(prev_image_pad, [0, xkern, ykern, 0],
[-1, image_height, image_width, -1]), [3]))
inputs = tf.concat(axis=3, values=inputs)
# Normalize channels to 1.
kernel = tf.nn.relu(dna_input - relu_shift) + relu_shift
kernel = tf.expand_dims(
kernel / tf.reduce_sum(kernel, [3], keep_dims=True), [4])
return tf.reduce_sum(kernel * inputs, [3], keep_dims=False)
|
python
|
def dna_transformation(prev_image, dna_input, dna_kernel_size, relu_shift):
"""Apply dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
dna_input: hidden layer to be used for computing DNA transformation.
dna_kernel_size: dna kernel size.
relu_shift: shift for ReLU function.
Returns:
Image transformed by the predicted DNA kernels.
"""
# Construct translated images.
prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
image_height = int(prev_image.get_shape()[1])
image_width = int(prev_image.get_shape()[2])
inputs = []
for xkern in range(dna_kernel_size):
for ykern in range(dna_kernel_size):
inputs.append(
tf.expand_dims(
tf.slice(prev_image_pad, [0, xkern, ykern, 0],
[-1, image_height, image_width, -1]), [3]))
inputs = tf.concat(axis=3, values=inputs)
# Normalize channels to 1.
kernel = tf.nn.relu(dna_input - relu_shift) + relu_shift
kernel = tf.expand_dims(
kernel / tf.reduce_sum(kernel, [3], keep_dims=True), [4])
return tf.reduce_sum(kernel * inputs, [3], keep_dims=False)
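
# A minimal sketch, assuming TF 1.x and dna_kernel_size=5 (matching the
# hard-coded padding of 2 above); dna_input carries one 5x5 kernel per pixel
# and relu_shift is an illustrative small constant.
import tensorflow as tf

prev_frame = tf.zeros([4, 64, 64, 3])
per_pixel_kernels = tf.random_uniform([4, 64, 64, 5 * 5])
next_frame = dna_transformation(prev_frame, per_pixel_kernels,
                                dna_kernel_size=5, relu_shift=1e-12)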
|
[
"def",
"dna_transformation",
"(",
"prev_image",
",",
"dna_input",
",",
"dna_kernel_size",
",",
"relu_shift",
")",
":",
"# Construct translated images.",
"prev_image_pad",
"=",
"tf",
".",
"pad",
"(",
"prev_image",
",",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"2",
",",
"2",
"]",
",",
"[",
"2",
",",
"2",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
")",
"image_height",
"=",
"int",
"(",
"prev_image",
".",
"get_shape",
"(",
")",
"[",
"1",
"]",
")",
"image_width",
"=",
"int",
"(",
"prev_image",
".",
"get_shape",
"(",
")",
"[",
"2",
"]",
")",
"inputs",
"=",
"[",
"]",
"for",
"xkern",
"in",
"range",
"(",
"dna_kernel_size",
")",
":",
"for",
"ykern",
"in",
"range",
"(",
"dna_kernel_size",
")",
":",
"inputs",
".",
"append",
"(",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"slice",
"(",
"prev_image_pad",
",",
"[",
"0",
",",
"xkern",
",",
"ykern",
",",
"0",
"]",
",",
"[",
"-",
"1",
",",
"image_height",
",",
"image_width",
",",
"-",
"1",
"]",
")",
",",
"[",
"3",
"]",
")",
")",
"inputs",
"=",
"tf",
".",
"concat",
"(",
"axis",
"=",
"3",
",",
"values",
"=",
"inputs",
")",
"# Normalize channels to 1.",
"kernel",
"=",
"tf",
".",
"nn",
".",
"relu",
"(",
"dna_input",
"-",
"relu_shift",
")",
"+",
"relu_shift",
"kernel",
"=",
"tf",
".",
"expand_dims",
"(",
"kernel",
"/",
"tf",
".",
"reduce_sum",
"(",
"kernel",
",",
"[",
"3",
"]",
",",
"keep_dims",
"=",
"True",
")",
",",
"[",
"4",
"]",
")",
"return",
"tf",
".",
"reduce_sum",
"(",
"kernel",
"*",
"inputs",
",",
"[",
"3",
"]",
",",
"keep_dims",
"=",
"False",
")"
] |
Apply dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
dna_input: hidden layer to be used for computing DNA transformation.
dna_kernel_size: dna kernel size.
relu_shift: shift for ReLU function.
Returns:
Image transformed by the predicted DNA kernels.
|
[
"Apply",
"dynamic",
"neural",
"advection",
"to",
"previous",
"image",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L222-L251
|
21,848
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
cdna_transformation
|
def cdna_transformation(prev_image, cdna_input, num_masks, color_channels,
dna_kernel_size, relu_shift):
"""Apply convolutional dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
cdna_input: hidden layer to be used for computing CDNA kernels.
num_masks: number of masks and hence the number of CDNA transformations.
color_channels: the number of color channels in the images.
dna_kernel_size: dna kernel size.
relu_shift: shift for ReLU function.
Returns:
List of images transformed by the predicted CDNA kernels.
"""
batch_size = tf.shape(cdna_input)[0]
height = int(prev_image.get_shape()[1])
width = int(prev_image.get_shape()[2])
# Predict kernels using linear function of last hidden layer.
cdna_kerns = tfl.dense(
cdna_input, dna_kernel_size * dna_kernel_size * num_masks,
name="cdna_params",
activation=None)
# Reshape and normalize.
cdna_kerns = tf.reshape(
cdna_kerns, [batch_size, dna_kernel_size, dna_kernel_size, 1, num_masks])
cdna_kerns = (tf.nn.relu(cdna_kerns - relu_shift) + relu_shift)
norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True)
cdna_kerns /= norm_factor
# Treat the color channel dimension as the batch dimension since the same
# transformation is applied to each color channel.
# Treat the batch dimension as the channel dimension so that
# depthwise_conv2d can apply a different transformation to each sample.
cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
cdna_kerns = tf.reshape(
cdna_kerns, [dna_kernel_size, dna_kernel_size, batch_size, num_masks])
# Swap the batch and channel dimensions.
prev_image = tf.transpose(prev_image, [3, 1, 2, 0])
# Transform image.
transformed = tf.nn.depthwise_conv2d(
prev_image, cdna_kerns, [1, 1, 1, 1], "SAME")
# Transpose the dimensions to where they belong.
transformed = tf.reshape(
transformed, [color_channels, height, width, batch_size, num_masks])
transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
transformed = tf.unstack(transformed, axis=-1)
return transformed
|
python
|
def cdna_transformation(prev_image, cdna_input, num_masks, color_channels,
dna_kernel_size, relu_shift):
"""Apply convolutional dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
cdna_input: hidden layer to be used for computing CDNA kernels.
num_masks: number of masks and hence the number of CDNA transformations.
color_channels: the number of color channels in the images.
dna_kernel_size: dna kernel size.
relu_shift: shift for ReLU function.
Returns:
List of images transformed by the predicted CDNA kernels.
"""
batch_size = tf.shape(cdna_input)[0]
height = int(prev_image.get_shape()[1])
width = int(prev_image.get_shape()[2])
# Predict kernels using linear function of last hidden layer.
cdna_kerns = tfl.dense(
cdna_input, dna_kernel_size * dna_kernel_size * num_masks,
name="cdna_params",
activation=None)
# Reshape and normalize.
cdna_kerns = tf.reshape(
cdna_kerns, [batch_size, dna_kernel_size, dna_kernel_size, 1, num_masks])
cdna_kerns = (tf.nn.relu(cdna_kerns - relu_shift) + relu_shift)
norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True)
cdna_kerns /= norm_factor
# Treat the color channel dimension as the batch dimension since the same
# transformation is applied to each color channel.
# Treat the batch dimension as the channel dimension so that
# depthwise_conv2d can apply a different transformation to each sample.
cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
cdna_kerns = tf.reshape(
cdna_kerns, [dna_kernel_size, dna_kernel_size, batch_size, num_masks])
# Swap the batch and channel dimensions.
prev_image = tf.transpose(prev_image, [3, 1, 2, 0])
# Transform image.
transformed = tf.nn.depthwise_conv2d(
prev_image, cdna_kerns, [1, 1, 1, 1], "SAME")
# Transpose the dimensions to where they belong.
transformed = tf.reshape(
transformed, [color_channels, height, width, batch_size, num_masks])
transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
transformed = tf.unstack(transformed, axis=-1)
return transformed
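
# A minimal sketch, assuming TF 1.x; predicts num_masks 5x5 CDNA kernels
# from a flat hidden vector and applies each to the previous RGB frame. The
# result is a list of num_masks tensors, each shaped (4, 64, 64, 3).
import tensorflow as tf

prev_frame = tf.zeros([4, 64, 64, 3])
hidden = tf.zeros([4, 128])  # flattened last hidden layer
transformed = cdna_transformation(prev_frame, hidden, num_masks=10,
                                  color_channels=3, dna_kernel_size=5,
                                  relu_shift=1e-12)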
|
[
"def",
"cdna_transformation",
"(",
"prev_image",
",",
"cdna_input",
",",
"num_masks",
",",
"color_channels",
",",
"dna_kernel_size",
",",
"relu_shift",
")",
":",
"batch_size",
"=",
"tf",
".",
"shape",
"(",
"cdna_input",
")",
"[",
"0",
"]",
"height",
"=",
"int",
"(",
"prev_image",
".",
"get_shape",
"(",
")",
"[",
"1",
"]",
")",
"width",
"=",
"int",
"(",
"prev_image",
".",
"get_shape",
"(",
")",
"[",
"2",
"]",
")",
"# Predict kernels using linear function of last hidden layer.",
"cdna_kerns",
"=",
"tfl",
".",
"dense",
"(",
"cdna_input",
",",
"dna_kernel_size",
"*",
"dna_kernel_size",
"*",
"num_masks",
",",
"name",
"=",
"\"cdna_params\"",
",",
"activation",
"=",
"None",
")",
"# Reshape and normalize.",
"cdna_kerns",
"=",
"tf",
".",
"reshape",
"(",
"cdna_kerns",
",",
"[",
"batch_size",
",",
"dna_kernel_size",
",",
"dna_kernel_size",
",",
"1",
",",
"num_masks",
"]",
")",
"cdna_kerns",
"=",
"(",
"tf",
".",
"nn",
".",
"relu",
"(",
"cdna_kerns",
"-",
"relu_shift",
")",
"+",
"relu_shift",
")",
"norm_factor",
"=",
"tf",
".",
"reduce_sum",
"(",
"cdna_kerns",
",",
"[",
"1",
",",
"2",
",",
"3",
"]",
",",
"keep_dims",
"=",
"True",
")",
"cdna_kerns",
"/=",
"norm_factor",
"# Treat the color channel dimension as the batch dimension since the same",
"# transformation is applied to each color channel.",
"# Treat the batch dimension as the channel dimension so that",
"# depthwise_conv2d can apply a different transformation to each sample.",
"cdna_kerns",
"=",
"tf",
".",
"transpose",
"(",
"cdna_kerns",
",",
"[",
"1",
",",
"2",
",",
"0",
",",
"4",
",",
"3",
"]",
")",
"cdna_kerns",
"=",
"tf",
".",
"reshape",
"(",
"cdna_kerns",
",",
"[",
"dna_kernel_size",
",",
"dna_kernel_size",
",",
"batch_size",
",",
"num_masks",
"]",
")",
"# Swap the batch and channel dimensions.",
"prev_image",
"=",
"tf",
".",
"transpose",
"(",
"prev_image",
",",
"[",
"3",
",",
"1",
",",
"2",
",",
"0",
"]",
")",
"# Transform image.",
"transformed",
"=",
"tf",
".",
"nn",
".",
"depthwise_conv2d",
"(",
"prev_image",
",",
"cdna_kerns",
",",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"\"SAME\"",
")",
"# Transpose the dimensions to where they belong.",
"transformed",
"=",
"tf",
".",
"reshape",
"(",
"transformed",
",",
"[",
"color_channels",
",",
"height",
",",
"width",
",",
"batch_size",
",",
"num_masks",
"]",
")",
"transformed",
"=",
"tf",
".",
"transpose",
"(",
"transformed",
",",
"[",
"3",
",",
"1",
",",
"2",
",",
"0",
",",
"4",
"]",
")",
"transformed",
"=",
"tf",
".",
"unstack",
"(",
"transformed",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"transformed"
] |
Apply convolutional dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
cdna_input: hidden layer to be used for computing CDNA kernels.
num_masks: number of masks and hence the number of CDNA transformations.
color_channels: the number of color channels in the images.
dna_kernel_size: dna kernel size.
relu_shift: shift for ReLU function.
Returns:
List of images transformed by the predicted CDNA kernels.
|
[
"Apply",
"convolutional",
"dynamic",
"neural",
"advection",
"to",
"previous",
"image",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L254-L304
|
21,849
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
vgg_layer
|
def vgg_layer(inputs,
nout,
kernel_size=3,
activation=tf.nn.leaky_relu,
padding="SAME",
is_training=True,
has_batchnorm=False,
scope=None):
"""A layer of VGG network with batch norm.
Args:
inputs: image tensor
nout: number of output channels
kernel_size: size of the kernel
activation: activation function
padding: padding of the image
is_training: whether it is training mode or not
has_batchnorm: whether batchnorm is applied or not
scope: variable scope of the op
Returns:
net: output of layer
"""
with tf.variable_scope(scope):
net = tfl.conv2d(inputs, nout, kernel_size=kernel_size, padding=padding,
activation=None, name="conv")
if has_batchnorm:
net = tfl.batch_normalization(net, training=is_training, name="bn")
net = activation(net)
return net
|
python
|
def vgg_layer(inputs,
nout,
kernel_size=3,
activation=tf.nn.leaky_relu,
padding="SAME",
is_training=True,
has_batchnorm=False,
scope=None):
"""A layer of VGG network with batch norm.
Args:
inputs: image tensor
nout: number of output channels
kernel_size: size of the kernel
activation: activation function
padding: padding of the image
is_training: whether it is training mode or not
has_batchnorm: whether batchnorm is applied or not
scope: variable scope of the op
Returns:
net: output of layer
"""
with tf.variable_scope(scope):
net = tfl.conv2d(inputs, nout, kernel_size=kernel_size, padding=padding,
activation=None, name="conv")
if has_batchnorm:
net = tfl.batch_normalization(net, training=is_training, name="bn")
net = activation(net)
return net
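
# A minimal sketch, assuming TF 1.x; two stacked VGG-style blocks, the
# second with batch norm enabled. A scope must be supplied because the body
# opens tf.variable_scope(scope) directly.
import tensorflow as tf

x = tf.zeros([4, 32, 32, 3])
x = vgg_layer(x, nout=64, scope="vgg1")
x = vgg_layer(x, nout=64, is_training=True, has_batchnorm=True, scope="vgg2")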
|
[
"def",
"vgg_layer",
"(",
"inputs",
",",
"nout",
",",
"kernel_size",
"=",
"3",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"leaky_relu",
",",
"padding",
"=",
"\"SAME\"",
",",
"is_training",
"=",
"True",
",",
"has_batchnorm",
"=",
"False",
",",
"scope",
"=",
"None",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"scope",
")",
":",
"net",
"=",
"tfl",
".",
"conv2d",
"(",
"inputs",
",",
"nout",
",",
"kernel_size",
"=",
"kernel_size",
",",
"padding",
"=",
"padding",
",",
"activation",
"=",
"None",
",",
"name",
"=",
"\"conv\"",
")",
"if",
"has_batchnorm",
":",
"net",
"=",
"tfl",
".",
"batch_normalization",
"(",
"net",
",",
"training",
"=",
"is_training",
",",
"name",
"=",
"\"bn\"",
")",
"net",
"=",
"activation",
"(",
"net",
")",
"return",
"net"
] |
A layer of VGG network with batch norm.
Args:
inputs: image tensor
nout: number of output channels
kernel_size: size of the kernel
activation: activation function
padding: padding of the image
is_training: whether it is training mode or not
has_batchnorm: whether batchnorm is applied or not
scope: variable scope of the op
Returns:
net: output of layer
|
[
"A",
"layer",
"of",
"VGG",
"network",
"with",
"batch",
"norm",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L307-L335
|
21,850
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
tile_and_concat
|
def tile_and_concat(image, latent, concat_latent=True):
"""Tile latent and concatenate to image across depth.
Args:
image: 4-D Tensor, (batch_size X height X width X channels)
latent: 2-D Tensor, (batch_size X latent_dims)
concat_latent: If set to False, the image is returned as is.
Returns:
concat_latent: 4-D Tensor, (batch_size X height X width X channels+1)
latent tiled and concatenated to the image across the channels.
"""
if not concat_latent:
return image
image_shape = common_layers.shape_list(image)
latent_shape = common_layers.shape_list(latent)
height, width = image_shape[1], image_shape[2]
latent_dims = latent_shape[1]
height_multiples = height // latent_dims
pad = height - (height_multiples * latent_dims)
latent = tf.reshape(latent, (-1, latent_dims, 1, 1))
latent = tf.tile(latent, (1, height_multiples, width, 1))
latent = tf.pad(latent, [[0, 0], [pad // 2, pad // 2], [0, 0], [0, 0]])
return tf.concat([image, latent], axis=-1)
|
python
|
def tile_and_concat(image, latent, concat_latent=True):
"""Tile latent and concatenate to image across depth.
Args:
image: 4-D Tensor, (batch_size X height X width X channels)
latent: 2-D Tensor, (batch_size X latent_dims)
concat_latent: If set to False, the image is returned as is.
Returns:
concat_latent: 4-D Tensor, (batch_size X height X width X channels+1)
latent tiled and concatenated to the image across the channels.
"""
if not concat_latent:
return image
image_shape = common_layers.shape_list(image)
latent_shape = common_layers.shape_list(latent)
height, width = image_shape[1], image_shape[2]
latent_dims = latent_shape[1]
height_multiples = height // latent_dims
pad = height - (height_multiples * latent_dims)
latent = tf.reshape(latent, (-1, latent_dims, 1, 1))
latent = tf.tile(latent, (1, height_multiples, width, 1))
latent = tf.pad(latent, [[0, 0], [pad // 2, pad // 2], [0, 0], [0, 0]])
return tf.concat([image, latent], axis=-1)
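
# A minimal sketch, assuming TF 1.x; a 16-dim latent divides the height 64
# evenly (no padding needed) and is appended as one extra channel.
import tensorflow as tf

image = tf.zeros([4, 64, 64, 3])
latent = tf.zeros([4, 16])
out = tile_and_concat(image, latent)  # shape (4, 64, 64, 4)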
|
[
"def",
"tile_and_concat",
"(",
"image",
",",
"latent",
",",
"concat_latent",
"=",
"True",
")",
":",
"if",
"not",
"concat_latent",
":",
"return",
"image",
"image_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"image",
")",
"latent_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"latent",
")",
"height",
",",
"width",
"=",
"image_shape",
"[",
"1",
"]",
",",
"image_shape",
"[",
"2",
"]",
"latent_dims",
"=",
"latent_shape",
"[",
"1",
"]",
"height_multiples",
"=",
"height",
"//",
"latent_dims",
"pad",
"=",
"height",
"-",
"(",
"height_multiples",
"*",
"latent_dims",
")",
"latent",
"=",
"tf",
".",
"reshape",
"(",
"latent",
",",
"(",
"-",
"1",
",",
"latent_dims",
",",
"1",
",",
"1",
")",
")",
"latent",
"=",
"tf",
".",
"tile",
"(",
"latent",
",",
"(",
"1",
",",
"height_multiples",
",",
"width",
",",
"1",
")",
")",
"latent",
"=",
"tf",
".",
"pad",
"(",
"latent",
",",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"pad",
"//",
"2",
",",
"pad",
"//",
"2",
"]",
",",
"[",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
")",
"return",
"tf",
".",
"concat",
"(",
"[",
"image",
",",
"latent",
"]",
",",
"axis",
"=",
"-",
"1",
")"
] |
Tile latent and concatenate to image across depth.
Args:
image: 4-D Tensor, (batch_size X height X width X channels)
latent: 2-D Tensor, (batch_size X latent_dims)
concat_latent: If set to False, the image is returned as is.
Returns:
concat_latent: 4-D Tensor, (batch_size X height X width X channels+1)
latent tiled and concatenated to the image across the channels.
|
[
"Tile",
"latent",
"and",
"concatenate",
"to",
"image",
"across",
"depth",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L338-L361
|
21,851
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
_encode_gif
|
def _encode_gif(images, fps):
"""Encodes numpy images into gif string.
Args:
images: A 4-D `uint8` `np.array` (or a list of 3-D images) of shape
`[time, height, width, channels]` where `channels` is 1 or 3.
fps: frames per second of the animation
Returns:
The encoded gif string.
Raises:
IOError: If the ffmpeg command returns an error.
"""
writer = WholeVideoWriter(fps)
writer.write_multi(images)
return writer.finish()
|
python
|
def _encode_gif(images, fps):
"""Encodes numpy images into gif string.
Args:
images: A 4-D `uint8` `np.array` (or a list of 3-D images) of shape
`[time, height, width, channels]` where `channels` is 1 or 3.
fps: frames per second of the animation
Returns:
The encoded gif string.
Raises:
IOError: If the ffmpeg command returns an error.
"""
writer = WholeVideoWriter(fps)
writer.write_multi(images)
return writer.finish()
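
# A minimal sketch; requires a working ffmpeg binary on PATH. Frames are
# uint8 numpy arrays and the return value is the encoded GIF as bytes. The
# output path is illustrative.
import numpy as np

frames = np.random.randint(0, 255, size=(10, 32, 32, 3), dtype=np.uint8)
gif_bytes = _encode_gif(frames, fps=4)
with open("/tmp/sample.gif", "wb") as f:
  f.write(gif_bytes)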
|
[
"def",
"_encode_gif",
"(",
"images",
",",
"fps",
")",
":",
"writer",
"=",
"WholeVideoWriter",
"(",
"fps",
")",
"writer",
".",
"write_multi",
"(",
"images",
")",
"return",
"writer",
".",
"finish",
"(",
")"
] |
Encodes numpy images into gif string.
Args:
images: A 4-D `uint8` `np.array` (or a list of 3-D images) of shape
`[time, height, width, channels]` where `channels` is 1 or 3.
fps: frames per second of the animation
Returns:
The encoded gif string.
Raises:
IOError: If the ffmpeg command returns an error.
|
[
"Encodes",
"numpy",
"images",
"into",
"gif",
"string",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L364-L380
|
21,852
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
ffmpeg_works
|
def ffmpeg_works():
"""Tries to encode images with ffmpeg to check if it works."""
images = np.zeros((2, 32, 32, 3), dtype=np.uint8)
try:
_encode_gif(images, 2)
return True
except (IOError, OSError):
return False
|
python
|
def ffmpeg_works():
"""Tries to encode images with ffmpeg to check if it works."""
images = np.zeros((2, 32, 32, 3), dtype=np.uint8)
try:
_encode_gif(images, 2)
return True
except (IOError, OSError):
return False
|
[
"def",
"ffmpeg_works",
"(",
")",
":",
"images",
"=",
"np",
".",
"zeros",
"(",
"(",
"2",
",",
"32",
",",
"32",
",",
"3",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"try",
":",
"_encode_gif",
"(",
"images",
",",
"2",
")",
"return",
"True",
"except",
"(",
"IOError",
",",
"OSError",
")",
":",
"return",
"False"
] |
Tries to encode images with ffmpeg to check if it works.
|
[
"Tries",
"to",
"encode",
"images",
"with",
"ffmpeg",
"to",
"check",
"if",
"it",
"works",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L383-L390
|
21,853
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
conv_latent_tower
|
def conv_latent_tower(images, time_axis, latent_channels=1, min_logvar=-5,
is_training=False, random_latent=False,
tiny_mode=False, small_mode=False):
"""Builds convolutional latent tower for stochastic model.
At training time this tower generates a latent distribution (mean and std)
conditioned on the entire video. This latent variable will be fed to the
main tower as an extra variable to be used for future frames prediction.
At inference time, the tower is disabled and only returns latents sampled
from N(0,1).
If the multi_latent flag is on, a different latent is generated for every
timestep.
Args:
images: tensor of ground truth image sequences
time_axis: the time axis in images tensor
latent_channels: number of latent channels
min_logvar: minimum value for log_var
is_training: whether or not it is training mode
random_latent: whether or not to generate random latents
tiny_mode: whether or not it is tiny_mode. tiny_mode sets the number of
conv channels to 1 at each layer; useful for integration tests.
small_mode: whether or not it is small_mode. small_mode is the same model
with fewer conv and LSTM layers and fewer channels; suitable for less
complex videos and for testing.
Returns:
latent_mean: predicted latent mean
latent_logvar: predicted latent log variance
"""
conv_size = tinyify([32, 64, 64], tiny_mode, small_mode)
with tf.variable_scope("latent", reuse=tf.AUTO_REUSE):
images = tf.to_float(images)
images = tf.unstack(images, axis=time_axis)
images = tf.concat(images, axis=3)
x = images
x = common_layers.make_even_size(x)
x = tfl.conv2d(x, conv_size[0], [3, 3], strides=(2, 2),
padding="SAME", activation=tf.nn.relu, name="latent_conv1")
x = tfcl.layer_norm(x)
if not small_mode:
x = tfl.conv2d(x, conv_size[1], [3, 3], strides=(2, 2),
padding="SAME", activation=tf.nn.relu, name="latent_conv2")
x = tfcl.layer_norm(x)
x = tfl.conv2d(x, conv_size[2], [3, 3], strides=(1, 1),
padding="SAME", activation=tf.nn.relu, name="latent_conv3")
x = tfcl.layer_norm(x)
nc = latent_channels
mean = tfl.conv2d(x, nc, [3, 3], strides=(2, 2),
padding="SAME", activation=None, name="latent_mean")
logv = tfl.conv2d(x, nc, [3, 3], strides=(2, 2),
padding="SAME", activation=tf.nn.relu, name="latent_std")
logvar = logv + min_logvar
# No latent tower at inference time, just standard gaussian.
if not is_training:
return tf.zeros_like(mean), tf.zeros_like(logvar)
# No latent in the first phase
ret_mean, ret_logvar = tf.cond(
random_latent,
lambda: (tf.zeros_like(mean), tf.zeros_like(logvar)),
lambda: (mean, logvar))
return ret_mean, ret_logvar
|
python
|
def conv_latent_tower(images, time_axis, latent_channels=1, min_logvar=-5,
is_training=False, random_latent=False,
tiny_mode=False, small_mode=False):
"""Builds convolutional latent tower for stochastic model.
At training time this tower generates a latent distribution (mean and std)
conditioned on the entire video. This latent variable will be fed to the
main tower as an extra variable to be used for future frames prediction.
At inference time, the tower is disabled and only returns latents sampled
from N(0,1).
If the multi_latent flag is on, a different latent is generated for every
timestep.
Args:
images: tensor of ground truth image sequences
time_axis: the time axis in images tensor
latent_channels: number of latent channels
min_logvar: minimum value for log_var
is_training: whether or not it is training mode
random_latent: whether or not to generate random latents
tiny_mode: whether or not it is tiny_mode. tiny_mode sets the number of
conv channels to 1 at each layer; useful for integration tests.
small_mode: whether or not it is small_mode. small_mode is the same model
with fewer conv and LSTM layers and fewer channels; suitable for less
complex videos and for testing.
Returns:
latent_mean: predicted latent mean
latent_logvar: predicted latent log variance
"""
conv_size = tinyify([32, 64, 64], tiny_mode, small_mode)
with tf.variable_scope("latent", reuse=tf.AUTO_REUSE):
images = tf.to_float(images)
images = tf.unstack(images, axis=time_axis)
images = tf.concat(images, axis=3)
x = images
x = common_layers.make_even_size(x)
x = tfl.conv2d(x, conv_size[0], [3, 3], strides=(2, 2),
padding="SAME", activation=tf.nn.relu, name="latent_conv1")
x = tfcl.layer_norm(x)
if not small_mode:
x = tfl.conv2d(x, conv_size[1], [3, 3], strides=(2, 2),
padding="SAME", activation=tf.nn.relu, name="latent_conv2")
x = tfcl.layer_norm(x)
x = tfl.conv2d(x, conv_size[2], [3, 3], strides=(1, 1),
padding="SAME", activation=tf.nn.relu, name="latent_conv3")
x = tfcl.layer_norm(x)
nc = latent_channels
mean = tfl.conv2d(x, nc, [3, 3], strides=(2, 2),
padding="SAME", activation=None, name="latent_mean")
logv = tfl.conv2d(x, nc, [3, 3], strides=(2, 2),
padding="SAME", activation=tf.nn.relu, name="latent_std")
logvar = logv + min_logvar
# No latent tower at inference time, just standard gaussian.
if not is_training:
return tf.zeros_like(mean), tf.zeros_like(logvar)
# No latent in the first phase
ret_mean, ret_logvar = tf.cond(
random_latent,
lambda: (tf.zeros_like(mean), tf.zeros_like(logvar)),
lambda: (mean, logvar))
return ret_mean, ret_logvar
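
# A minimal sketch, assuming TF 1.x; note random_latent must be a boolean
# *tensor*, since it is consumed by tf.cond. The mean/logvar come out at
# 1/8 of the (even-sized) spatial resolution after the stride-2 convs.
import tensorflow as tf

video = tf.zeros([4, 6, 64, 64, 3])  # (batch, time, height, width, chans)
mean, logvar = conv_latent_tower(video, time_axis=1, latent_channels=2,
                                 is_training=True,
                                 random_latent=tf.constant(False))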
|
[
"def",
"conv_latent_tower",
"(",
"images",
",",
"time_axis",
",",
"latent_channels",
"=",
"1",
",",
"min_logvar",
"=",
"-",
"5",
",",
"is_training",
"=",
"False",
",",
"random_latent",
"=",
"False",
",",
"tiny_mode",
"=",
"False",
",",
"small_mode",
"=",
"False",
")",
":",
"conv_size",
"=",
"tinyify",
"(",
"[",
"32",
",",
"64",
",",
"64",
"]",
",",
"tiny_mode",
",",
"small_mode",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"latent\"",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"images",
"=",
"tf",
".",
"to_float",
"(",
"images",
")",
"images",
"=",
"tf",
".",
"unstack",
"(",
"images",
",",
"axis",
"=",
"time_axis",
")",
"images",
"=",
"tf",
".",
"concat",
"(",
"images",
",",
"axis",
"=",
"3",
")",
"x",
"=",
"images",
"x",
"=",
"common_layers",
".",
"make_even_size",
"(",
"x",
")",
"x",
"=",
"tfl",
".",
"conv2d",
"(",
"x",
",",
"conv_size",
"[",
"0",
"]",
",",
"[",
"3",
",",
"3",
"]",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"padding",
"=",
"\"SAME\"",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"name",
"=",
"\"latent_conv1\"",
")",
"x",
"=",
"tfcl",
".",
"layer_norm",
"(",
"x",
")",
"if",
"not",
"small_mode",
":",
"x",
"=",
"tfl",
".",
"conv2d",
"(",
"x",
",",
"conv_size",
"[",
"1",
"]",
",",
"[",
"3",
",",
"3",
"]",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"padding",
"=",
"\"SAME\"",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"name",
"=",
"\"latent_conv2\"",
")",
"x",
"=",
"tfcl",
".",
"layer_norm",
"(",
"x",
")",
"x",
"=",
"tfl",
".",
"conv2d",
"(",
"x",
",",
"conv_size",
"[",
"2",
"]",
",",
"[",
"3",
",",
"3",
"]",
",",
"strides",
"=",
"(",
"1",
",",
"1",
")",
",",
"padding",
"=",
"\"SAME\"",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"name",
"=",
"\"latent_conv3\"",
")",
"x",
"=",
"tfcl",
".",
"layer_norm",
"(",
"x",
")",
"nc",
"=",
"latent_channels",
"mean",
"=",
"tfl",
".",
"conv2d",
"(",
"x",
",",
"nc",
",",
"[",
"3",
",",
"3",
"]",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"padding",
"=",
"\"SAME\"",
",",
"activation",
"=",
"None",
",",
"name",
"=",
"\"latent_mean\"",
")",
"logv",
"=",
"tfl",
".",
"conv2d",
"(",
"x",
",",
"nc",
",",
"[",
"3",
",",
"3",
"]",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"padding",
"=",
"\"SAME\"",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"name",
"=",
"\"latent_std\"",
")",
"logvar",
"=",
"logv",
"+",
"min_logvar",
"# No latent tower at inference time, just standard gaussian.",
"if",
"not",
"is_training",
":",
"return",
"tf",
".",
"zeros_like",
"(",
"mean",
")",
",",
"tf",
".",
"zeros_like",
"(",
"logvar",
")",
"# No latent in the first phase",
"ret_mean",
",",
"ret_logvar",
"=",
"tf",
".",
"cond",
"(",
"random_latent",
",",
"lambda",
":",
"(",
"tf",
".",
"zeros_like",
"(",
"mean",
")",
",",
"tf",
".",
"zeros_like",
"(",
"logvar",
")",
")",
",",
"lambda",
":",
"(",
"mean",
",",
"logvar",
")",
")",
"return",
"ret_mean",
",",
"ret_logvar"
] |
Builds convolutional latent tower for stochastic model.
At training time this tower generates a latent distribution (mean and std)
conditioned on the entire video. This latent variable will be fed to the
main tower as an extra variable to be used for future frames prediction.
At inference time, the tower is disabled and only returns latents sampled
from N(0,1).
If the multi_latent flag is on, a different latent is generated for every
timestep.
Args:
images: tensor of ground truth image sequences
time_axis: the time axis in images tensor
latent_channels: number of latent channels
min_logvar: minimum value for log_var
is_training: whether or not it is training mode
random_latent: whether or not to generate random latents
tiny_mode: whether or not it is tiny_mode. tiny_mode sets the number of
conv channels to 1 at each layer; useful for integration tests.
small_mode: whether or not it is small_mode. small_mode is the same model
with fewer conv and LSTM layers and fewer channels; suitable for less
complex videos and for testing.
Returns:
latent_mean: predicted latent mean
latent_logvar: predicted latent log variance
|
[
"Builds",
"convolutional",
"latent",
"tower",
"for",
"stochastic",
"model",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L516-L582
|
21,854
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
extract_random_video_patch
|
def extract_random_video_patch(videos, num_frames=-1):
"""For every video, extract a random consecutive patch of num_frames.
Args:
videos: 5-D Tensor, (NTHWC)
num_frames: Integer, if -1 then the entire video is returned.
Returns:
video_patch: 5-D Tensor, (NTHWC) with T = num_frames.
Raises:
ValueError: If num_frames is greater than the number of total frames in
the video.
"""
if num_frames == -1:
return videos
batch_size, num_total_frames, h, w, c = common_layers.shape_list(videos)
if num_total_frames < num_frames:
raise ValueError("Expected num_frames <= %d, got %d" %
(num_total_frames, num_frames))
# Randomly choose start_inds for each video.
frame_start = tf.random_uniform(
shape=(batch_size,), minval=0, maxval=num_total_frames - num_frames + 1,
dtype=tf.int32)
# [start[0], start[0] + 1, ... start[0] + num_frames - 1] + ...
# [start[batch_size-1], ... start[batch_size-1] + num_frames - 1]
range_inds = tf.expand_dims(tf.range(num_frames), axis=0)
frame_inds = range_inds + tf.expand_dims(frame_start, axis=1)
frame_inds = tf.reshape(frame_inds, [-1])
# [0]*num_frames + [1]*num_frames + ... [batch_size-1]*num_frames
batch_inds = tf.expand_dims(tf.range(batch_size), axis=1)
batch_inds = tf.tile(batch_inds, [1, num_frames])
batch_inds = tf.reshape(batch_inds, [-1])
gather_inds = tf.stack((batch_inds, frame_inds), axis=1)
video_patches = tf.gather_nd(videos, gather_inds)
return tf.reshape(video_patches, (batch_size, num_frames, h, w, c))
|
python
|
def extract_random_video_patch(videos, num_frames=-1):
"""For every video, extract a random consecutive patch of num_frames.
Args:
videos: 5-D Tensor, (NTHWC)
num_frames: Integer, if -1 then the entire video is returned.
Returns:
video_patch: 5-D Tensor, (NTHWC) with T = num_frames.
Raises:
ValueError: If num_frames is greater than the number of total frames in
the video.
"""
if num_frames == -1:
return videos
batch_size, num_total_frames, h, w, c = common_layers.shape_list(videos)
if num_total_frames < num_frames:
raise ValueError("Expected num_frames <= %d, got %d" %
(num_total_frames, num_frames))
# Randomly choose start_inds for each video.
frame_start = tf.random_uniform(
shape=(batch_size,), minval=0, maxval=num_total_frames - num_frames + 1,
dtype=tf.int32)
# [start[0], start[0] + 1, ... start[0] + num_frames - 1] + ...
# [start[batch_size-1], ... start[batch_size-1] + num_frames - 1]
range_inds = tf.expand_dims(tf.range(num_frames), axis=0)
frame_inds = range_inds + tf.expand_dims(frame_start, axis=1)
frame_inds = tf.reshape(frame_inds, [-1])
# [0]*num_frames + [1]*num_frames + ... [batch_size-1]*num_frames
batch_inds = tf.expand_dims(tf.range(batch_size), axis=1)
batch_inds = tf.tile(batch_inds, [1, num_frames])
batch_inds = tf.reshape(batch_inds, [-1])
gather_inds = tf.stack((batch_inds, frame_inds), axis=1)
video_patches = tf.gather_nd(videos, gather_inds)
return tf.reshape(video_patches, (batch_size, num_frames, h, w, c))
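
# A minimal sketch, assuming TF 1.x; crops an independent random 4-frame
# window from each 10-frame video in the batch.
import tensorflow as tf

videos = tf.zeros([8, 10, 64, 64, 3])  # (batch, time, height, width, chans)
clips = extract_random_video_patch(videos, num_frames=4)  # (8, 4, 64, 64, 3)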
|
[
"def",
"extract_random_video_patch",
"(",
"videos",
",",
"num_frames",
"=",
"-",
"1",
")",
":",
"if",
"num_frames",
"==",
"-",
"1",
":",
"return",
"videos",
"batch_size",
",",
"num_total_frames",
",",
"h",
",",
"w",
",",
"c",
"=",
"common_layers",
".",
"shape_list",
"(",
"videos",
")",
"if",
"num_total_frames",
"<",
"num_frames",
":",
"raise",
"ValueError",
"(",
"\"Expected num_frames <= %d, got %d\"",
"%",
"(",
"num_total_frames",
",",
"num_frames",
")",
")",
"# Randomly choose start_inds for each video.",
"frame_start",
"=",
"tf",
".",
"random_uniform",
"(",
"shape",
"=",
"(",
"batch_size",
",",
")",
",",
"minval",
"=",
"0",
",",
"maxval",
"=",
"num_total_frames",
"-",
"num_frames",
"+",
"1",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"# [start[0], start[0] + 1, ... start[0] + num_frames - 1] + ...",
"# [start[batch_size-1], ... start[batch_size-1] + num_frames - 1]",
"range_inds",
"=",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"range",
"(",
"num_frames",
")",
",",
"axis",
"=",
"0",
")",
"frame_inds",
"=",
"range_inds",
"+",
"tf",
".",
"expand_dims",
"(",
"frame_start",
",",
"axis",
"=",
"1",
")",
"frame_inds",
"=",
"tf",
".",
"reshape",
"(",
"frame_inds",
",",
"[",
"-",
"1",
"]",
")",
"# [0]*num_frames + [1]*num_frames + ... [batch_size-1]*num_frames",
"batch_inds",
"=",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"range",
"(",
"batch_size",
")",
",",
"axis",
"=",
"1",
")",
"batch_inds",
"=",
"tf",
".",
"tile",
"(",
"batch_inds",
",",
"[",
"1",
",",
"num_frames",
"]",
")",
"batch_inds",
"=",
"tf",
".",
"reshape",
"(",
"batch_inds",
",",
"[",
"-",
"1",
"]",
")",
"gather_inds",
"=",
"tf",
".",
"stack",
"(",
"(",
"batch_inds",
",",
"frame_inds",
")",
",",
"axis",
"=",
"1",
")",
"video_patches",
"=",
"tf",
".",
"gather_nd",
"(",
"videos",
",",
"gather_inds",
")",
"return",
"tf",
".",
"reshape",
"(",
"video_patches",
",",
"(",
"batch_size",
",",
"num_frames",
",",
"h",
",",
"w",
",",
"c",
")",
")"
] |
For every video, extract a random consecutive patch of num_frames.
Args:
videos: 5-D Tensor, (NTHWC)
num_frames: Integer, if -1 then the entire video is returned.
Returns:
video_patch: 5-D Tensor, (NTHWC) with T = num_frames.
Raises:
ValueError: If num_frames is greater than the number of total frames in
the video.
|
[
"For",
"every",
"video",
"extract",
"a",
"random",
"consecutive",
"patch",
"of",
"num_frames",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L621-L658
|
21,855
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
VideoWriter.write_multi
|
def write_multi(self, frames, encoded_frames=None):
"""Writes multiple video frames."""
if encoded_frames is None:
# Infinite iterator.
encoded_frames = iter(lambda: None, 1)
for (frame, encoded_frame) in zip(frames, encoded_frames):
self.write(frame, encoded_frame)
|
python
|
def write_multi(self, frames, encoded_frames=None):
"""Writes multiple video frames."""
if encoded_frames is None:
# Infinite iterator.
encoded_frames = iter(lambda: None, 1)
for (frame, encoded_frame) in zip(frames, encoded_frames):
self.write(frame, encoded_frame)
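
# A minimal sketch; iter(lambda: None, 1) above is an endless stream of
# None values, so zip() never truncates frames when no pre-encoded frames
# are passed. Usage via the WholeVideoWriter subclass defined later in this
# file (requires ffmpeg on PATH):
import numpy as np

writer = WholeVideoWriter(fps=4)
writer.write_multi(np.zeros((10, 32, 32, 3), dtype=np.uint8))
video_bytes = writer.finish()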
|
[
"def",
"write_multi",
"(",
"self",
",",
"frames",
",",
"encoded_frames",
"=",
"None",
")",
":",
"if",
"encoded_frames",
"is",
"None",
":",
"# Infinite iterator.",
"encoded_frames",
"=",
"iter",
"(",
"lambda",
":",
"None",
",",
"1",
")",
"for",
"(",
"frame",
",",
"encoded_frame",
")",
"in",
"zip",
"(",
"frames",
",",
"encoded_frames",
")",
":",
"self",
".",
"write",
"(",
"frame",
",",
"encoded_frame",
")"
] |
Writes multiple video frames.
|
[
"Writes",
"multiple",
"video",
"frames",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L668-L674
|
21,856
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
WholeVideoWriter.__init_ffmpeg
|
def __init_ffmpeg(self, image_shape):
"""Initializes ffmpeg to write frames."""
import itertools # pylint: disable=g-import-not-at-top
from subprocess import Popen, PIPE # pylint: disable=g-import-not-at-top,g-multiple-import,g-importing-member
ffmpeg = "ffmpeg"
height, width, channels = image_shape
self.cmd = [
ffmpeg, "-y",
"-f", "rawvideo",
"-vcodec", "rawvideo",
"-r", "%.02f" % self.fps,
"-s", "%dx%d" % (width, height),
"-pix_fmt", {1: "gray", 3: "rgb24"}[channels],
"-i", "-",
"-filter_complex", "[0:v]split[x][z];[x]fifo[w];[z]palettegen,fifo[y];"
"[w][y]paletteuse,fifo",
"-r", "%.02f" % self.fps,
"-f", self.file_format,
"-qscale", "0",
"-"
]
self.proc = Popen(
self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=-1
)
(self._out_thread, self._err_thread) = itertools.starmap(
self._start_reader_thread, [
(self.proc.stdout, self._out_chunks),
(self.proc.stderr, self._err_chunks)
]
)
|
python
|
def __init_ffmpeg(self, image_shape):
"""Initializes ffmpeg to write frames."""
import itertools # pylint: disable=g-import-not-at-top
from subprocess import Popen, PIPE # pylint: disable=g-import-not-at-top,g-multiple-import,g-importing-member
ffmpeg = "ffmpeg"
height, width, channels = image_shape
self.cmd = [
ffmpeg, "-y",
"-f", "rawvideo",
"-vcodec", "rawvideo",
"-r", "%.02f" % self.fps,
"-s", "%dx%d" % (width, height),
"-pix_fmt", {1: "gray", 3: "rgb24"}[channels],
"-i", "-",
"-filter_complex", "[0:v]split[x][z];[x]fifo[w];[z]palettegen,fifo[y];"
"[w][y]paletteuse,fifo",
"-r", "%.02f" % self.fps,
"-f", self.file_format,
"-qscale", "0",
"-"
]
self.proc = Popen(
self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=-1
)
(self._out_thread, self._err_thread) = itertools.starmap(
self._start_reader_thread, [
(self.proc.stdout, self._out_chunks),
(self.proc.stderr, self._err_chunks)
]
)
|
[
"def",
"__init_ffmpeg",
"(",
"self",
",",
"image_shape",
")",
":",
"import",
"itertools",
"# pylint: disable=g-import-not-at-top",
"from",
"subprocess",
"import",
"Popen",
",",
"PIPE",
"# pylint: disable=g-import-not-at-top,g-multiple-import,g-importing-member",
"ffmpeg",
"=",
"\"ffmpeg\"",
"height",
",",
"width",
",",
"channels",
"=",
"image_shape",
"self",
".",
"cmd",
"=",
"[",
"ffmpeg",
",",
"\"-y\"",
",",
"\"-f\"",
",",
"\"rawvideo\"",
",",
"\"-vcodec\"",
",",
"\"rawvideo\"",
",",
"\"-r\"",
",",
"\"%.02f\"",
"%",
"self",
".",
"fps",
",",
"\"-s\"",
",",
"\"%dx%d\"",
"%",
"(",
"width",
",",
"height",
")",
",",
"\"-pix_fmt\"",
",",
"{",
"1",
":",
"\"gray\"",
",",
"3",
":",
"\"rgb24\"",
"}",
"[",
"channels",
"]",
",",
"\"-i\"",
",",
"\"-\"",
",",
"\"-filter_complex\"",
",",
"\"[0:v]split[x][z];[x]fifo[w];[z]palettegen,fifo[y];\"",
"\"[w][y]paletteuse,fifo\"",
",",
"\"-r\"",
",",
"\"%.02f\"",
"%",
"self",
".",
"fps",
",",
"\"-f\"",
",",
"self",
".",
"file_format",
",",
"\"-qscale\"",
",",
"\"0\"",
",",
"\"-\"",
"]",
"self",
".",
"proc",
"=",
"Popen",
"(",
"self",
".",
"cmd",
",",
"stdin",
"=",
"PIPE",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
",",
"bufsize",
"=",
"-",
"1",
")",
"(",
"self",
".",
"_out_thread",
",",
"self",
".",
"_err_thread",
")",
"=",
"itertools",
".",
"starmap",
"(",
"self",
".",
"_start_reader_thread",
",",
"[",
"(",
"self",
".",
"proc",
".",
"stdout",
",",
"self",
".",
"_out_chunks",
")",
",",
"(",
"self",
".",
"proc",
".",
"stderr",
",",
"self",
".",
"_err_chunks",
")",
"]",
")"
] |
Initializes ffmpeg to write frames.
|
[
"Initializes",
"ffmpeg",
"to",
"write",
"frames",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L715-L744
|
21,857
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
WholeVideoWriter._start_reader_thread
|
def _start_reader_thread(self, stream, chunks):
"""Starts a thread for reading output from FFMPEG.
The thread reads consecutive chunks from the stream and saves them in
the given list.
Args:
stream: output stream of the FFMPEG process.
chunks: list to save output chunks to.
Returns:
Thread
"""
import io # pylint: disable=g-import-not-at-top
import threading # pylint: disable=g-import-not-at-top
def target():
while True:
chunk = stream.read(io.DEFAULT_BUFFER_SIZE)
if not chunk:
break
chunks.append(chunk)
thread = threading.Thread(target=target)
thread.start()
return thread
|
python
|
def _start_reader_thread(self, stream, chunks):
"""Starts a thread for reading output from FFMPEG.
The thread reads consecutive chunks from the stream and saves them in
the given list.
Args:
stream: output stream of the FFMPEG process.
chunks: list to save output chunks to.
Returns:
Thread
"""
import io # pylint: disable=g-import-not-at-top
import threading # pylint: disable=g-import-not-at-top
def target():
while True:
chunk = stream.read(io.DEFAULT_BUFFER_SIZE)
if not chunk:
break
chunks.append(chunk)
thread = threading.Thread(target=target)
thread.start()
return thread
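
# A standalone sketch of the same drain-the-pipe pattern, assuming a POSIX
# `cat` binary; without a reader thread, a subprocess can deadlock once its
# output fills the pipe buffer.
import io
import subprocess
import threading

proc = subprocess.Popen(["cat"], stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE)
chunks = []

def drain(stream, sink):
  while True:
    chunk = stream.read(io.DEFAULT_BUFFER_SIZE)
    if not chunk:
      break
    sink.append(chunk)

reader = threading.Thread(target=drain, args=(proc.stdout, chunks))
reader.start()
proc.stdin.write(b"hello")
proc.stdin.close()
reader.join()
assert b"".join(chunks) == b"hello"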
|
[
"def",
"_start_reader_thread",
"(",
"self",
",",
"stream",
",",
"chunks",
")",
":",
"import",
"io",
"# pylint: disable=g-import-not-at-top",
"import",
"threading",
"# pylint: disable=g-import-not-at-top",
"def",
"target",
"(",
")",
":",
"while",
"True",
":",
"chunk",
"=",
"stream",
".",
"read",
"(",
"io",
".",
"DEFAULT_BUFFER_SIZE",
")",
"if",
"not",
"chunk",
":",
"break",
"chunks",
".",
"append",
"(",
"chunk",
")",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"target",
")",
"thread",
".",
"start",
"(",
")",
"return",
"thread"
] |
Starts a thread for reading output from FFMPEG.
The thread reads consecutive chunks from the stream and saves them in
the given list.
Args:
stream: output stream of the FFMPEG process.
chunks: list to save output chunks to.
Returns:
Thread
|
[
"Starts",
"a",
"thread",
"for",
"reading",
"output",
"from",
"FFMPEG",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L746-L769
|
21,858
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_video.py
|
WholeVideoWriter.finish
|
def finish(self):
"""Finishes transconding and returns the video.
Returns:
bytes
Raises:
IOError: in case of transcoding error.
"""
if self.proc is None:
return None
self.proc.stdin.close()
for thread in (self._out_thread, self._err_thread):
thread.join()
(out, err) = [
b"".join(chunks) for chunks in (self._out_chunks, self._err_chunks)
]
self.proc.stdout.close()
self.proc.stderr.close()
if self.proc.returncode:
err = "\n".join([" ".join(self.cmd), err.decode("utf8")])
raise IOError(err)
del self.proc
self.proc = None
return out
|
python
|
def finish(self):
"""Finishes transconding and returns the video.
Returns:
bytes
Raises:
IOError: in case of transcoding error.
"""
if self.proc is None:
return None
self.proc.stdin.close()
for thread in (self._out_thread, self._err_thread):
thread.join()
(out, err) = [
b"".join(chunks) for chunks in (self._out_chunks, self._err_chunks)
]
self.proc.stdout.close()
self.proc.stderr.close()
if self.proc.returncode:
err = "\n".join([" ".join(self.cmd), err.decode("utf8")])
raise IOError(err)
del self.proc
self.proc = None
return out
|
[
"def",
"finish",
"(",
"self",
")",
":",
"if",
"self",
".",
"proc",
"is",
"None",
":",
"return",
"None",
"self",
".",
"proc",
".",
"stdin",
".",
"close",
"(",
")",
"for",
"thread",
"in",
"(",
"self",
".",
"_out_thread",
",",
"self",
".",
"_err_thread",
")",
":",
"thread",
".",
"join",
"(",
")",
"(",
"out",
",",
"err",
")",
"=",
"[",
"b\"\"",
".",
"join",
"(",
"chunks",
")",
"for",
"chunks",
"in",
"(",
"self",
".",
"_out_chunks",
",",
"self",
".",
"_err_chunks",
")",
"]",
"self",
".",
"proc",
".",
"stdout",
".",
"close",
"(",
")",
"self",
".",
"proc",
".",
"stderr",
".",
"close",
"(",
")",
"if",
"self",
".",
"proc",
".",
"returncode",
":",
"err",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"\" \"",
".",
"join",
"(",
"self",
".",
"cmd",
")",
",",
"err",
".",
"decode",
"(",
"\"utf8\"",
")",
"]",
")",
"raise",
"IOError",
"(",
"err",
")",
"del",
"self",
".",
"proc",
"self",
".",
"proc",
"=",
"None",
"return",
"out"
] |
Finishes transcoding and returns the video.
Returns:
bytes
Raises:
IOError: in case of transcoding error.
|
[
"Finishes",
"transconding",
"and",
"returns",
"the",
"video",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L776-L800
|
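The two records above (_start_reader_thread and finish) implement a standard subprocess pattern: stdout and stderr are drained on background threads while the parent writes to stdin, since a child that fills an OS pipe buffer would otherwise block and deadlock against the blocked writer. A minimal standalone sketch of the same lifecycle, using cat as a stand-in for ffmpeg (so it assumes a Unix-like system):

import io
import subprocess
import threading

def start_reader(stream, chunks):
  # Same shape as _start_reader_thread: drain `stream` into `chunks`.
  def target():
    while True:
      chunk = stream.read(io.DEFAULT_BUFFER_SIZE)
      if not chunk:
        break
      chunks.append(chunk)
  thread = threading.Thread(target=target)
  thread.start()
  return thread

proc = subprocess.Popen(["cat"], stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out_chunks, err_chunks = [], []
threads = [start_reader(proc.stdout, out_chunks),
           start_reader(proc.stderr, err_chunks)]
proc.stdin.write(b"payload" * 100000)  # large enough to overflow a pipe buffer
proc.stdin.close()                     # EOF: cat flushes its output and exits
for t in threads:
  t.join()                             # readers finish once the streams close
if proc.wait():
  raise IOError(b"".join(err_chunks).decode("utf8"))
assert b"".join(out_chunks).startswith(b"payload")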
21,859
|
tensorflow/tensor2tensor
|
tensor2tensor/serving/query.py
|
validate_flags
|
def validate_flags():
"""Validates flags are set to acceptable values."""
if FLAGS.cloud_mlengine_model_name:
assert not FLAGS.server
assert not FLAGS.servable_name
else:
assert FLAGS.server
assert FLAGS.servable_name
|
python
|
def validate_flags():
"""Validates flags are set to acceptable values."""
if FLAGS.cloud_mlengine_model_name:
assert not FLAGS.server
assert not FLAGS.servable_name
else:
assert FLAGS.server
assert FLAGS.servable_name
|
[
"def",
"validate_flags",
"(",
")",
":",
"if",
"FLAGS",
".",
"cloud_mlengine_model_name",
":",
"assert",
"not",
"FLAGS",
".",
"server",
"assert",
"not",
"FLAGS",
".",
"servable_name",
"else",
":",
"assert",
"FLAGS",
".",
"server",
"assert",
"FLAGS",
".",
"servable_name"
] |
Validates flags are set to acceptable values.
|
[
"Validates",
"flags",
"are",
"set",
"to",
"acceptable",
"values",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/serving/query.py#L53-L60
|
21,860
|
tensorflow/tensor2tensor
|
tensor2tensor/serving/query.py
|
make_request_fn
|
def make_request_fn():
"""Returns a request function."""
if FLAGS.cloud_mlengine_model_name:
request_fn = serving_utils.make_cloud_mlengine_request_fn(
credentials=GoogleCredentials.get_application_default(),
model_name=FLAGS.cloud_mlengine_model_name,
version=FLAGS.cloud_mlengine_model_version)
else:
request_fn = serving_utils.make_grpc_request_fn(
servable_name=FLAGS.servable_name,
server=FLAGS.server,
timeout_secs=FLAGS.timeout_secs)
return request_fn
|
python
|
def make_request_fn():
"""Returns a request function."""
if FLAGS.cloud_mlengine_model_name:
request_fn = serving_utils.make_cloud_mlengine_request_fn(
credentials=GoogleCredentials.get_application_default(),
model_name=FLAGS.cloud_mlengine_model_name,
version=FLAGS.cloud_mlengine_model_version)
else:
request_fn = serving_utils.make_grpc_request_fn(
servable_name=FLAGS.servable_name,
server=FLAGS.server,
timeout_secs=FLAGS.timeout_secs)
return request_fn
|
[
"def",
"make_request_fn",
"(",
")",
":",
"if",
"FLAGS",
".",
"cloud_mlengine_model_name",
":",
"request_fn",
"=",
"serving_utils",
".",
"make_cloud_mlengine_request_fn",
"(",
"credentials",
"=",
"GoogleCredentials",
".",
"get_application_default",
"(",
")",
",",
"model_name",
"=",
"FLAGS",
".",
"cloud_mlengine_model_name",
",",
"version",
"=",
"FLAGS",
".",
"cloud_mlengine_model_version",
")",
"else",
":",
"request_fn",
"=",
"serving_utils",
".",
"make_grpc_request_fn",
"(",
"servable_name",
"=",
"FLAGS",
".",
"servable_name",
",",
"server",
"=",
"FLAGS",
".",
"server",
",",
"timeout_secs",
"=",
"FLAGS",
".",
"timeout_secs",
")",
"return",
"request_fn"
] |
Returns a request function.
|
[
"Returns",
"a",
"request",
"function",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/serving/query.py#L63-L76
|
21,861
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/savp.py
|
NextFrameSavpBase.encoder
|
def encoder(self, inputs, n_layers=3):
"""Convnet that encodes inputs into mean and std of a gaussian.
Args:
inputs: 5-D Tensor, shape (batch_size, num_frames, width, height, channels)
n_layers: Number of layers.
Returns:
z_mu: Mean of the latent gaussians.
z_log_var: log(var) of the latent gaussians.
Raises:
ValueError: If inputs is not a 5-D tensor or not float32.
"""
latent_dims = self.hparams.z_dim
shape_as_list = inputs.shape.as_list()
if len(shape_as_list) != 5:
raise ValueError("Expected inputs to be a 5-D, got %d" %
len(shape_as_list))
if inputs.dtype != tf.float32:
raise ValueError("Expected dtype tf.float32, got %s" % inputs.dtype)
# Flatten (N,T,W,H,C) into (NT,W,H,C)
batch_size, _ = shape_as_list[:2]
inputs = tf.reshape(inputs, [-1] + list(inputs.shape)[2:])
n_filters = 64
rectified = None
# Applies 3 layer conv-net with padding, instance normalization
# and leaky relu as per the encoder in
# https://github.com/alexlee-gk/video_prediction
padding = [[0, 0], [1, 1], [1, 1], [0, 0]]
for i in range(n_layers):
with tf.variable_scope("layer_%d" % (i + 1)):
n_filters *= 2**i
if i:
padded = tf.pad(rectified, padding)
else:
padded = tf.pad(inputs, padding)
convolved = tf.layers.conv2d(padded, filters=n_filters, kernel_size=4,
strides=2, padding="VALID")
normalized = tf.contrib.layers.instance_norm(convolved)
rectified = tf.nn.leaky_relu(normalized, alpha=0.2)
# Mean pooling across all spatial dimensions.
pooled = tf.nn.avg_pool(
rectified, [1] + rectified.shape[1:3].as_list() + [1],
strides=[1, 1, 1, 1], padding="VALID")
squeezed = tf.squeeze(pooled, [1, 2])
# Down-project and output the mean and log of the standard deviation of
# the latents.
with tf.variable_scope("z_mu"):
z_mu = tf.layers.dense(squeezed, latent_dims)
with tf.variable_scope("z_log_sigma_sq"):
z_log_var = tf.layers.dense(squeezed, latent_dims)
z_log_var = tf.clip_by_value(z_log_var, -10, 10)
# Reshape to (batch_size X num_frames X latent_dims)
z_mu = tf.reshape(z_mu, (batch_size, -1, latent_dims))
z_log_var = tf.reshape(
z_log_var, (batch_size, -1, latent_dims))
return z_mu, z_log_var
|
python
|
def encoder(self, inputs, n_layers=3):
"""Convnet that encodes inputs into mean and std of a gaussian.
Args:
inputs: 5-D Tensor, shape (batch_size, num_frames, width, height, channels)
n_layers: Number of layers.
Returns:
z_mu: Mean of the latent gaussians.
z_log_var: log(var) of the latent gaussians.
Raises:
ValueError: If inputs is not a 5-D tensor or not float32.
"""
latent_dims = self.hparams.z_dim
shape_as_list = inputs.shape.as_list()
if len(shape_as_list) != 5:
raise ValueError("Expected inputs to be a 5-D, got %d" %
len(shape_as_list))
if inputs.dtype != tf.float32:
raise ValueError("Expected dtype tf.float32, got %s" % inputs.dtype)
# Flatten (N,T,W,H,C) into (NT,W,H,C)
batch_size, _ = shape_as_list[:2]
inputs = tf.reshape(inputs, [-1] + list(inputs.shape)[2:])
n_filters = 64
rectified = None
# Applies 3 layer conv-net with padding, instance normalization
# and leaky relu as per the encoder in
# https://github.com/alexlee-gk/video_prediction
padding = [[0, 0], [1, 1], [1, 1], [0, 0]]
for i in range(n_layers):
with tf.variable_scope("layer_%d" % (i + 1)):
n_filters *= 2**i
if i:
padded = tf.pad(rectified, padding)
else:
padded = tf.pad(inputs, padding)
convolved = tf.layers.conv2d(padded, filters=n_filters, kernel_size=4,
strides=2, padding="VALID")
normalized = tf.contrib.layers.instance_norm(convolved)
rectified = tf.nn.leaky_relu(normalized, alpha=0.2)
# Mean pooling across all spatial dimensions.
pooled = tf.nn.avg_pool(
rectified, [1] + rectified.shape[1:3].as_list() + [1],
strides=[1, 1, 1, 1], padding="VALID")
squeezed = tf.squeeze(pooled, [1, 2])
# Down-project and output the mean and log of the standard deviation of
# the latents.
with tf.variable_scope("z_mu"):
z_mu = tf.layers.dense(squeezed, latent_dims)
with tf.variable_scope("z_log_sigma_sq"):
z_log_var = tf.layers.dense(squeezed, latent_dims)
z_log_var = tf.clip_by_value(z_log_var, -10, 10)
# Reshape to (batch_size X num_frames X latent_dims)
z_mu = tf.reshape(z_mu, (batch_size, -1, latent_dims))
z_log_var = tf.reshape(
z_log_var, (batch_size, -1, latent_dims))
return z_mu, z_log_var
|
[
"def",
"encoder",
"(",
"self",
",",
"inputs",
",",
"n_layers",
"=",
"3",
")",
":",
"latent_dims",
"=",
"self",
".",
"hparams",
".",
"z_dim",
"shape_as_list",
"=",
"inputs",
".",
"shape",
".",
"as_list",
"(",
")",
"if",
"len",
"(",
"shape_as_list",
")",
"!=",
"5",
":",
"raise",
"ValueError",
"(",
"\"Expected inputs to be a 5-D, got %d\"",
"%",
"len",
"(",
"shape_as_list",
")",
")",
"if",
"inputs",
".",
"dtype",
"!=",
"tf",
".",
"float32",
":",
"raise",
"ValueError",
"(",
"\"Expected dtype tf.float32, got %s\"",
"%",
"inputs",
".",
"dtype",
")",
"# Flatten (N,T,W,H,C) into (NT,W,H,C)",
"batch_size",
",",
"_",
"=",
"shape_as_list",
"[",
":",
"2",
"]",
"inputs",
"=",
"tf",
".",
"reshape",
"(",
"inputs",
",",
"[",
"-",
"1",
"]",
"+",
"list",
"(",
"inputs",
".",
"shape",
")",
"[",
"2",
":",
"]",
")",
"n_filters",
"=",
"64",
"rectified",
"=",
"None",
"# Applies 3 layer conv-net with padding, instance normalization",
"# and leaky relu as per the encoder in",
"# https://github.com/alexlee-gk/video_prediction",
"padding",
"=",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"1",
",",
"1",
"]",
",",
"[",
"1",
",",
"1",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"n_layers",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"layer_%d\"",
"%",
"(",
"i",
"+",
"1",
")",
")",
":",
"n_filters",
"*=",
"2",
"**",
"i",
"if",
"i",
":",
"padded",
"=",
"tf",
".",
"pad",
"(",
"rectified",
",",
"padding",
")",
"else",
":",
"padded",
"=",
"tf",
".",
"pad",
"(",
"inputs",
",",
"padding",
")",
"convolved",
"=",
"tf",
".",
"layers",
".",
"conv2d",
"(",
"padded",
",",
"filters",
"=",
"n_filters",
",",
"kernel_size",
"=",
"4",
",",
"strides",
"=",
"2",
",",
"padding",
"=",
"\"VALID\"",
")",
"normalized",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"instance_norm",
"(",
"convolved",
")",
"rectified",
"=",
"tf",
".",
"nn",
".",
"leaky_relu",
"(",
"normalized",
",",
"alpha",
"=",
"0.2",
")",
"# Mean pooling across all spatial dimensions.",
"pooled",
"=",
"tf",
".",
"nn",
".",
"avg_pool",
"(",
"rectified",
",",
"[",
"1",
"]",
"+",
"rectified",
".",
"shape",
"[",
"1",
":",
"3",
"]",
".",
"as_list",
"(",
")",
"+",
"[",
"1",
"]",
",",
"strides",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"padding",
"=",
"\"VALID\"",
")",
"squeezed",
"=",
"tf",
".",
"squeeze",
"(",
"pooled",
",",
"[",
"1",
",",
"2",
"]",
")",
"# Down-project and output the mean and log of the standard deviation of",
"# the latents.",
"with",
"tf",
".",
"variable_scope",
"(",
"\"z_mu\"",
")",
":",
"z_mu",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"squeezed",
",",
"latent_dims",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"z_log_sigma_sq\"",
")",
":",
"z_log_var",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"squeezed",
",",
"latent_dims",
")",
"z_log_var",
"=",
"tf",
".",
"clip_by_value",
"(",
"z_log_var",
",",
"-",
"10",
",",
"10",
")",
"# Reshape to (batch_size X num_frames X latent_dims)",
"z_mu",
"=",
"tf",
".",
"reshape",
"(",
"z_mu",
",",
"(",
"batch_size",
",",
"-",
"1",
",",
"latent_dims",
")",
")",
"z_log_var",
"=",
"tf",
".",
"reshape",
"(",
"z_log_var",
",",
"(",
"batch_size",
",",
"-",
"1",
",",
"latent_dims",
")",
")",
"return",
"z_mu",
",",
"z_log_var"
] |
Convnet that encodes inputs into mean and std of a gaussian.
Args:
inputs: 5-D Tensor, shape (batch_size, num_frames, width, height, channels)
n_layers: Number of layers.
Returns:
z_mu: Mean of the latent gaussians.
z_log_var: log(var) of the latent gaussians.
Raises:
ValueError: If inputs is not a 5-D tensor or not float32.
|
[
"Convnet",
"that",
"encodes",
"inputs",
"into",
"mean",
"and",
"std",
"of",
"a",
"gaussian",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/savp.py#L42-L105
|
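A quick check of the encoder's shape arithmetic: each layer above pads H and W by 1 and applies a kernel-4, stride-2 VALID convolution, which exactly halves even spatial dimensions, since (H + 2 - 4) // 2 + 1 == H // 2. A standalone sketch:

def conv_out(size, pad=1, kernel=4, stride=2):
  # VALID convolution applied to a padded input.
  return (size + 2 * pad - kernel) // stride + 1

size = 64
for layer in range(3):
  size = conv_out(size)
  print("layer", layer + 1, "->", size)  # 32, 16, 8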
21,862
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/savp.py
|
NextFrameSavpBase.get_fc_dimensions
|
def get_fc_dimensions(self, strides, kernel_sizes):
"""Get expected fully connected shape after a series of convolutions."""
output_height, output_width, _ = self.hparams.problem.frame_shape
output_steps = self.hparams.video_num_target_frames
output_shape = np.array([output_steps, output_height, output_width])
for curr_stride, kernel_size in zip(strides, kernel_sizes):
output_shape = self.expected_output_shape(
output_shape, np.array(curr_stride), 1, kernel_size)
return np.prod(output_shape) * self.hparams.num_discriminator_filters * 8
|
python
|
def get_fc_dimensions(self, strides, kernel_sizes):
"""Get expected fully connected shape after a series of convolutions."""
output_height, output_width, _ = self.hparams.problem.frame_shape
output_steps = self.hparams.video_num_target_frames
output_shape = np.array([output_steps, output_height, output_width])
for curr_stride, kernel_size in zip(strides, kernel_sizes):
output_shape = self.expected_output_shape(
output_shape, np.array(curr_stride), 1, kernel_size)
return np.prod(output_shape) * self.hparams.num_discriminator_filters * 8
|
[
"def",
"get_fc_dimensions",
"(",
"self",
",",
"strides",
",",
"kernel_sizes",
")",
":",
"output_height",
",",
"output_width",
",",
"_",
"=",
"self",
".",
"hparams",
".",
"problem",
".",
"frame_shape",
"output_steps",
"=",
"self",
".",
"hparams",
".",
"video_num_target_frames",
"output_shape",
"=",
"np",
".",
"array",
"(",
"[",
"output_steps",
",",
"output_height",
",",
"output_width",
"]",
")",
"for",
"curr_stride",
",",
"kernel_size",
"in",
"zip",
"(",
"strides",
",",
"kernel_sizes",
")",
":",
"output_shape",
"=",
"self",
".",
"expected_output_shape",
"(",
"output_shape",
",",
"np",
".",
"array",
"(",
"curr_stride",
")",
",",
"1",
",",
"kernel_size",
")",
"return",
"np",
".",
"prod",
"(",
"output_shape",
")",
"*",
"self",
".",
"hparams",
".",
"num_discriminator_filters",
"*",
"8"
] |
Get expected fully connected shape after a series of convolutions.
|
[
"Get",
"expected",
"fully",
"connected",
"shape",
"after",
"a",
"series",
"of",
"convolutions",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/savp.py#L110-L118
|
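expected_output_shape is defined elsewhere in savp.py and is not shown in this dump; given that pad_conv3d_lrelu (in a later record) pads each of T, H, W by 1 and convolves with padding="VALID", it plausibly implements the standard convolution-arithmetic formula. A sketch under that assumption, not the repo's exact code:

import numpy as np

def expected_output_shape(input_shape, stride, padding, kernel_size):
  # Elementwise over (frames, height, width).
  return (input_shape + 2 * padding - kernel_size) // stride + 1

shape = np.array([4, 64, 64])  # (frames, height, width)
print(expected_output_shape(shape, np.array([1, 2, 2]), 1, 4))  # [ 3 32 32]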
21,863
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/savp.py
|
NextFrameSavpBase.discriminator
|
def discriminator(self, frames):
"""3-D SNGAN discriminator.
Args:
frames: a list of batch-major tensors indexed by time.
Returns:
logits: 1-D Tensor with shape=batch_size.
Positive logits imply that the discriminator thinks that it
belongs to the true class.
"""
ndf = self.hparams.num_discriminator_filters
frames = tf.stack(frames)
# Switch from time-major axis to batch-major axis.
frames = common_video.swap_time_and_batch_axes(frames)
# 3-D Conv-net mapping inputs to activations.
num_outputs = [ndf, ndf*2, ndf*2, ndf*4, ndf*4, ndf*8, ndf*8]
kernel_sizes = [3, 4, 3, 4, 3, 4, 3]
strides = [[1, 1, 1], [1, 2, 2], [1, 1, 1], [1, 2, 2], [1, 1, 1],
[2, 2, 2], [1, 1, 1]]
names = ["video_sn_conv0_0", "video_sn_conv0_1", "video_sn_conv1_0",
"video_sn_conv1_1", "video_sn_conv2_0", "video_sn_conv2_1",
"video_sn_conv3_0"]
iterable = zip(num_outputs, kernel_sizes, strides, names)
activations = frames
for num_filters, kernel_size, stride, name in iterable:
activations = self.pad_conv3d_lrelu(activations, num_filters, kernel_size,
stride, name)
num_fc_dimensions = self.get_fc_dimensions(strides, kernel_sizes)
activations = tf.reshape(activations, (-1, num_fc_dimensions))
return tf.squeeze(tf.layers.dense(activations, 1))
|
python
|
def discriminator(self, frames):
"""3-D SNGAN discriminator.
Args:
frames: a list of batch-major tensors indexed by time.
Returns:
logits: 1-D Tensor with shape=batch_size.
Positive logits imply that the discriminator thinks that it
belongs to the true class.
"""
ndf = self.hparams.num_discriminator_filters
frames = tf.stack(frames)
# Switch from time-major axis to batch-major axis.
frames = common_video.swap_time_and_batch_axes(frames)
# 3-D Conv-net mapping inputs to activations.
num_outputs = [ndf, ndf*2, ndf*2, ndf*4, ndf*4, ndf*8, ndf*8]
kernel_sizes = [3, 4, 3, 4, 3, 4, 3]
strides = [[1, 1, 1], [1, 2, 2], [1, 1, 1], [1, 2, 2], [1, 1, 1],
[2, 2, 2], [1, 1, 1]]
names = ["video_sn_conv0_0", "video_sn_conv0_1", "video_sn_conv1_0",
"video_sn_conv1_1", "video_sn_conv2_0", "video_sn_conv2_1",
"video_sn_conv3_0"]
iterable = zip(num_outputs, kernel_sizes, strides, names)
activations = frames
for num_filters, kernel_size, stride, name in iterable:
activations = self.pad_conv3d_lrelu(activations, num_filters, kernel_size,
stride, name)
num_fc_dimensions = self.get_fc_dimensions(strides, kernel_sizes)
activations = tf.reshape(activations, (-1, num_fc_dimensions))
return tf.squeeze(tf.layers.dense(activations, 1))
|
[
"def",
"discriminator",
"(",
"self",
",",
"frames",
")",
":",
"ndf",
"=",
"self",
".",
"hparams",
".",
"num_discriminator_filters",
"frames",
"=",
"tf",
".",
"stack",
"(",
"frames",
")",
"# Switch from time-major axis to batch-major axis.",
"frames",
"=",
"common_video",
".",
"swap_time_and_batch_axes",
"(",
"frames",
")",
"# 3-D Conv-net mapping inputs to activations.",
"num_outputs",
"=",
"[",
"ndf",
",",
"ndf",
"*",
"2",
",",
"ndf",
"*",
"2",
",",
"ndf",
"*",
"4",
",",
"ndf",
"*",
"4",
",",
"ndf",
"*",
"8",
",",
"ndf",
"*",
"8",
"]",
"kernel_sizes",
"=",
"[",
"3",
",",
"4",
",",
"3",
",",
"4",
",",
"3",
",",
"4",
",",
"3",
"]",
"strides",
"=",
"[",
"[",
"1",
",",
"1",
",",
"1",
"]",
",",
"[",
"1",
",",
"2",
",",
"2",
"]",
",",
"[",
"1",
",",
"1",
",",
"1",
"]",
",",
"[",
"1",
",",
"2",
",",
"2",
"]",
",",
"[",
"1",
",",
"1",
",",
"1",
"]",
",",
"[",
"2",
",",
"2",
",",
"2",
"]",
",",
"[",
"1",
",",
"1",
",",
"1",
"]",
"]",
"names",
"=",
"[",
"\"video_sn_conv0_0\"",
",",
"\"video_sn_conv0_1\"",
",",
"\"video_sn_conv1_0\"",
",",
"\"video_sn_conv1_1\"",
",",
"\"video_sn_conv2_0\"",
",",
"\"video_sn_conv2_1\"",
",",
"\"video_sn_conv3_0\"",
"]",
"iterable",
"=",
"zip",
"(",
"num_outputs",
",",
"kernel_sizes",
",",
"strides",
",",
"names",
")",
"activations",
"=",
"frames",
"for",
"num_filters",
",",
"kernel_size",
",",
"stride",
",",
"name",
"in",
"iterable",
":",
"activations",
"=",
"self",
".",
"pad_conv3d_lrelu",
"(",
"activations",
",",
"num_filters",
",",
"kernel_size",
",",
"stride",
",",
"name",
")",
"num_fc_dimensions",
"=",
"self",
".",
"get_fc_dimensions",
"(",
"strides",
",",
"kernel_sizes",
")",
"activations",
"=",
"tf",
".",
"reshape",
"(",
"activations",
",",
"(",
"-",
"1",
",",
"num_fc_dimensions",
")",
")",
"return",
"tf",
".",
"squeeze",
"(",
"tf",
".",
"layers",
".",
"dense",
"(",
"activations",
",",
"1",
")",
")"
] |
3-D SNGAN discriminator.
Args:
frames: a list of batch-major tensors indexed by time.
Returns:
logits: 1-D Tensor with shape=batch_size.
Positive logits imply that the discriminator thinks that it
belongs to the true class.
|
[
"3",
"-",
"D",
"SNGAN",
"discriminator",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/savp.py#L120-L153
|
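The stride schedule above fixes the discriminator's overall downsampling. Multiplying the per-layer strides column-wise (ignoring the small kernel-edge effects that get_fc_dimensions accounts for exactly) gives a factor of 2 in time and 8 in height and width:

import numpy as np

strides = np.array([[1, 1, 1], [1, 2, 2], [1, 1, 1], [1, 2, 2],
                    [1, 1, 1], [2, 2, 2], [1, 1, 1]])
print(strides.prod(axis=0))  # [2 8 8]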
21,864
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/savp.py
|
NextFrameSavpBase.d_step
|
def d_step(self, true_frames, gen_frames):
"""Performs the discriminator step in computing the GAN loss.
Applies stop-gradient to the generated frames while computing the
discriminator loss to make sure that the gradients are not back-propagated
to the generator. This makes sure that only the discriminator is updated.
Args:
true_frames: True outputs
gen_frames: Generated frames.
Returns:
d_loss: Loss component due to the discriminator.
"""
hparam_to_disc_loss = {
"least_squares": gan_losses.least_squares_discriminator_loss,
"cross_entropy": gan_losses.modified_discriminator_loss,
"wasserstein": gan_losses.wasserstein_discriminator_loss}
# Concat across batch-axis.
_, batch_size, _, _, _ = common_layers.shape_list(true_frames)
all_frames = tf.concat(
[true_frames, tf.stop_gradient(gen_frames)], axis=1)
all_logits = self.discriminator(all_frames)
true_logits, fake_logits_stop = \
all_logits[:batch_size], all_logits[batch_size:]
mean_true_logits = tf.reduce_mean(true_logits)
tf.summary.scalar("mean_true_logits", mean_true_logits)
mean_fake_logits_stop = tf.reduce_mean(fake_logits_stop)
tf.summary.scalar("mean_fake_logits_stop", mean_fake_logits_stop)
discriminator_loss_func = hparam_to_disc_loss[self.hparams.gan_loss]
gan_d_loss = discriminator_loss_func(
discriminator_real_outputs=true_logits,
discriminator_gen_outputs=fake_logits_stop,
add_summaries=True)
return gan_d_loss, true_logits, fake_logits_stop
|
python
|
def d_step(self, true_frames, gen_frames):
"""Performs the discriminator step in computing the GAN loss.
Applies stop-gradient to the generated frames while computing the
discriminator loss to make sure that the gradients are not back-propagated
to the generator. This makes sure that only the discriminator is updated.
Args:
true_frames: True outputs
gen_frames: Generated frames.
Returns:
d_loss: Loss component due to the discriminator.
"""
hparam_to_disc_loss = {
"least_squares": gan_losses.least_squares_discriminator_loss,
"cross_entropy": gan_losses.modified_discriminator_loss,
"wasserstein": gan_losses.wasserstein_discriminator_loss}
# Concat across batch-axis.
_, batch_size, _, _, _ = common_layers.shape_list(true_frames)
all_frames = tf.concat(
[true_frames, tf.stop_gradient(gen_frames)], axis=1)
all_logits = self.discriminator(all_frames)
true_logits, fake_logits_stop = \
all_logits[:batch_size], all_logits[batch_size:]
mean_true_logits = tf.reduce_mean(true_logits)
tf.summary.scalar("mean_true_logits", mean_true_logits)
mean_fake_logits_stop = tf.reduce_mean(fake_logits_stop)
tf.summary.scalar("mean_fake_logits_stop", mean_fake_logits_stop)
discriminator_loss_func = hparam_to_disc_loss[self.hparams.gan_loss]
gan_d_loss = discriminator_loss_func(
discriminator_real_outputs=true_logits,
discriminator_gen_outputs=fake_logits_stop,
add_summaries=True)
return gan_d_loss, true_logits, fake_logits_stop
|
[
"def",
"d_step",
"(",
"self",
",",
"true_frames",
",",
"gen_frames",
")",
":",
"hparam_to_disc_loss",
"=",
"{",
"\"least_squares\"",
":",
"gan_losses",
".",
"least_squares_discriminator_loss",
",",
"\"cross_entropy\"",
":",
"gan_losses",
".",
"modified_discriminator_loss",
",",
"\"wasserstein\"",
":",
"gan_losses",
".",
"wasserstein_discriminator_loss",
"}",
"# Concat across batch-axis.",
"_",
",",
"batch_size",
",",
"_",
",",
"_",
",",
"_",
"=",
"common_layers",
".",
"shape_list",
"(",
"true_frames",
")",
"all_frames",
"=",
"tf",
".",
"concat",
"(",
"[",
"true_frames",
",",
"tf",
".",
"stop_gradient",
"(",
"gen_frames",
")",
"]",
",",
"axis",
"=",
"1",
")",
"all_logits",
"=",
"self",
".",
"discriminator",
"(",
"all_frames",
")",
"true_logits",
",",
"fake_logits_stop",
"=",
"all_logits",
"[",
":",
"batch_size",
"]",
",",
"all_logits",
"[",
"batch_size",
":",
"]",
"mean_true_logits",
"=",
"tf",
".",
"reduce_mean",
"(",
"true_logits",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"mean_true_logits\"",
",",
"mean_true_logits",
")",
"mean_fake_logits_stop",
"=",
"tf",
".",
"reduce_mean",
"(",
"fake_logits_stop",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"mean_fake_logits_stop\"",
",",
"mean_fake_logits_stop",
")",
"discriminator_loss_func",
"=",
"hparam_to_disc_loss",
"[",
"self",
".",
"hparams",
".",
"gan_loss",
"]",
"gan_d_loss",
"=",
"discriminator_loss_func",
"(",
"discriminator_real_outputs",
"=",
"true_logits",
",",
"discriminator_gen_outputs",
"=",
"fake_logits_stop",
",",
"add_summaries",
"=",
"True",
")",
"return",
"gan_d_loss",
",",
"true_logits",
",",
"fake_logits_stop"
] |
Performs the discriminator step in computing the GAN loss.
Applies stop-gradient to the generated frames while computing the
discriminator loss to make sure that the gradients are not back-propagated
to the generator. This makes sure that only the discriminator is updated.
Args:
true_frames: True outputs
gen_frames: Generated frames.
Returns:
d_loss: Loss component due to the discriminator.
|
[
"Performs",
"the",
"discriminator",
"step",
"in",
"computing",
"the",
"GAN",
"loss",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/savp.py#L155-L192
|
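The effect of tf.stop_gradient in d_step can be checked in isolation. In this sketch (TF1-style graph mode, matching the repo), the stand-in generator variable receives no gradient through the stopped path, so an optimizer driven by this loss would update only the discriminator variable:

import tensorflow as tf

g_var = tf.get_variable("g", initializer=1.0)  # stands in for generator weights
d_var = tf.get_variable("d", initializer=2.0)  # stands in for discriminator weights
fake = g_var * 3.0                             # "generated" output
logits = tf.stop_gradient(fake) * d_var        # discriminator sees stopped frames
loss = tf.square(logits)
# Prints [None, <tf.Tensor ...>]: no gradient reaches the generator.
print(tf.gradients(loss, [g_var, d_var]))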
21,865
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/savp.py
|
NextFrameSavpBase.g_step
|
def g_step(self, gen_frames, fake_logits_stop):
"""Performs the generator step in computing the GAN loss.
Args:
gen_frames: Generated frames
fake_logits_stop: Logits corresponding to the generated frames as per
the discriminator. Assumed to have a stop-gradient term.
Returns:
gan_g_loss_pos_d: Loss.
gan_g_loss_neg_d: -gan_g_loss_pos_d but with a stop gradient on generator.
"""
hparam_to_gen_loss = {
"least_squares": gan_losses.least_squares_generator_loss,
"cross_entropy": gan_losses.modified_generator_loss,
"wasserstein": gan_losses.wasserstein_generator_loss
}
fake_logits = self.discriminator(gen_frames)
mean_fake_logits = tf.reduce_mean(fake_logits)
tf.summary.scalar("mean_fake_logits", mean_fake_logits)
# Generator loss.
# Using gan_g_loss_pos_d updates the discriminator as well.
# To avoid this add gan_g_loss_neg_d = -gan_g_loss_pos_d
# but with stop gradient on the generator.
# This makes sure that the net gradient on the discriminator is zero and
# net-gradient on the generator is just due to the gan_g_loss_pos_d.
generator_loss_func = hparam_to_gen_loss[self.hparams.gan_loss]
gan_g_loss_pos_d = generator_loss_func(
discriminator_gen_outputs=fake_logits, add_summaries=True)
gan_g_loss_neg_d = -generator_loss_func(
discriminator_gen_outputs=fake_logits_stop, add_summaries=True)
return gan_g_loss_pos_d, gan_g_loss_neg_d
|
python
|
def g_step(self, gen_frames, fake_logits_stop):
"""Performs the generator step in computing the GAN loss.
Args:
gen_frames: Generated frames
fake_logits_stop: Logits corresponding to the generated frames as per
the discriminator. Assumed to have a stop-gradient term.
Returns:
gan_g_loss_pos_d: Loss.
gan_g_loss_neg_d: -gan_g_loss_pos_d but with a stop gradient on generator.
"""
hparam_to_gen_loss = {
"least_squares": gan_losses.least_squares_generator_loss,
"cross_entropy": gan_losses.modified_generator_loss,
"wasserstein": gan_losses.wasserstein_generator_loss
}
fake_logits = self.discriminator(gen_frames)
mean_fake_logits = tf.reduce_mean(fake_logits)
tf.summary.scalar("mean_fake_logits", mean_fake_logits)
# Generator loss.
# Using gan_g_loss_pos_d updates the discriminator as well.
# To avoid this add gan_g_loss_neg_d = -gan_g_loss_pos_d
# but with stop gradient on the generator.
# This makes sure that the net gradient on the discriminator is zero and
# net-gradient on the generator is just due to the gan_g_loss_pos_d.
generator_loss_func = hparam_to_gen_loss[self.hparams.gan_loss]
gan_g_loss_pos_d = generator_loss_func(
discriminator_gen_outputs=fake_logits, add_summaries=True)
gan_g_loss_neg_d = -generator_loss_func(
discriminator_gen_outputs=fake_logits_stop, add_summaries=True)
return gan_g_loss_pos_d, gan_g_loss_neg_d
|
[
"def",
"g_step",
"(",
"self",
",",
"gen_frames",
",",
"fake_logits_stop",
")",
":",
"hparam_to_gen_loss",
"=",
"{",
"\"least_squares\"",
":",
"gan_losses",
".",
"least_squares_generator_loss",
",",
"\"cross_entropy\"",
":",
"gan_losses",
".",
"modified_generator_loss",
",",
"\"wasserstein\"",
":",
"gan_losses",
".",
"wasserstein_generator_loss",
"}",
"fake_logits",
"=",
"self",
".",
"discriminator",
"(",
"gen_frames",
")",
"mean_fake_logits",
"=",
"tf",
".",
"reduce_mean",
"(",
"fake_logits",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"mean_fake_logits\"",
",",
"mean_fake_logits",
")",
"# Generator loss.",
"# Using gan_g_loss_pos_d updates the discriminator as well.",
"# To avoid this add gan_g_loss_neg_d = -gan_g_loss_pos_d",
"# but with stop gradient on the generator.",
"# This makes sure that the net gradient on the discriminator is zero and",
"# net-gradient on the generator is just due to the gan_g_loss_pos_d.",
"generator_loss_func",
"=",
"hparam_to_gen_loss",
"[",
"self",
".",
"hparams",
".",
"gan_loss",
"]",
"gan_g_loss_pos_d",
"=",
"generator_loss_func",
"(",
"discriminator_gen_outputs",
"=",
"fake_logits",
",",
"add_summaries",
"=",
"True",
")",
"gan_g_loss_neg_d",
"=",
"-",
"generator_loss_func",
"(",
"discriminator_gen_outputs",
"=",
"fake_logits_stop",
",",
"add_summaries",
"=",
"True",
")",
"return",
"gan_g_loss_pos_d",
",",
"gan_g_loss_neg_d"
] |
Performs the generator step in computing the GAN loss.
Args:
gen_frames: Generated frames
fake_logits_stop: Logits corresponding to the generated frames as per
the discriminator. Assumed to have a stop-gradient term.
Returns:
gan_g_loss_pos_d: Loss.
gan_g_loss_neg_d: -gan_g_loss_pos_d but with a stop gradient on generator.
|
[
"Performs",
"the",
"generator",
"step",
"in",
"computing",
"the",
"GAN",
"loss",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/savp.py#L194-L226
|
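Writing out the trick described in g_step's comments (sg = stop-gradient, l = the generator loss): the stopped and unstopped fake logits are numerically equal, so the two terms carry identical discriminator gradients and cancel, while only the unstopped term reaches the generator:

  L_G = l(D(G(z))) - l(D(sg(G(z))))
  dL_G / d(theta_D) = l' * dD/d(theta_D) - l' * dD/d(theta_D) = 0
  dL_G / d(theta_G) = l' * dD/dx * dG/d(theta_G)

(Strictly, fake_logits and fake_logits_stop come from separate discriminator passes; the cancellation holds because the passes share weights and the normalization is per-sample.)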
21,866
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/savp.py
|
NextFrameSavpBase.get_gan_loss
|
def get_gan_loss(self, true_frames, gen_frames, name):
"""Get the discriminator + generator loss at every step.
  This performs a 1:1 update of the discriminator and generator at every
step.
Args:
true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be ground truth.
gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be fake.
name: discriminator scope.
Returns:
loss: 0-D Tensor, with d_loss + g_loss
"""
# D - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE):
gan_d_loss, _, fake_logits_stop = self.d_step(
true_frames, gen_frames)
# G - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=True):
gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step(
gen_frames, fake_logits_stop)
gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d
tf.summary.scalar("gan_loss_%s" % name, gan_g_loss_pos_d + gan_d_loss)
if self.hparams.gan_optimization == "joint":
gan_loss = gan_g_loss + gan_d_loss
else:
curr_step = self.get_iteration_num()
gan_loss = tf.cond(
tf.logical_not(curr_step % 2 == 0), lambda: gan_g_loss,
lambda: gan_d_loss)
return gan_loss
|
python
|
def get_gan_loss(self, true_frames, gen_frames, name):
"""Get the discriminator + generator loss at every step.
  This performs a 1:1 update of the discriminator and generator at every
step.
Args:
true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be ground truth.
gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be fake.
name: discriminator scope.
Returns:
loss: 0-D Tensor, with d_loss + g_loss
"""
# D - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE):
gan_d_loss, _, fake_logits_stop = self.d_step(
true_frames, gen_frames)
# G - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=True):
gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step(
gen_frames, fake_logits_stop)
gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d
tf.summary.scalar("gan_loss_%s" % name, gan_g_loss_pos_d + gan_d_loss)
if self.hparams.gan_optimization == "joint":
gan_loss = gan_g_loss + gan_d_loss
else:
curr_step = self.get_iteration_num()
gan_loss = tf.cond(
tf.logical_not(curr_step % 2 == 0), lambda: gan_g_loss,
lambda: gan_d_loss)
return gan_loss
|
[
"def",
"get_gan_loss",
"(",
"self",
",",
"true_frames",
",",
"gen_frames",
",",
"name",
")",
":",
"# D - STEP",
"with",
"tf",
".",
"variable_scope",
"(",
"\"%s_discriminator\"",
"%",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"gan_d_loss",
",",
"_",
",",
"fake_logits_stop",
"=",
"self",
".",
"d_step",
"(",
"true_frames",
",",
"gen_frames",
")",
"# G - STEP",
"with",
"tf",
".",
"variable_scope",
"(",
"\"%s_discriminator\"",
"%",
"name",
",",
"reuse",
"=",
"True",
")",
":",
"gan_g_loss_pos_d",
",",
"gan_g_loss_neg_d",
"=",
"self",
".",
"g_step",
"(",
"gen_frames",
",",
"fake_logits_stop",
")",
"gan_g_loss",
"=",
"gan_g_loss_pos_d",
"+",
"gan_g_loss_neg_d",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"gan_loss_%s\"",
"%",
"name",
",",
"gan_g_loss_pos_d",
"+",
"gan_d_loss",
")",
"if",
"self",
".",
"hparams",
".",
"gan_optimization",
"==",
"\"joint\"",
":",
"gan_loss",
"=",
"gan_g_loss",
"+",
"gan_d_loss",
"else",
":",
"curr_step",
"=",
"self",
".",
"get_iteration_num",
"(",
")",
"gan_loss",
"=",
"tf",
".",
"cond",
"(",
"tf",
".",
"logical_not",
"(",
"curr_step",
"%",
"2",
"==",
"0",
")",
",",
"lambda",
":",
"gan_g_loss",
",",
"lambda",
":",
"gan_d_loss",
")",
"return",
"gan_loss"
] |
Get the discriminator + generator loss at every step.
This performs a 1:1 update of the discriminator and generator at every
step.
Args:
true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be ground truth.
gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be fake.
name: discriminator scope.
Returns:
loss: 0-D Tensor, with d_loss + g_loss
|
[
"Get",
"the",
"discriminator",
"+",
"generator",
"loss",
"at",
"every",
"step",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/savp.py#L228-L262
|
21,867
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/savp.py
|
NextFrameSavpBase.get_extra_loss
|
def get_extra_loss(self, latent_means=None, latent_stds=None,
true_frames=None, gen_frames=None):
"""Gets extra loss from VAE and GAN."""
if not self.is_training:
return 0.0
vae_loss, d_vae_loss, d_gan_loss = 0.0, 0.0, 0.0
# Use sv2p's KL divergence computation.
if self.hparams.use_vae:
vae_loss = super(NextFrameSavpBase, self).get_extra_loss(
latent_means=latent_means, latent_stds=latent_stds)
if self.hparams.use_gan:
# Strip out the first context_frames for the true_frames
# Strip out the first context_frames - 1 for the gen_frames
context_frames = self.hparams.video_num_input_frames
true_frames = tf.stack(
tf.unstack(true_frames, axis=0)[context_frames:])
# discriminator for VAE.
if self.hparams.use_vae:
gen_enc_frames = tf.stack(
tf.unstack(gen_frames, axis=0)[context_frames-1:])
d_vae_loss = self.get_gan_loss(true_frames, gen_enc_frames, name="vae")
# discriminator for GAN.
gen_prior_frames = tf.stack(
tf.unstack(self.gen_prior_video, axis=0)[context_frames-1:])
d_gan_loss = self.get_gan_loss(true_frames, gen_prior_frames, name="gan")
return (
vae_loss + self.hparams.gan_loss_multiplier * d_gan_loss +
self.hparams.gan_vae_loss_multiplier * d_vae_loss)
|
python
|
def get_extra_loss(self, latent_means=None, latent_stds=None,
true_frames=None, gen_frames=None):
"""Gets extra loss from VAE and GAN."""
if not self.is_training:
return 0.0
vae_loss, d_vae_loss, d_gan_loss = 0.0, 0.0, 0.0
# Use sv2p's KL divergence computation.
if self.hparams.use_vae:
vae_loss = super(NextFrameSavpBase, self).get_extra_loss(
latent_means=latent_means, latent_stds=latent_stds)
if self.hparams.use_gan:
# Strip out the first context_frames for the true_frames
# Strip out the first context_frames - 1 for the gen_frames
context_frames = self.hparams.video_num_input_frames
true_frames = tf.stack(
tf.unstack(true_frames, axis=0)[context_frames:])
# discriminator for VAE.
if self.hparams.use_vae:
gen_enc_frames = tf.stack(
tf.unstack(gen_frames, axis=0)[context_frames-1:])
d_vae_loss = self.get_gan_loss(true_frames, gen_enc_frames, name="vae")
# discriminator for GAN.
gen_prior_frames = tf.stack(
tf.unstack(self.gen_prior_video, axis=0)[context_frames-1:])
d_gan_loss = self.get_gan_loss(true_frames, gen_prior_frames, name="gan")
return (
vae_loss + self.hparams.gan_loss_multiplier * d_gan_loss +
self.hparams.gan_vae_loss_multiplier * d_vae_loss)
|
[
"def",
"get_extra_loss",
"(",
"self",
",",
"latent_means",
"=",
"None",
",",
"latent_stds",
"=",
"None",
",",
"true_frames",
"=",
"None",
",",
"gen_frames",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"is_training",
":",
"return",
"0.0",
"vae_loss",
",",
"d_vae_loss",
",",
"d_gan_loss",
"=",
"0.0",
",",
"0.0",
",",
"0.0",
"# Use sv2p's KL divergence computation.",
"if",
"self",
".",
"hparams",
".",
"use_vae",
":",
"vae_loss",
"=",
"super",
"(",
"NextFrameSavpBase",
",",
"self",
")",
".",
"get_extra_loss",
"(",
"latent_means",
"=",
"latent_means",
",",
"latent_stds",
"=",
"latent_stds",
")",
"if",
"self",
".",
"hparams",
".",
"use_gan",
":",
"# Strip out the first context_frames for the true_frames",
"# Strip out the first context_frames - 1 for the gen_frames",
"context_frames",
"=",
"self",
".",
"hparams",
".",
"video_num_input_frames",
"true_frames",
"=",
"tf",
".",
"stack",
"(",
"tf",
".",
"unstack",
"(",
"true_frames",
",",
"axis",
"=",
"0",
")",
"[",
"context_frames",
":",
"]",
")",
"# discriminator for VAE.",
"if",
"self",
".",
"hparams",
".",
"use_vae",
":",
"gen_enc_frames",
"=",
"tf",
".",
"stack",
"(",
"tf",
".",
"unstack",
"(",
"gen_frames",
",",
"axis",
"=",
"0",
")",
"[",
"context_frames",
"-",
"1",
":",
"]",
")",
"d_vae_loss",
"=",
"self",
".",
"get_gan_loss",
"(",
"true_frames",
",",
"gen_enc_frames",
",",
"name",
"=",
"\"vae\"",
")",
"# discriminator for GAN.",
"gen_prior_frames",
"=",
"tf",
".",
"stack",
"(",
"tf",
".",
"unstack",
"(",
"self",
".",
"gen_prior_video",
",",
"axis",
"=",
"0",
")",
"[",
"context_frames",
"-",
"1",
":",
"]",
")",
"d_gan_loss",
"=",
"self",
".",
"get_gan_loss",
"(",
"true_frames",
",",
"gen_prior_frames",
",",
"name",
"=",
"\"gan\"",
")",
"return",
"(",
"vae_loss",
"+",
"self",
".",
"hparams",
".",
"gan_loss_multiplier",
"*",
"d_gan_loss",
"+",
"self",
".",
"hparams",
".",
"gan_vae_loss_multiplier",
"*",
"d_vae_loss",
")"
] |
Gets extra loss from VAE and GAN.
|
[
"Gets",
"extra",
"loss",
"from",
"VAE",
"and",
"GAN",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/savp.py#L264-L296
|
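The off-by-one in the stripping above (context_frames for the true frames, context_frames - 1 for the generated ones) presumably aligns the two sequences: the model emits one prediction per input step, so generated frame t corresponds to true frame t + 1. A plain-Python illustration of the slicing under that reading:

true = ["x0", "x1", "x2", "x3", "x4", "x5"]
gen = ["g1", "g2", "g3", "g4", "g5"]  # g_t stands for the prediction of x_t
context = 2
print(true[context:])     # ['x2', 'x3', 'x4', 'x5']
print(gen[context - 1:])  # ['g2', 'g3', 'g4', 'g5'] -- aligned with the above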
21,868
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/savp.py
|
NextFrameSavpBase.pad_conv3d_lrelu
|
def pad_conv3d_lrelu(self, activations, n_filters, kernel_size, strides,
scope):
"""Pad, apply 3-D convolution and leaky relu."""
padding = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]
# tf.nn.conv3d accepts a list of 5 values for strides
# with first and last value equal to 1
if isinstance(strides, numbers.Integral):
strides = [strides] * 3
strides = [1] + strides + [1]
# Filter_shape = [K, K, K, num_input, num_output]
filter_shape = (
[kernel_size]*3 + activations.shape[-1:].as_list() + [n_filters])
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
conv_filter = tf.get_variable(
"conv_filter", shape=filter_shape,
initializer=tf.truncated_normal_initializer(stddev=0.02))
if self.hparams.use_spectral_norm:
conv_filter, assign_op = common_layers.apply_spectral_norm(conv_filter)
if self.is_training:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign_op)
padded = tf.pad(activations, padding)
convolved = tf.nn.conv3d(
padded, conv_filter, strides=strides, padding="VALID")
rectified = tf.nn.leaky_relu(convolved, alpha=0.2)
return rectified
|
python
|
def pad_conv3d_lrelu(self, activations, n_filters, kernel_size, strides,
scope):
"""Pad, apply 3-D convolution and leaky relu."""
padding = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]
# tf.nn.conv3d accepts a list of 5 values for strides
# with first and last value equal to 1
if isinstance(strides, numbers.Integral):
strides = [strides] * 3
strides = [1] + strides + [1]
# Filter_shape = [K, K, K, num_input, num_output]
filter_shape = (
[kernel_size]*3 + activations.shape[-1:].as_list() + [n_filters])
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
conv_filter = tf.get_variable(
"conv_filter", shape=filter_shape,
initializer=tf.truncated_normal_initializer(stddev=0.02))
if self.hparams.use_spectral_norm:
conv_filter, assign_op = common_layers.apply_spectral_norm(conv_filter)
if self.is_training:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign_op)
padded = tf.pad(activations, padding)
convolved = tf.nn.conv3d(
padded, conv_filter, strides=strides, padding="VALID")
rectified = tf.nn.leaky_relu(convolved, alpha=0.2)
return rectified
|
[
"def",
"pad_conv3d_lrelu",
"(",
"self",
",",
"activations",
",",
"n_filters",
",",
"kernel_size",
",",
"strides",
",",
"scope",
")",
":",
"padding",
"=",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"1",
",",
"1",
"]",
",",
"[",
"1",
",",
"1",
"]",
",",
"[",
"1",
",",
"1",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
"# tf.nn.conv3d accepts a list of 5 values for strides",
"# with first and last value equal to 1",
"if",
"isinstance",
"(",
"strides",
",",
"numbers",
".",
"Integral",
")",
":",
"strides",
"=",
"[",
"strides",
"]",
"*",
"3",
"strides",
"=",
"[",
"1",
"]",
"+",
"strides",
"+",
"[",
"1",
"]",
"# Filter_shape = [K, K, K, num_input, num_output]",
"filter_shape",
"=",
"(",
"[",
"kernel_size",
"]",
"*",
"3",
"+",
"activations",
".",
"shape",
"[",
"-",
"1",
":",
"]",
".",
"as_list",
"(",
")",
"+",
"[",
"n_filters",
"]",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"scope",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"conv_filter",
"=",
"tf",
".",
"get_variable",
"(",
"\"conv_filter\"",
",",
"shape",
"=",
"filter_shape",
",",
"initializer",
"=",
"tf",
".",
"truncated_normal_initializer",
"(",
"stddev",
"=",
"0.02",
")",
")",
"if",
"self",
".",
"hparams",
".",
"use_spectral_norm",
":",
"conv_filter",
",",
"assign_op",
"=",
"common_layers",
".",
"apply_spectral_norm",
"(",
"conv_filter",
")",
"if",
"self",
".",
"is_training",
":",
"tf",
".",
"add_to_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"UPDATE_OPS",
",",
"assign_op",
")",
"padded",
"=",
"tf",
".",
"pad",
"(",
"activations",
",",
"padding",
")",
"convolved",
"=",
"tf",
".",
"nn",
".",
"conv3d",
"(",
"padded",
",",
"conv_filter",
",",
"strides",
"=",
"strides",
",",
"padding",
"=",
"\"VALID\"",
")",
"rectified",
"=",
"tf",
".",
"nn",
".",
"leaky_relu",
"(",
"convolved",
",",
"alpha",
"=",
"0.2",
")",
"return",
"rectified"
] |
Pad, apply 3-D convolution and leaky relu.
|
[
"Pad",
"apply",
"3",
"-",
"D",
"convolution",
"and",
"leaky",
"relu",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/savp.py#L298-L327
|
21,869
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/pruning_utils.py
|
sparsify
|
def sparsify(sess, eval_model, pruning_strategy, pruning_params):
"""Prune the weights of a model and evaluate."""
weights = tf.trainable_variables()
def should_prune(name):
"""Whether to prune a weight or not."""
in_whitelist = not pruning_params.white_list or any(
e in name for e in pruning_params.white_list)
in_blacklist = any(e in name for e in pruning_params.black_list)
if pruning_params.white_list and not in_whitelist:
return False
elif in_blacklist:
return False
return True
weights = [w for w in weights if should_prune(w.name)]
tf.logging.info("Pruning weights: %s" % weights)
unpruned_weights = sess.run(weights)
reset_op = tf.no_op()
for w, ow in zip(weights, unpruned_weights):
op = tf.assign(w, ow)
reset_op = tf.group(reset_op, op)
for sparsity in pruning_params.sparsities:
set_weights_op = tf.no_op()
for w in weights:
op = tf.assign(w, pruning_strategy(w, sparsity))
set_weights_op = tf.group(set_weights_op, op)
sess.run(set_weights_op)
acc = eval_model()
tf.logging.info("\tPruning to sparsity = %f: acc = %f" % (sparsity, acc))
sess.run(reset_op)
|
python
|
def sparsify(sess, eval_model, pruning_strategy, pruning_params):
"""Prune the weights of a model and evaluate."""
weights = tf.trainable_variables()
def should_prune(name):
"""Whether to prune a weight or not."""
in_whitelist = not pruning_params.white_list or any(
e in name for e in pruning_params.white_list)
in_blacklist = any(e in name for e in pruning_params.black_list)
if pruning_params.white_list and not in_whitelist:
return False
elif in_blacklist:
return False
return True
weights = [w for w in weights if should_prune(w.name)]
tf.logging.info("Pruning weights: %s" % weights)
unpruned_weights = sess.run(weights)
reset_op = tf.no_op()
for w, ow in zip(weights, unpruned_weights):
op = tf.assign(w, ow)
reset_op = tf.group(reset_op, op)
for sparsity in pruning_params.sparsities:
set_weights_op = tf.no_op()
for w in weights:
op = tf.assign(w, pruning_strategy(w, sparsity))
set_weights_op = tf.group(set_weights_op, op)
sess.run(set_weights_op)
acc = eval_model()
tf.logging.info("\tPruning to sparsity = %f: acc = %f" % (sparsity, acc))
sess.run(reset_op)
|
[
"def",
"sparsify",
"(",
"sess",
",",
"eval_model",
",",
"pruning_strategy",
",",
"pruning_params",
")",
":",
"weights",
"=",
"tf",
".",
"trainable_variables",
"(",
")",
"def",
"should_prune",
"(",
"name",
")",
":",
"\"\"\"Whether to prune a weight or not.\"\"\"",
"in_whitelist",
"=",
"not",
"pruning_params",
".",
"white_list",
"or",
"any",
"(",
"e",
"in",
"name",
"for",
"e",
"in",
"pruning_params",
".",
"white_list",
")",
"in_blacklist",
"=",
"any",
"(",
"e",
"in",
"name",
"for",
"e",
"in",
"pruning_params",
".",
"black_list",
")",
"if",
"pruning_params",
".",
"white_list",
"and",
"not",
"in_whitelist",
":",
"return",
"False",
"elif",
"in_blacklist",
":",
"return",
"False",
"return",
"True",
"weights",
"=",
"[",
"w",
"for",
"w",
"in",
"weights",
"if",
"should_prune",
"(",
"w",
".",
"name",
")",
"]",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Pruning weights: %s\"",
"%",
"weights",
")",
"unpruned_weights",
"=",
"sess",
".",
"run",
"(",
"weights",
")",
"reset_op",
"=",
"tf",
".",
"no_op",
"(",
")",
"for",
"w",
",",
"ow",
"in",
"zip",
"(",
"weights",
",",
"unpruned_weights",
")",
":",
"op",
"=",
"tf",
".",
"assign",
"(",
"w",
",",
"ow",
")",
"reset_op",
"=",
"tf",
".",
"group",
"(",
"reset_op",
",",
"op",
")",
"for",
"sparsity",
"in",
"pruning_params",
".",
"sparsities",
":",
"set_weights_op",
"=",
"tf",
".",
"no_op",
"(",
")",
"for",
"w",
"in",
"weights",
":",
"op",
"=",
"tf",
".",
"assign",
"(",
"w",
",",
"pruning_strategy",
"(",
"w",
",",
"sparsity",
")",
")",
"set_weights_op",
"=",
"tf",
".",
"group",
"(",
"set_weights_op",
",",
"op",
")",
"sess",
".",
"run",
"(",
"set_weights_op",
")",
"acc",
"=",
"eval_model",
"(",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"\\tPruning to sparsity = %f: acc = %f\"",
"%",
"(",
"sparsity",
",",
"acc",
")",
")",
"sess",
".",
"run",
"(",
"reset_op",
")"
] |
Prune the weights of a model and evaluate.
|
[
"Prune",
"the",
"weights",
"of",
"a",
"model",
"and",
"evaluate",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/pruning_utils.py#L45-L80
|
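sparsify leaves pruning_strategy abstract (the repo defines its own strategies elsewhere in pruning_utils.py). A hypothetical magnitude-pruning strategy with the expected (weight, sparsity) -> pruned weight signature might look like this sketch:

import tensorflow as tf

def magnitude_prune(w, sparsity):
  # Zero out the smallest-magnitude fraction `sparsity` of the entries of w.
  flat = tf.reshape(tf.abs(w), [-1])
  n = tf.cast(tf.size(flat), tf.float32)
  keep = tf.maximum(tf.cast(tf.round(n * (1.0 - sparsity)), tf.int32), 1)
  threshold = tf.nn.top_k(flat, k=keep).values[-1]  # k-th largest magnitude
  return tf.where(tf.abs(w) >= threshold, w, tf.zeros_like(w))

# Hypothetical usage: sparsify(sess, eval_model, magnitude_prune, pruning_params)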
21,870
|
tensorflow/tensor2tensor
|
tensor2tensor/insights/server.py
|
DebugFrontendApplication.load_config
|
def load_config(self):
"""Loads the configuration."""
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
for key, value in iteritems(config):
self.cfg.set(key.lower(), value)
|
python
|
def load_config(self):
"""Loads the configuration."""
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
for key, value in iteritems(config):
self.cfg.set(key.lower(), value)
|
[
"def",
"load_config",
"(",
"self",
")",
":",
"config",
"=",
"dict",
"(",
"[",
"(",
"key",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"self",
".",
"options",
")",
"if",
"key",
"in",
"self",
".",
"cfg",
".",
"settings",
"and",
"value",
"is",
"not",
"None",
"]",
")",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"config",
")",
":",
"self",
".",
"cfg",
".",
"set",
"(",
"key",
".",
"lower",
"(",
")",
",",
"value",
")"
] |
Loads the configuration.
|
[
"Loads",
"the",
"configuration",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/insights/server.py#L79-L84
|
21,871
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/rl.py
|
ppo_atari_base
|
def ppo_atari_base():
"""Pong base parameters."""
hparams = ppo_discrete_action_base()
hparams.learning_rate_constant = 1e-4
hparams.epoch_length = 200
hparams.gae_gamma = 0.985
hparams.gae_lambda = 0.985
hparams.entropy_loss_coef = 0.003
hparams.value_loss_coef = 1
hparams.optimization_epochs = 3
hparams.epochs_num = 1000
hparams.policy_network = "feed_forward_cnn_small_categorical_policy"
hparams.clipping_coef = 0.2
hparams.optimization_batch_size = 20
hparams.clip_grad_norm = 0.5
return hparams
|
python
|
def ppo_atari_base():
"""Pong base parameters."""
hparams = ppo_discrete_action_base()
hparams.learning_rate_constant = 1e-4
hparams.epoch_length = 200
hparams.gae_gamma = 0.985
hparams.gae_lambda = 0.985
hparams.entropy_loss_coef = 0.003
hparams.value_loss_coef = 1
hparams.optimization_epochs = 3
hparams.epochs_num = 1000
hparams.policy_network = "feed_forward_cnn_small_categorical_policy"
hparams.clipping_coef = 0.2
hparams.optimization_batch_size = 20
hparams.clip_grad_norm = 0.5
return hparams
|
[
"def",
"ppo_atari_base",
"(",
")",
":",
"hparams",
"=",
"ppo_discrete_action_base",
"(",
")",
"hparams",
".",
"learning_rate_constant",
"=",
"1e-4",
"hparams",
".",
"epoch_length",
"=",
"200",
"hparams",
".",
"gae_gamma",
"=",
"0.985",
"hparams",
".",
"gae_lambda",
"=",
"0.985",
"hparams",
".",
"entropy_loss_coef",
"=",
"0.003",
"hparams",
".",
"value_loss_coef",
"=",
"1",
"hparams",
".",
"optimization_epochs",
"=",
"3",
"hparams",
".",
"epochs_num",
"=",
"1000",
"hparams",
".",
"policy_network",
"=",
"\"feed_forward_cnn_small_categorical_policy\"",
"hparams",
".",
"clipping_coef",
"=",
"0.2",
"hparams",
".",
"optimization_batch_size",
"=",
"20",
"hparams",
".",
"clip_grad_norm",
"=",
"0.5",
"return",
"hparams"
] |
Pong base parameters.
|
[
"Pong",
"base",
"parameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/rl.py#L100-L115
|
21,872
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/rl.py
|
ppo_original_params
|
def ppo_original_params():
"""Parameters based on the original PPO paper."""
hparams = ppo_atari_base()
hparams.learning_rate_constant = 2.5e-4
hparams.gae_gamma = 0.99
hparams.gae_lambda = 0.95
hparams.clipping_coef = 0.1
hparams.value_loss_coef = 1
hparams.entropy_loss_coef = 0.01
hparams.eval_every_epochs = 200
hparams.dropout_ppo = 0.1
# The parameters below are modified to accommodate short epoch_length (which
# is needed for model based rollouts).
hparams.epoch_length = 50
hparams.optimization_batch_size = 20
return hparams
|
python
|
def ppo_original_params():
"""Parameters based on the original PPO paper."""
hparams = ppo_atari_base()
hparams.learning_rate_constant = 2.5e-4
hparams.gae_gamma = 0.99
hparams.gae_lambda = 0.95
hparams.clipping_coef = 0.1
hparams.value_loss_coef = 1
hparams.entropy_loss_coef = 0.01
hparams.eval_every_epochs = 200
hparams.dropout_ppo = 0.1
# The parameters below are modified to accommodate short epoch_length (which
# is needed for model based rollouts).
hparams.epoch_length = 50
hparams.optimization_batch_size = 20
return hparams
|
[
"def",
"ppo_original_params",
"(",
")",
":",
"hparams",
"=",
"ppo_atari_base",
"(",
")",
"hparams",
".",
"learning_rate_constant",
"=",
"2.5e-4",
"hparams",
".",
"gae_gamma",
"=",
"0.99",
"hparams",
".",
"gae_lambda",
"=",
"0.95",
"hparams",
".",
"clipping_coef",
"=",
"0.1",
"hparams",
".",
"value_loss_coef",
"=",
"1",
"hparams",
".",
"entropy_loss_coef",
"=",
"0.01",
"hparams",
".",
"eval_every_epochs",
"=",
"200",
"hparams",
".",
"dropout_ppo",
"=",
"0.1",
"# The parameters below are modified to accommodate short epoch_length (which",
"# is needed for model based rollouts).",
"hparams",
".",
"epoch_length",
"=",
"50",
"hparams",
".",
"optimization_batch_size",
"=",
"20",
"return",
"hparams"
] |
Parameters based on the original PPO paper.
|
[
"Parameters",
"based",
"on",
"the",
"original",
"PPO",
"paper",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/rl.py#L119-L134
|
21,873
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/rl.py
|
ppo_original_world_model_stochastic_discrete
|
def ppo_original_world_model_stochastic_discrete():
"""Atari parameters with stochastic discrete world model as policy."""
hparams = ppo_original_params()
hparams.policy_network = "next_frame_basic_stochastic_discrete"
hparams_keys = hparams.values().keys()
video_hparams = basic_stochastic.next_frame_basic_stochastic_discrete()
for (name, value) in six.iteritems(video_hparams.values()):
if name in hparams_keys:
hparams.set_hparam(name, value)
else:
hparams.add_hparam(name, value)
  # To avoid OOM. Probably way too small.
hparams.optimization_batch_size = 1
hparams.weight_decay = 0
return hparams
|
python
|
def ppo_original_world_model_stochastic_discrete():
"""Atari parameters with stochastic discrete world model as policy."""
hparams = ppo_original_params()
hparams.policy_network = "next_frame_basic_stochastic_discrete"
hparams_keys = hparams.values().keys()
video_hparams = basic_stochastic.next_frame_basic_stochastic_discrete()
for (name, value) in six.iteritems(video_hparams.values()):
if name in hparams_keys:
hparams.set_hparam(name, value)
else:
hparams.add_hparam(name, value)
  # To avoid OOM. Probably way too small.
hparams.optimization_batch_size = 1
hparams.weight_decay = 0
return hparams
|
[
"def",
"ppo_original_world_model_stochastic_discrete",
"(",
")",
":",
"hparams",
"=",
"ppo_original_params",
"(",
")",
"hparams",
".",
"policy_network",
"=",
"\"next_frame_basic_stochastic_discrete\"",
"hparams_keys",
"=",
"hparams",
".",
"values",
"(",
")",
".",
"keys",
"(",
")",
"video_hparams",
"=",
"basic_stochastic",
".",
"next_frame_basic_stochastic_discrete",
"(",
")",
"for",
"(",
"name",
",",
"value",
")",
"in",
"six",
".",
"iteritems",
"(",
"video_hparams",
".",
"values",
"(",
")",
")",
":",
"if",
"name",
"in",
"hparams_keys",
":",
"hparams",
".",
"set_hparam",
"(",
"name",
",",
"value",
")",
"else",
":",
"hparams",
".",
"add_hparam",
"(",
"name",
",",
"value",
")",
"# To avoid OOM. Probably way to small.",
"hparams",
".",
"optimization_batch_size",
"=",
"1",
"hparams",
".",
"weight_decay",
"=",
"0",
"return",
"hparams"
] |
Atari parameters with stochastic discrete world model as policy.
|
[
"Atari",
"parameters",
"with",
"stochastic",
"discrete",
"world",
"model",
"as",
"policy",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/rl.py#L205-L219
|
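The set_hparam / add_hparam loop above is a general pattern for overlaying one HParams set onto another. A standalone sketch (TF1, where HParams lives in tf.contrib.training):

import tensorflow as tf

base = tf.contrib.training.HParams(learning_rate=0.1, batch_size=32)
extra = tf.contrib.training.HParams(batch_size=8, hidden_size=64)
base_keys = base.values().keys()
for name, value in extra.values().items():
  if name in base_keys:
    base.set_hparam(name, value)  # override an existing hparam
  else:
    base.add_hparam(name, value)  # register a brand-new one
print(base.values())  # {'batch_size': 8, 'hidden_size': 64, 'learning_rate': 0.1}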
21,874
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/rl.py
|
make_simulated_env_fn
|
def make_simulated_env_fn(**env_kwargs):
"""Returns a function creating a simulated env, in or out of graph.
Args:
**env_kwargs: kwargs to pass to the simulated env constructor.
Returns:
Function in_graph -> env.
"""
def env_fn(in_graph):
class_ = SimulatedBatchEnv if in_graph else SimulatedBatchGymEnv
return class_(**env_kwargs)
return env_fn
|
python
|
def make_simulated_env_fn(**env_kwargs):
"""Returns a function creating a simulated env, in or out of graph.
Args:
**env_kwargs: kwargs to pass to the simulated env constructor.
Returns:
Function in_graph -> env.
"""
def env_fn(in_graph):
class_ = SimulatedBatchEnv if in_graph else SimulatedBatchGymEnv
return class_(**env_kwargs)
return env_fn
|
[
"def",
"make_simulated_env_fn",
"(",
"*",
"*",
"env_kwargs",
")",
":",
"def",
"env_fn",
"(",
"in_graph",
")",
":",
"class_",
"=",
"SimulatedBatchEnv",
"if",
"in_graph",
"else",
"SimulatedBatchGymEnv",
"return",
"class_",
"(",
"*",
"*",
"env_kwargs",
")",
"return",
"env_fn"
] |
Returns a function creating a simulated env, in or out of graph.
Args:
**env_kwargs: kwargs to pass to the simulated env constructor.
Returns:
Function in_graph -> env.
|
[
"Returns",
"a",
"function",
"creating",
"a",
"simulated",
"env",
"in",
"or",
"out",
"of",
"graph",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/rl.py#L234-L246
|
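The factory above closes over **env_kwargs and defers the in-graph versus out-of-graph choice to call time. A self-contained sketch of that closure pattern; the two stand-in classes below are hypothetical placeholders for SimulatedBatchEnv and SimulatedBatchGymEnv:

class InGraphEnv(object):  # stand-in for SimulatedBatchEnv
  def __init__(self, **kwargs):
    self.kwargs = kwargs

class OutOfGraphEnv(object):  # stand-in for SimulatedBatchGymEnv
  def __init__(self, **kwargs):
    self.kwargs = kwargs

def make_env_fn(**env_kwargs):
  def env_fn(in_graph):
    class_ = InGraphEnv if in_graph else OutOfGraphEnv
    return class_(**env_kwargs)
  return env_fn

env_fn = make_env_fn(batch_size=4)
assert isinstance(env_fn(True), InGraphEnv)
assert env_fn(False).kwargs == {"batch_size": 4}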
21,875
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/rl.py
|
make_simulated_env_kwargs
|
def make_simulated_env_kwargs(real_env, hparams, **extra_kwargs):
"""Extracts simulated env kwargs from real_env and loop hparams."""
objs_and_attrs = [
(real_env, [
"reward_range", "observation_space", "action_space", "frame_height",
"frame_width"
]),
(hparams, ["frame_stack_size", "intrinsic_reward_scale"])
]
kwargs = {
attr: getattr(obj, attr) # pylint: disable=g-complex-comprehension
for (obj, attrs) in objs_and_attrs for attr in attrs
}
kwargs["model_name"] = hparams.generative_model
kwargs["model_hparams"] = trainer_lib.create_hparams(
hparams.generative_model_params
)
if hparams.wm_policy_param_sharing:
kwargs["model_hparams"].optimizer_zero_grads = True
kwargs.update(extra_kwargs)
return kwargs
|
python
|
def make_simulated_env_kwargs(real_env, hparams, **extra_kwargs):
"""Extracts simulated env kwargs from real_env and loop hparams."""
objs_and_attrs = [
(real_env, [
"reward_range", "observation_space", "action_space", "frame_height",
"frame_width"
]),
(hparams, ["frame_stack_size", "intrinsic_reward_scale"])
]
kwargs = {
attr: getattr(obj, attr) # pylint: disable=g-complex-comprehension
for (obj, attrs) in objs_and_attrs for attr in attrs
}
kwargs["model_name"] = hparams.generative_model
kwargs["model_hparams"] = trainer_lib.create_hparams(
hparams.generative_model_params
)
if hparams.wm_policy_param_sharing:
kwargs["model_hparams"].optimizer_zero_grads = True
kwargs.update(extra_kwargs)
return kwargs
|
[
"def",
"make_simulated_env_kwargs",
"(",
"real_env",
",",
"hparams",
",",
"*",
"*",
"extra_kwargs",
")",
":",
"objs_and_attrs",
"=",
"[",
"(",
"real_env",
",",
"[",
"\"reward_range\"",
",",
"\"observation_space\"",
",",
"\"action_space\"",
",",
"\"frame_height\"",
",",
"\"frame_width\"",
"]",
")",
",",
"(",
"hparams",
",",
"[",
"\"frame_stack_size\"",
",",
"\"intrinsic_reward_scale\"",
"]",
")",
"]",
"kwargs",
"=",
"{",
"attr",
":",
"getattr",
"(",
"obj",
",",
"attr",
")",
"# pylint: disable=g-complex-comprehension",
"for",
"(",
"obj",
",",
"attrs",
")",
"in",
"objs_and_attrs",
"for",
"attr",
"in",
"attrs",
"}",
"kwargs",
"[",
"\"model_name\"",
"]",
"=",
"hparams",
".",
"generative_model",
"kwargs",
"[",
"\"model_hparams\"",
"]",
"=",
"trainer_lib",
".",
"create_hparams",
"(",
"hparams",
".",
"generative_model_params",
")",
"if",
"hparams",
".",
"wm_policy_param_sharing",
":",
"kwargs",
"[",
"\"model_hparams\"",
"]",
".",
"optimizer_zero_grads",
"=",
"True",
"kwargs",
".",
"update",
"(",
"extra_kwargs",
")",
"return",
"kwargs"
] |
Extracts simulated env kwargs from real_env and loop hparams.
|
[
"Extracts",
"simulated",
"env",
"kwargs",
"from",
"real_env",
"and",
"loop",
"hparams",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/rl.py#L250-L270
|
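The comprehension above harvests named attributes from several source objects into one kwargs dict. A minimal sketch of that pattern with plain stand-in objects (names and values are illustrative only):

class Obj(object):
  def __init__(self, **kw):
    self.__dict__.update(kw)

env = Obj(frame_height=96, frame_width=96)
hp = Obj(frame_stack_size=4)
objs_and_attrs = [(env, ["frame_height", "frame_width"]),
                  (hp, ["frame_stack_size"])]
kwargs = {attr: getattr(obj, attr)
          for (obj, attrs) in objs_and_attrs for attr in attrs}
assert kwargs == {"frame_height": 96, "frame_width": 96,
                  "frame_stack_size": 4}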
21,876
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/rl.py
|
get_policy
|
def get_policy(observations, hparams, action_space):
"""Get a policy network.
Args:
observations: observations
hparams: parameters
action_space: action space
Returns:
Tuple (action logits, value).
"""
if not isinstance(action_space, gym.spaces.Discrete):
raise ValueError("Expecting discrete action space.")
obs_shape = common_layers.shape_list(observations)
(frame_height, frame_width) = obs_shape[2:4]
# TODO(afrozm): We have these dummy problems mainly for hparams, so cleanup
# when possible and do this properly.
if hparams.policy_problem_name == "dummy_policy_problem_ttt":
tf.logging.info("Using DummyPolicyProblemTTT for the policy.")
policy_problem = tic_tac_toe_env.DummyPolicyProblemTTT()
else:
tf.logging.info("Using DummyPolicyProblem for the policy.")
policy_problem = DummyPolicyProblem(action_space, frame_height, frame_width)
trainer_lib.add_problem_hparams(hparams, policy_problem)
hparams.force_full_predict = True
model = registry.model(hparams.policy_network)(
hparams, tf.estimator.ModeKeys.TRAIN
)
try:
num_target_frames = hparams.video_num_target_frames
except AttributeError:
num_target_frames = 1
features = {
"inputs": observations,
"input_action": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),
"input_reward": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),
"targets": tf.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:]),
"target_action": tf.zeros(
obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),
"target_reward": tf.zeros(
obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),
"target_policy": tf.zeros(
obs_shape[:1] + [num_target_frames] + [action_space.n]),
"target_value": tf.zeros(
obs_shape[:1] + [num_target_frames])
}
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
t2t_model.create_dummy_vars()
(targets, _) = model(features)
return (targets["target_policy"][:, 0, :], targets["target_value"][:, 0])
|
python
|
def get_policy(observations, hparams, action_space):
"""Get a policy network.
Args:
observations: observations
hparams: parameters
action_space: action space
Returns:
Tuple (action logits, value).
"""
if not isinstance(action_space, gym.spaces.Discrete):
raise ValueError("Expecting discrete action space.")
obs_shape = common_layers.shape_list(observations)
(frame_height, frame_width) = obs_shape[2:4]
# TODO(afrozm): We have these dummy problems mainly for hparams, so cleanup
# when possible and do this properly.
if hparams.policy_problem_name == "dummy_policy_problem_ttt":
tf.logging.info("Using DummyPolicyProblemTTT for the policy.")
policy_problem = tic_tac_toe_env.DummyPolicyProblemTTT()
else:
tf.logging.info("Using DummyPolicyProblem for the policy.")
policy_problem = DummyPolicyProblem(action_space, frame_height, frame_width)
trainer_lib.add_problem_hparams(hparams, policy_problem)
hparams.force_full_predict = True
model = registry.model(hparams.policy_network)(
hparams, tf.estimator.ModeKeys.TRAIN
)
try:
num_target_frames = hparams.video_num_target_frames
except AttributeError:
num_target_frames = 1
features = {
"inputs": observations,
"input_action": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),
"input_reward": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),
"targets": tf.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:]),
"target_action": tf.zeros(
obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),
"target_reward": tf.zeros(
obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),
"target_policy": tf.zeros(
obs_shape[:1] + [num_target_frames] + [action_space.n]),
"target_value": tf.zeros(
obs_shape[:1] + [num_target_frames])
}
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
t2t_model.create_dummy_vars()
(targets, _) = model(features)
return (targets["target_policy"][:, 0, :], targets["target_value"][:, 0])
|
[
"def",
"get_policy",
"(",
"observations",
",",
"hparams",
",",
"action_space",
")",
":",
"if",
"not",
"isinstance",
"(",
"action_space",
",",
"gym",
".",
"spaces",
".",
"Discrete",
")",
":",
"raise",
"ValueError",
"(",
"\"Expecting discrete action space.\"",
")",
"obs_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"observations",
")",
"(",
"frame_height",
",",
"frame_width",
")",
"=",
"obs_shape",
"[",
"2",
":",
"4",
"]",
"# TODO(afrozm): We have these dummy problems mainly for hparams, so cleanup",
"# when possible and do this properly.",
"if",
"hparams",
".",
"policy_problem_name",
"==",
"\"dummy_policy_problem_ttt\"",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Using DummyPolicyProblemTTT for the policy.\"",
")",
"policy_problem",
"=",
"tic_tac_toe_env",
".",
"DummyPolicyProblemTTT",
"(",
")",
"else",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Using DummyPolicyProblem for the policy.\"",
")",
"policy_problem",
"=",
"DummyPolicyProblem",
"(",
"action_space",
",",
"frame_height",
",",
"frame_width",
")",
"trainer_lib",
".",
"add_problem_hparams",
"(",
"hparams",
",",
"policy_problem",
")",
"hparams",
".",
"force_full_predict",
"=",
"True",
"model",
"=",
"registry",
".",
"model",
"(",
"hparams",
".",
"policy_network",
")",
"(",
"hparams",
",",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
")",
"try",
":",
"num_target_frames",
"=",
"hparams",
".",
"video_num_target_frames",
"except",
"AttributeError",
":",
"num_target_frames",
"=",
"1",
"features",
"=",
"{",
"\"inputs\"",
":",
"observations",
",",
"\"input_action\"",
":",
"tf",
".",
"zeros",
"(",
"obs_shape",
"[",
":",
"2",
"]",
"+",
"[",
"1",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"\"input_reward\"",
":",
"tf",
".",
"zeros",
"(",
"obs_shape",
"[",
":",
"2",
"]",
"+",
"[",
"1",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"\"targets\"",
":",
"tf",
".",
"zeros",
"(",
"obs_shape",
"[",
":",
"1",
"]",
"+",
"[",
"num_target_frames",
"]",
"+",
"obs_shape",
"[",
"2",
":",
"]",
")",
",",
"\"target_action\"",
":",
"tf",
".",
"zeros",
"(",
"obs_shape",
"[",
":",
"1",
"]",
"+",
"[",
"num_target_frames",
",",
"1",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"\"target_reward\"",
":",
"tf",
".",
"zeros",
"(",
"obs_shape",
"[",
":",
"1",
"]",
"+",
"[",
"num_target_frames",
",",
"1",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"\"target_policy\"",
":",
"tf",
".",
"zeros",
"(",
"obs_shape",
"[",
":",
"1",
"]",
"+",
"[",
"num_target_frames",
"]",
"+",
"[",
"action_space",
".",
"n",
"]",
")",
",",
"\"target_value\"",
":",
"tf",
".",
"zeros",
"(",
"obs_shape",
"[",
":",
"1",
"]",
"+",
"[",
"num_target_frames",
"]",
")",
"}",
"with",
"tf",
".",
"variable_scope",
"(",
"tf",
".",
"get_variable_scope",
"(",
")",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"t2t_model",
".",
"create_dummy_vars",
"(",
")",
"(",
"targets",
",",
"_",
")",
"=",
"model",
"(",
"features",
")",
"return",
"(",
"targets",
"[",
"\"target_policy\"",
"]",
"[",
":",
",",
"0",
",",
":",
"]",
",",
"targets",
"[",
"\"target_value\"",
"]",
"[",
":",
",",
"0",
"]",
")"
] |
Get a policy network.
Args:
observations: observations
hparams: parameters
action_space: action space
Returns:
Tuple (action logits, value).
|
[
"Get",
"a",
"policy",
"network",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/rl.py#L280-L332
|
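The dummy features above are built purely from shape arithmetic on obs_shape = (batch, history, height, width, channels). A quick numpy sketch of that bookkeeping, with numpy standing in for tf.zeros and illustrative shapes:

import numpy as np

obs_shape = [8, 4, 96, 96, 3]  # (batch, history, height, width, channels)
num_target_frames = 1
targets = np.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:])
target_action = np.zeros(obs_shape[:1] + [num_target_frames, 1],
                         dtype=np.int32)
assert targets.shape == (8, 1, 96, 96, 3)
assert target_action.shape == (8, 1, 1)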
21,877
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/rl.py
|
rlmf_tictactoe
|
def rlmf_tictactoe():
"""Base set of hparams for model-free PPO."""
hparams = rlmf_original()
hparams.game = "tictactoe"
hparams.rl_env_name = "T2TEnv-TicTacToeEnv-v0"
  # We don't have any no-op actions; otherwise the env would have to expose
  # an attribute called `get_action_meanings`.
hparams.eval_max_num_noops = 0
hparams.max_num_noops = 0
hparams.rl_should_derive_observation_space = False
hparams.policy_network = "feed_forward_categorical_policy"
hparams.base_algo_params = "ppo_ttt_params"
# Number of last observations to feed to the agent
hparams.frame_stack_size = 1
return hparams
|
python
|
def rlmf_tictactoe():
"""Base set of hparams for model-free PPO."""
hparams = rlmf_original()
hparams.game = "tictactoe"
hparams.rl_env_name = "T2TEnv-TicTacToeEnv-v0"
  # We don't have any no-op actions; otherwise the env would have to expose
  # an attribute called `get_action_meanings`.
hparams.eval_max_num_noops = 0
hparams.max_num_noops = 0
hparams.rl_should_derive_observation_space = False
hparams.policy_network = "feed_forward_categorical_policy"
hparams.base_algo_params = "ppo_ttt_params"
# Number of last observations to feed to the agent
hparams.frame_stack_size = 1
return hparams
|
[
"def",
"rlmf_tictactoe",
"(",
")",
":",
"hparams",
"=",
"rlmf_original",
"(",
")",
"hparams",
".",
"game",
"=",
"\"tictactoe\"",
"hparams",
".",
"rl_env_name",
"=",
"\"T2TEnv-TicTacToeEnv-v0\"",
"# Since we don't have any no-op actions, otherwise we have to have an",
"# attribute called `get_action_meanings`.",
"hparams",
".",
"eval_max_num_noops",
"=",
"0",
"hparams",
".",
"max_num_noops",
"=",
"0",
"hparams",
".",
"rl_should_derive_observation_space",
"=",
"False",
"hparams",
".",
"policy_network",
"=",
"\"feed_forward_categorical_policy\"",
"hparams",
".",
"base_algo_params",
"=",
"\"ppo_ttt_params\"",
"# Number of last observations to feed to the agent",
"hparams",
".",
"frame_stack_size",
"=",
"1",
"return",
"hparams"
] |
Base set of hparams for model-free PPO.
|
[
"Base",
"set",
"of",
"hparams",
"for",
"model",
"-",
"free",
"PPO",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/rl.py#L427-L443
|
21,878
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/rl.py
|
rlmf_tiny
|
def rlmf_tiny():
"""Tiny set of hparams for model-free PPO."""
hparams = rlmf_original()
hparams = hparams.override_from_dict(rlmf_tiny_overrides())
hparams.batch_size = 2
hparams.base_algo_params = "ppo_original_tiny"
hparams.add_hparam("ppo_epochs_num", 3)
hparams.add_hparam("ppo_epoch_length", 2)
return hparams
|
python
|
def rlmf_tiny():
"""Tiny set of hparams for model-free PPO."""
hparams = rlmf_original()
hparams = hparams.override_from_dict(rlmf_tiny_overrides())
hparams.batch_size = 2
hparams.base_algo_params = "ppo_original_tiny"
hparams.add_hparam("ppo_epochs_num", 3)
hparams.add_hparam("ppo_epoch_length", 2)
return hparams
|
[
"def",
"rlmf_tiny",
"(",
")",
":",
"hparams",
"=",
"rlmf_original",
"(",
")",
"hparams",
"=",
"hparams",
".",
"override_from_dict",
"(",
"rlmf_tiny_overrides",
"(",
")",
")",
"hparams",
".",
"batch_size",
"=",
"2",
"hparams",
".",
"base_algo_params",
"=",
"\"ppo_original_tiny\"",
"hparams",
".",
"add_hparam",
"(",
"\"ppo_epochs_num\"",
",",
"3",
")",
"hparams",
".",
"add_hparam",
"(",
"\"ppo_epoch_length\"",
",",
"2",
")",
"return",
"hparams"
] |
Tiny set of hparams for model-free PPO.
|
[
"Tiny",
"set",
"of",
"hparams",
"for",
"model",
"-",
"free",
"PPO",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/rl.py#L456-L464
|
21,879
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/rl.py
|
rlmf_dqn_tiny
|
def rlmf_dqn_tiny():
"""Tiny DQN params."""
hparams = rlmf_original()
hparams = hparams.override_from_dict(rlmf_tiny_overrides())
hparams.batch_size = 1
hparams.base_algo = "dqn"
hparams.base_algo_params = "dqn_original_params"
hparams.add_hparam("dqn_num_frames", 128)
hparams.add_hparam("dqn_save_every_steps", 128)
hparams.add_hparam("dqn_replay_buffer_replay_capacity", 100)
hparams.add_hparam("dqn_agent_min_replay_history", 10)
return hparams
|
python
|
def rlmf_dqn_tiny():
"""Tiny DQN params."""
hparams = rlmf_original()
hparams = hparams.override_from_dict(rlmf_tiny_overrides())
hparams.batch_size = 1
hparams.base_algo = "dqn"
hparams.base_algo_params = "dqn_original_params"
hparams.add_hparam("dqn_num_frames", 128)
hparams.add_hparam("dqn_save_every_steps", 128)
hparams.add_hparam("dqn_replay_buffer_replay_capacity", 100)
hparams.add_hparam("dqn_agent_min_replay_history", 10)
return hparams
|
[
"def",
"rlmf_dqn_tiny",
"(",
")",
":",
"hparams",
"=",
"rlmf_original",
"(",
")",
"hparams",
"=",
"hparams",
".",
"override_from_dict",
"(",
"rlmf_tiny_overrides",
"(",
")",
")",
"hparams",
".",
"batch_size",
"=",
"1",
"hparams",
".",
"base_algo",
"=",
"\"dqn\"",
"hparams",
".",
"base_algo_params",
"=",
"\"dqn_original_params\"",
"hparams",
".",
"add_hparam",
"(",
"\"dqn_num_frames\"",
",",
"128",
")",
"hparams",
".",
"add_hparam",
"(",
"\"dqn_save_every_steps\"",
",",
"128",
")",
"hparams",
".",
"add_hparam",
"(",
"\"dqn_replay_buffer_replay_capacity\"",
",",
"100",
")",
"hparams",
".",
"add_hparam",
"(",
"\"dqn_agent_min_replay_history\"",
",",
"10",
")",
"return",
"hparams"
] |
Tiny DQN params.
|
[
"Tiny",
"DQN",
"params",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/rl.py#L468-L479
|
21,880
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/rl.py
|
rlmf_eval
|
def rlmf_eval():
"""Eval set of hparams for model-free PPO."""
hparams = rlmf_original()
hparams.batch_size = 8
hparams.eval_sampling_temps = [0.0, 0.5, 1.0]
hparams.eval_rl_env_max_episode_steps = -1
hparams.add_hparam("ppo_epoch_length", 128)
hparams.add_hparam("ppo_optimization_batch_size", 32)
hparams.add_hparam("ppo_epochs_num", 10000)
hparams.add_hparam("ppo_eval_every_epochs", 500)
hparams.add_hparam("attempt", 0)
hparams.add_hparam("moe_loss_coef", 0)
return hparams
|
python
|
def rlmf_eval():
"""Eval set of hparams for model-free PPO."""
hparams = rlmf_original()
hparams.batch_size = 8
hparams.eval_sampling_temps = [0.0, 0.5, 1.0]
hparams.eval_rl_env_max_episode_steps = -1
hparams.add_hparam("ppo_epoch_length", 128)
hparams.add_hparam("ppo_optimization_batch_size", 32)
hparams.add_hparam("ppo_epochs_num", 10000)
hparams.add_hparam("ppo_eval_every_epochs", 500)
hparams.add_hparam("attempt", 0)
hparams.add_hparam("moe_loss_coef", 0)
return hparams
|
[
"def",
"rlmf_eval",
"(",
")",
":",
"hparams",
"=",
"rlmf_original",
"(",
")",
"hparams",
".",
"batch_size",
"=",
"8",
"hparams",
".",
"eval_sampling_temps",
"=",
"[",
"0.0",
",",
"0.5",
",",
"1.0",
"]",
"hparams",
".",
"eval_rl_env_max_episode_steps",
"=",
"-",
"1",
"hparams",
".",
"add_hparam",
"(",
"\"ppo_epoch_length\"",
",",
"128",
")",
"hparams",
".",
"add_hparam",
"(",
"\"ppo_optimization_batch_size\"",
",",
"32",
")",
"hparams",
".",
"add_hparam",
"(",
"\"ppo_epochs_num\"",
",",
"10000",
")",
"hparams",
".",
"add_hparam",
"(",
"\"ppo_eval_every_epochs\"",
",",
"500",
")",
"hparams",
".",
"add_hparam",
"(",
"\"attempt\"",
",",
"0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"moe_loss_coef\"",
",",
"0",
")",
"return",
"hparams"
] |
Eval set of hparams for model-free PPO.
|
[
"Eval",
"set",
"of",
"hparams",
"for",
"model",
"-",
"free",
"PPO",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/rl.py#L483-L495
|
21,881
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/rl.py
|
feed_forward_gaussian_fun
|
def feed_forward_gaussian_fun(action_space, config, observations):
"""Feed-forward Gaussian."""
if not isinstance(action_space, gym.spaces.box.Box):
raise ValueError("Expecting continuous action space.")
mean_weights_initializer = tf.initializers.variance_scaling(
scale=config.init_mean_factor)
logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)
flat_observations = tf.reshape(observations, [
tf.shape(observations)[0], tf.shape(observations)[1],
functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
with tf.variable_scope("network_parameters"):
with tf.variable_scope("policy"):
x = flat_observations
for size in config.policy_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
mean = tf.layers.dense(
x, action_space.shape[0], activation=tf.tanh,
kernel_initializer=mean_weights_initializer)
logstd = tf.get_variable(
"logstd", mean.shape[2:], tf.float32, logstd_initializer)
logstd = tf.tile(
logstd[None, None],
[tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
with tf.variable_scope("value"):
x = flat_observations
for size in config.value_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
value = tf.layers.dense(x, 1)[..., 0]
mean = tf.check_numerics(mean, "mean")
logstd = tf.check_numerics(logstd, "logstd")
value = tf.check_numerics(value, "value")
policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd))
return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2))
|
python
|
def feed_forward_gaussian_fun(action_space, config, observations):
"""Feed-forward Gaussian."""
if not isinstance(action_space, gym.spaces.box.Box):
raise ValueError("Expecting continuous action space.")
mean_weights_initializer = tf.initializers.variance_scaling(
scale=config.init_mean_factor)
logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)
flat_observations = tf.reshape(observations, [
tf.shape(observations)[0], tf.shape(observations)[1],
functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
with tf.variable_scope("network_parameters"):
with tf.variable_scope("policy"):
x = flat_observations
for size in config.policy_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
mean = tf.layers.dense(
x, action_space.shape[0], activation=tf.tanh,
kernel_initializer=mean_weights_initializer)
logstd = tf.get_variable(
"logstd", mean.shape[2:], tf.float32, logstd_initializer)
logstd = tf.tile(
logstd[None, None],
[tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
with tf.variable_scope("value"):
x = flat_observations
for size in config.value_layers:
x = tf.layers.dense(x, size, activation=tf.nn.relu)
value = tf.layers.dense(x, 1)[..., 0]
mean = tf.check_numerics(mean, "mean")
logstd = tf.check_numerics(logstd, "logstd")
value = tf.check_numerics(value, "value")
policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd))
return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2))
|
[
"def",
"feed_forward_gaussian_fun",
"(",
"action_space",
",",
"config",
",",
"observations",
")",
":",
"if",
"not",
"isinstance",
"(",
"action_space",
",",
"gym",
".",
"spaces",
".",
"box",
".",
"Box",
")",
":",
"raise",
"ValueError",
"(",
"\"Expecting continuous action space.\"",
")",
"mean_weights_initializer",
"=",
"tf",
".",
"initializers",
".",
"variance_scaling",
"(",
"scale",
"=",
"config",
".",
"init_mean_factor",
")",
"logstd_initializer",
"=",
"tf",
".",
"random_normal_initializer",
"(",
"config",
".",
"init_logstd",
",",
"1e-10",
")",
"flat_observations",
"=",
"tf",
".",
"reshape",
"(",
"observations",
",",
"[",
"tf",
".",
"shape",
"(",
"observations",
")",
"[",
"0",
"]",
",",
"tf",
".",
"shape",
"(",
"observations",
")",
"[",
"1",
"]",
",",
"functools",
".",
"reduce",
"(",
"operator",
".",
"mul",
",",
"observations",
".",
"shape",
".",
"as_list",
"(",
")",
"[",
"2",
":",
"]",
",",
"1",
")",
"]",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"network_parameters\"",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"policy\"",
")",
":",
"x",
"=",
"flat_observations",
"for",
"size",
"in",
"config",
".",
"policy_layers",
":",
"x",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"x",
",",
"size",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
")",
"mean",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"x",
",",
"action_space",
".",
"shape",
"[",
"0",
"]",
",",
"activation",
"=",
"tf",
".",
"tanh",
",",
"kernel_initializer",
"=",
"mean_weights_initializer",
")",
"logstd",
"=",
"tf",
".",
"get_variable",
"(",
"\"logstd\"",
",",
"mean",
".",
"shape",
"[",
"2",
":",
"]",
",",
"tf",
".",
"float32",
",",
"logstd_initializer",
")",
"logstd",
"=",
"tf",
".",
"tile",
"(",
"logstd",
"[",
"None",
",",
"None",
"]",
",",
"[",
"tf",
".",
"shape",
"(",
"mean",
")",
"[",
"0",
"]",
",",
"tf",
".",
"shape",
"(",
"mean",
")",
"[",
"1",
"]",
"]",
"+",
"[",
"1",
"]",
"*",
"(",
"mean",
".",
"shape",
".",
"ndims",
"-",
"2",
")",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"value\"",
")",
":",
"x",
"=",
"flat_observations",
"for",
"size",
"in",
"config",
".",
"value_layers",
":",
"x",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"x",
",",
"size",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
")",
"value",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"x",
",",
"1",
")",
"[",
"...",
",",
"0",
"]",
"mean",
"=",
"tf",
".",
"check_numerics",
"(",
"mean",
",",
"\"mean\"",
")",
"logstd",
"=",
"tf",
".",
"check_numerics",
"(",
"logstd",
",",
"\"logstd\"",
")",
"value",
"=",
"tf",
".",
"check_numerics",
"(",
"value",
",",
"\"value\"",
")",
"policy",
"=",
"tfp",
".",
"distributions",
".",
"MultivariateNormalDiag",
"(",
"mean",
",",
"tf",
".",
"exp",
"(",
"logstd",
")",
")",
"return",
"NetworkOutput",
"(",
"policy",
",",
"value",
",",
"lambda",
"a",
":",
"tf",
".",
"clip_by_value",
"(",
"a",
",",
"-",
"2.",
",",
"2",
")",
")"
] |
Feed-forward Gaussian.
|
[
"Feed",
"-",
"forward",
"Gaussian",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/rl.py#L559-L596
|
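Before the dense layers, the function collapses every dimension past (batch, time) into one feature axis via functools.reduce(operator.mul, ...). A numpy sketch of that reshape, with illustrative shapes standing in for the tf ops:

import functools
import operator
import numpy as np

observations = np.zeros((8, 10, 24, 24, 3))  # (batch, time, H, W, C)
flat_dim = functools.reduce(operator.mul, observations.shape[2:], 1)
flat = observations.reshape(
    observations.shape[0], observations.shape[1], flat_dim)
assert flat.shape == (8, 10, 24 * 24 * 3)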
21,882
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/yellowfin.py
|
YellowFinOptimizer._curvature_range
|
def _curvature_range(self):
"""Curvature range.
Returns:
h_max_t, h_min_t ops
"""
self._curv_win = tf.get_variable("curv_win",
dtype=tf.float32,
trainable=False,
shape=[self.curvature_window_width,],
initializer=tf.zeros_initializer)
# We use log smoothing for curvature range
self._curv_win = tf.scatter_update(self._curv_win,
self._step % self.curvature_window_width,
tf.log(self._grad_norm_squared))
# Note here the iterations start from iteration 0
valid_window = tf.slice(self._curv_win,
tf.constant([0,]),
tf.expand_dims(
tf.minimum(
tf.constant(self.curvature_window_width),
self._step + 1), dim=0))
self._h_min_t = tf.reduce_min(valid_window)
self._h_max_t = tf.reduce_max(valid_window)
curv_range_ops = []
with tf.control_dependencies([self._h_min_t, self._h_max_t]):
avg_op = self._moving_averager.apply([self._h_min_t, self._h_max_t])
with tf.control_dependencies([avg_op]):
self._h_min = tf.exp(
tf.identity(self._moving_averager.average(self._h_min_t)))
self._h_max = tf.exp(
tf.identity(self._moving_averager.average(self._h_max_t)))
if self._sparsity_debias:
self._h_min *= self._sparsity_avg
self._h_max *= self._sparsity_avg
curv_range_ops.append(avg_op)
return curv_range_ops
|
python
|
def _curvature_range(self):
"""Curvature range.
Returns:
h_max_t, h_min_t ops
"""
self._curv_win = tf.get_variable("curv_win",
dtype=tf.float32,
trainable=False,
shape=[self.curvature_window_width,],
initializer=tf.zeros_initializer)
# We use log smoothing for curvature range
self._curv_win = tf.scatter_update(self._curv_win,
self._step % self.curvature_window_width,
tf.log(self._grad_norm_squared))
# Note here the iterations start from iteration 0
valid_window = tf.slice(self._curv_win,
tf.constant([0,]),
tf.expand_dims(
tf.minimum(
tf.constant(self.curvature_window_width),
self._step + 1), dim=0))
self._h_min_t = tf.reduce_min(valid_window)
self._h_max_t = tf.reduce_max(valid_window)
curv_range_ops = []
with tf.control_dependencies([self._h_min_t, self._h_max_t]):
avg_op = self._moving_averager.apply([self._h_min_t, self._h_max_t])
with tf.control_dependencies([avg_op]):
self._h_min = tf.exp(
tf.identity(self._moving_averager.average(self._h_min_t)))
self._h_max = tf.exp(
tf.identity(self._moving_averager.average(self._h_max_t)))
if self._sparsity_debias:
self._h_min *= self._sparsity_avg
self._h_max *= self._sparsity_avg
curv_range_ops.append(avg_op)
return curv_range_ops
|
[
"def",
"_curvature_range",
"(",
"self",
")",
":",
"self",
".",
"_curv_win",
"=",
"tf",
".",
"get_variable",
"(",
"\"curv_win\"",
",",
"dtype",
"=",
"tf",
".",
"float32",
",",
"trainable",
"=",
"False",
",",
"shape",
"=",
"[",
"self",
".",
"curvature_window_width",
",",
"]",
",",
"initializer",
"=",
"tf",
".",
"zeros_initializer",
")",
"# We use log smoothing for curvature range",
"self",
".",
"_curv_win",
"=",
"tf",
".",
"scatter_update",
"(",
"self",
".",
"_curv_win",
",",
"self",
".",
"_step",
"%",
"self",
".",
"curvature_window_width",
",",
"tf",
".",
"log",
"(",
"self",
".",
"_grad_norm_squared",
")",
")",
"# Note here the iterations start from iteration 0",
"valid_window",
"=",
"tf",
".",
"slice",
"(",
"self",
".",
"_curv_win",
",",
"tf",
".",
"constant",
"(",
"[",
"0",
",",
"]",
")",
",",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"minimum",
"(",
"tf",
".",
"constant",
"(",
"self",
".",
"curvature_window_width",
")",
",",
"self",
".",
"_step",
"+",
"1",
")",
",",
"dim",
"=",
"0",
")",
")",
"self",
".",
"_h_min_t",
"=",
"tf",
".",
"reduce_min",
"(",
"valid_window",
")",
"self",
".",
"_h_max_t",
"=",
"tf",
".",
"reduce_max",
"(",
"valid_window",
")",
"curv_range_ops",
"=",
"[",
"]",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"self",
".",
"_h_min_t",
",",
"self",
".",
"_h_max_t",
"]",
")",
":",
"avg_op",
"=",
"self",
".",
"_moving_averager",
".",
"apply",
"(",
"[",
"self",
".",
"_h_min_t",
",",
"self",
".",
"_h_max_t",
"]",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"avg_op",
"]",
")",
":",
"self",
".",
"_h_min",
"=",
"tf",
".",
"exp",
"(",
"tf",
".",
"identity",
"(",
"self",
".",
"_moving_averager",
".",
"average",
"(",
"self",
".",
"_h_min_t",
")",
")",
")",
"self",
".",
"_h_max",
"=",
"tf",
".",
"exp",
"(",
"tf",
".",
"identity",
"(",
"self",
".",
"_moving_averager",
".",
"average",
"(",
"self",
".",
"_h_max_t",
")",
")",
")",
"if",
"self",
".",
"_sparsity_debias",
":",
"self",
".",
"_h_min",
"*=",
"self",
".",
"_sparsity_avg",
"self",
".",
"_h_max",
"*=",
"self",
".",
"_sparsity_avg",
"curv_range_ops",
".",
"append",
"(",
"avg_op",
")",
"return",
"curv_range_ops"
] |
Curvature range.
Returns:
h_max_t, h_min_t ops
|
[
"Curvature",
"range",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L193-L230
|
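The window above stores log(||g||^2) per step, takes extrema over the valid prefix, smooths them with a moving average, and exponentiates back. A pure-numpy sketch under simplified assumptions (no zero-debiasing, illustrative values):

import numpy as np

window_width, beta = 20, 0.9
curv_win = np.zeros(window_width)
grad_norm_sq_history = [4.0, 2.5, 9.0, 1.0, 6.0]
h_min_avg = h_max_avg = 0.0
for step, g_sq in enumerate(grad_norm_sq_history):
  curv_win[step % window_width] = np.log(g_sq)      # log smoothing
  valid = curv_win[:min(window_width, step + 1)]    # valid prefix only
  h_min_avg = beta * h_min_avg + (1 - beta) * valid.min()
  h_max_avg = beta * h_max_avg + (1 - beta) * valid.max()
h_min, h_max = np.exp(h_min_avg), np.exp(h_max_avg)
assert h_min <= h_max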
21,883
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/yellowfin.py
|
YellowFinOptimizer._grad_variance
|
def _grad_variance(self):
"""Estimate of gradient Variance.
Returns:
C_t ops.
"""
grad_var_ops = []
tensor_to_avg = []
for t, g in zip(self._vars, self._grad):
if isinstance(g, tf.IndexedSlices):
tensor_to_avg.append(
tf.reshape(tf.unsorted_segment_sum(g.values,
g.indices,
g.dense_shape[0]),
shape=t.get_shape()))
else:
tensor_to_avg.append(g)
avg_op = self._moving_averager.apply(tensor_to_avg)
grad_var_ops.append(avg_op)
with tf.control_dependencies([avg_op]):
self._grad_avg = [self._moving_averager.average(val)
for val in tensor_to_avg]
self._grad_avg_squared = [tf.square(val) for val in self._grad_avg]
# Compute Variance
self._grad_var = tf.maximum(
tf.constant(1e-6, dtype=self._grad_norm_squared_avg.dtype),
self._grad_norm_squared_avg
- tf.add_n([tf.reduce_sum(val) for val in self._grad_avg_squared]))
if self._sparsity_debias:
self._grad_var *= self._sparsity_avg
return grad_var_ops
|
python
|
def _grad_variance(self):
"""Estimate of gradient Variance.
Returns:
C_t ops.
"""
grad_var_ops = []
tensor_to_avg = []
for t, g in zip(self._vars, self._grad):
if isinstance(g, tf.IndexedSlices):
tensor_to_avg.append(
tf.reshape(tf.unsorted_segment_sum(g.values,
g.indices,
g.dense_shape[0]),
shape=t.get_shape()))
else:
tensor_to_avg.append(g)
avg_op = self._moving_averager.apply(tensor_to_avg)
grad_var_ops.append(avg_op)
with tf.control_dependencies([avg_op]):
self._grad_avg = [self._moving_averager.average(val)
for val in tensor_to_avg]
self._grad_avg_squared = [tf.square(val) for val in self._grad_avg]
# Compute Variance
self._grad_var = tf.maximum(
tf.constant(1e-6, dtype=self._grad_norm_squared_avg.dtype),
self._grad_norm_squared_avg
- tf.add_n([tf.reduce_sum(val) for val in self._grad_avg_squared]))
if self._sparsity_debias:
self._grad_var *= self._sparsity_avg
return grad_var_ops
|
[
"def",
"_grad_variance",
"(",
"self",
")",
":",
"grad_var_ops",
"=",
"[",
"]",
"tensor_to_avg",
"=",
"[",
"]",
"for",
"t",
",",
"g",
"in",
"zip",
"(",
"self",
".",
"_vars",
",",
"self",
".",
"_grad",
")",
":",
"if",
"isinstance",
"(",
"g",
",",
"tf",
".",
"IndexedSlices",
")",
":",
"tensor_to_avg",
".",
"append",
"(",
"tf",
".",
"reshape",
"(",
"tf",
".",
"unsorted_segment_sum",
"(",
"g",
".",
"values",
",",
"g",
".",
"indices",
",",
"g",
".",
"dense_shape",
"[",
"0",
"]",
")",
",",
"shape",
"=",
"t",
".",
"get_shape",
"(",
")",
")",
")",
"else",
":",
"tensor_to_avg",
".",
"append",
"(",
"g",
")",
"avg_op",
"=",
"self",
".",
"_moving_averager",
".",
"apply",
"(",
"tensor_to_avg",
")",
"grad_var_ops",
".",
"append",
"(",
"avg_op",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"avg_op",
"]",
")",
":",
"self",
".",
"_grad_avg",
"=",
"[",
"self",
".",
"_moving_averager",
".",
"average",
"(",
"val",
")",
"for",
"val",
"in",
"tensor_to_avg",
"]",
"self",
".",
"_grad_avg_squared",
"=",
"[",
"tf",
".",
"square",
"(",
"val",
")",
"for",
"val",
"in",
"self",
".",
"_grad_avg",
"]",
"# Compute Variance",
"self",
".",
"_grad_var",
"=",
"tf",
".",
"maximum",
"(",
"tf",
".",
"constant",
"(",
"1e-6",
",",
"dtype",
"=",
"self",
".",
"_grad_norm_squared_avg",
".",
"dtype",
")",
",",
"self",
".",
"_grad_norm_squared_avg",
"-",
"tf",
".",
"add_n",
"(",
"[",
"tf",
".",
"reduce_sum",
"(",
"val",
")",
"for",
"val",
"in",
"self",
".",
"_grad_avg_squared",
"]",
")",
")",
"if",
"self",
".",
"_sparsity_debias",
":",
"self",
".",
"_grad_var",
"*=",
"self",
".",
"_sparsity_avg",
"return",
"grad_var_ops"
] |
Estimate of gradient Variance.
Returns:
C_t ops.
|
[
"Estimate",
"of",
"gradient",
"Variance",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L232-L263
|
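The variance estimate above is the identity Var[g] = E[||g||^2] - ||E[g]||^2, with moving averages standing in for the expectations and a 1e-6 floor. A direct numpy check of that identity on a small gradient sample, without the moving averager, for clarity:

import numpy as np

grads = np.array([[1.0, 2.0], [3.0, 0.0], [2.0, 1.0]])  # three sampled grads
grad_norm_sq_avg = (grads ** 2).sum(axis=1).mean()      # E[||g||^2]
grad_avg = grads.mean(axis=0)                           # E[g]
grad_var = grad_norm_sq_avg - (grad_avg ** 2).sum()
# Equals the summed per-coordinate variance of the sample.
assert np.isclose(grad_var, grads.var(axis=0).sum())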
21,884
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/yellowfin.py
|
YellowFinOptimizer._dist_to_opt
|
def _dist_to_opt(self):
"""Distance to optimum.
Returns:
D_t ops
"""
dist_to_opt_ops = []
# Running average of the norm of gradient
self._grad_norm = tf.sqrt(self._grad_norm_squared)
avg_op = self._moving_averager.apply([self._grad_norm,])
dist_to_opt_ops.append(avg_op)
with tf.control_dependencies([avg_op]):
self._grad_norm_avg = self._moving_averager.average(self._grad_norm)
# Single iteration distance estimation, note here
# self._grad_norm_avg is per variable
self._d_t = self._grad_norm_avg / self._grad_norm_squared_avg
# Running average of distance
avg_op = self._moving_averager.apply([self._d_t])
dist_to_opt_ops.append(avg_op)
with tf.control_dependencies([avg_op]):
self._dist_to_opt_avg = tf.identity(
self._moving_averager.average(self._d_t))
if self._sparsity_debias:
self._dist_to_opt_avg /= tf.sqrt(self._sparsity_avg)
return dist_to_opt_ops
|
python
|
def _dist_to_opt(self):
"""Distance to optimum.
Returns:
D_t ops
"""
dist_to_opt_ops = []
# Running average of the norm of gradient
self._grad_norm = tf.sqrt(self._grad_norm_squared)
avg_op = self._moving_averager.apply([self._grad_norm,])
dist_to_opt_ops.append(avg_op)
with tf.control_dependencies([avg_op]):
self._grad_norm_avg = self._moving_averager.average(self._grad_norm)
# Single iteration distance estimation, note here
# self._grad_norm_avg is per variable
self._d_t = self._grad_norm_avg / self._grad_norm_squared_avg
# Running average of distance
avg_op = self._moving_averager.apply([self._d_t])
dist_to_opt_ops.append(avg_op)
with tf.control_dependencies([avg_op]):
self._dist_to_opt_avg = tf.identity(
self._moving_averager.average(self._d_t))
if self._sparsity_debias:
self._dist_to_opt_avg /= tf.sqrt(self._sparsity_avg)
return dist_to_opt_ops
|
[
"def",
"_dist_to_opt",
"(",
"self",
")",
":",
"dist_to_opt_ops",
"=",
"[",
"]",
"# Running average of the norm of gradient",
"self",
".",
"_grad_norm",
"=",
"tf",
".",
"sqrt",
"(",
"self",
".",
"_grad_norm_squared",
")",
"avg_op",
"=",
"self",
".",
"_moving_averager",
".",
"apply",
"(",
"[",
"self",
".",
"_grad_norm",
",",
"]",
")",
"dist_to_opt_ops",
".",
"append",
"(",
"avg_op",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"avg_op",
"]",
")",
":",
"self",
".",
"_grad_norm_avg",
"=",
"self",
".",
"_moving_averager",
".",
"average",
"(",
"self",
".",
"_grad_norm",
")",
"# Single iteration distance estimation, note here",
"# self._grad_norm_avg is per variable",
"self",
".",
"_d_t",
"=",
"self",
".",
"_grad_norm_avg",
"/",
"self",
".",
"_grad_norm_squared_avg",
"# Running average of distance",
"avg_op",
"=",
"self",
".",
"_moving_averager",
".",
"apply",
"(",
"[",
"self",
".",
"_d_t",
"]",
")",
"dist_to_opt_ops",
".",
"append",
"(",
"avg_op",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"avg_op",
"]",
")",
":",
"self",
".",
"_dist_to_opt_avg",
"=",
"tf",
".",
"identity",
"(",
"self",
".",
"_moving_averager",
".",
"average",
"(",
"self",
".",
"_d_t",
")",
")",
"if",
"self",
".",
"_sparsity_debias",
":",
"self",
".",
"_dist_to_opt_avg",
"/=",
"tf",
".",
"sqrt",
"(",
"self",
".",
"_sparsity_avg",
")",
"return",
"dist_to_opt_ops"
] |
Distance to optimum.
Returns:
D_t ops
|
[
"Distance",
"to",
"optimum",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L265-L289
|
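The single-iteration distance estimate above is D_t = E[||g||] / E[||g||^2]. A short numeric check without the moving averager (illustrative values):

import numpy as np

grad_norms = np.array([2.0, 1.0, 4.0])
d_t = grad_norms.mean() / (grad_norms ** 2).mean()
assert np.isclose(d_t, 1.0 / 3.0)  # (7/3) / 7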
21,885
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/yellowfin.py
|
YellowFinOptimizer._grad_sparsity
|
def _grad_sparsity(self):
"""Gradient sparsity."""
# If the sparse minibatch gradient has 10 percent of its entries
# non-zero, its sparsity is 0.1.
  # The norm of the dense gradient averaged over the full dataset
  # is roughly estimated as the norm of the minibatch
  # sparse gradient norm * sqrt(sparsity).
  # An extension may be to only correct the sparse blob.
non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
avg_op = self._moving_averager.apply([self._sparsity,])
with tf.control_dependencies([avg_op]):
self._sparsity_avg = self._moving_averager.average(self._sparsity)
return avg_op
|
python
|
def _grad_sparsity(self):
"""Gradient sparsity."""
# If the sparse minibatch gradient has 10 percent of its entries
# non-zero, its sparsity is 0.1.
  # The norm of the dense gradient averaged over the full dataset
  # is roughly estimated as the norm of the minibatch
  # sparse gradient norm * sqrt(sparsity).
  # An extension may be to only correct the sparse blob.
non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
avg_op = self._moving_averager.apply([self._sparsity,])
with tf.control_dependencies([avg_op]):
self._sparsity_avg = self._moving_averager.average(self._sparsity)
return avg_op
|
[
"def",
"_grad_sparsity",
"(",
"self",
")",
":",
"# If the sparse minibatch gradient has 10 percent of its entries",
"# non-zero, its sparsity is 0.1.",
"# The norm of dense gradient averaged from full dataset",
"# are roughly estimated norm of minibatch",
"# sparse gradient norm * sqrt(sparsity)",
"# An extension maybe only correct the sparse blob.",
"non_zero_cnt",
"=",
"tf",
".",
"add_n",
"(",
"[",
"tf",
".",
"count_nonzero",
"(",
"g",
")",
"for",
"g",
"in",
"self",
".",
"_grad",
"]",
")",
"all_entry_cnt",
"=",
"tf",
".",
"add_n",
"(",
"[",
"tf",
".",
"size",
"(",
"g",
")",
"for",
"g",
"in",
"self",
".",
"_grad",
"]",
")",
"self",
".",
"_sparsity",
"=",
"tf",
".",
"cast",
"(",
"non_zero_cnt",
",",
"self",
".",
"_grad",
"[",
"0",
"]",
".",
"dtype",
")",
"self",
".",
"_sparsity",
"/=",
"tf",
".",
"cast",
"(",
"all_entry_cnt",
",",
"self",
".",
"_grad",
"[",
"0",
"]",
".",
"dtype",
")",
"avg_op",
"=",
"self",
".",
"_moving_averager",
".",
"apply",
"(",
"[",
"self",
".",
"_sparsity",
",",
"]",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"avg_op",
"]",
")",
":",
"self",
".",
"_sparsity_avg",
"=",
"self",
".",
"_moving_averager",
".",
"average",
"(",
"self",
".",
"_sparsity",
")",
"return",
"avg_op"
] |
Gradient sparsity.
|
[
"Gradient",
"sparsity",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L291-L306
|
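The measure above is just (non-zero entries) / (total entries), smoothed by the moving averager. A worked numpy instance matching the 10-percent example in the comments:

import numpy as np

g = np.zeros(100)
g[:10] = 1.0
sparsity = np.count_nonzero(g) / float(g.size)
assert sparsity == 0.1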
21,886
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/yellowfin.py
|
YellowFinOptimizer._prepare_variables
|
def _prepare_variables(self):
"""Prepare Variables for YellowFin.
Returns:
Grad**2, Norm, Norm**2, Mean(Norm**2) ops
"""
self._moving_averager = tf.train.ExponentialMovingAverage(
decay=self._beta, zero_debias=self._zero_debias)
# assert self._grad is not None and len(self._grad) > 0
# List for the returned Operations
prepare_variables_op = []
# Get per var g**2 and norm**2
self._grad_squared = []
self._grad_norm_squared = []
# Gradient squared
for v, g in zip(self._vars, self._grad):
if g is None: continue
with tf.colocate_with(v):
self._grad_squared.append(tf.square(g))
# Norm squared.
self._grad_norm_squared = [tf.reduce_sum(g_sq)
for g_sq in self._grad_squared]
if self._sparsity_debias:
avg_op_sparsity = self._grad_sparsity()
prepare_variables_op.append(avg_op_sparsity)
# The following running average on squared norm of gradient
# is shared by grad_var and dist_to_opt
avg_op = self._moving_averager.apply(self._grad_norm_squared)
with tf.control_dependencies([avg_op]):
self._grad_norm_squared_avg = [self._moving_averager.average(val)
for val in self._grad_norm_squared]
self._grad_norm_squared = tf.add_n(self._grad_norm_squared)
self._grad_norm_squared_avg = tf.add_n(self._grad_norm_squared_avg)
prepare_variables_op.append(avg_op)
return tf.group(*prepare_variables_op)
|
python
|
def _prepare_variables(self):
"""Prepare Variables for YellowFin.
Returns:
Grad**2, Norm, Norm**2, Mean(Norm**2) ops
"""
self._moving_averager = tf.train.ExponentialMovingAverage(
decay=self._beta, zero_debias=self._zero_debias)
# assert self._grad is not None and len(self._grad) > 0
# List for the returned Operations
prepare_variables_op = []
# Get per var g**2 and norm**2
self._grad_squared = []
self._grad_norm_squared = []
# Gradient squared
for v, g in zip(self._vars, self._grad):
if g is None: continue
with tf.colocate_with(v):
self._grad_squared.append(tf.square(g))
# Norm squared.
self._grad_norm_squared = [tf.reduce_sum(g_sq)
for g_sq in self._grad_squared]
if self._sparsity_debias:
avg_op_sparsity = self._grad_sparsity()
prepare_variables_op.append(avg_op_sparsity)
# The following running average on squared norm of gradient
# is shared by grad_var and dist_to_opt
avg_op = self._moving_averager.apply(self._grad_norm_squared)
with tf.control_dependencies([avg_op]):
self._grad_norm_squared_avg = [self._moving_averager.average(val)
for val in self._grad_norm_squared]
self._grad_norm_squared = tf.add_n(self._grad_norm_squared)
self._grad_norm_squared_avg = tf.add_n(self._grad_norm_squared_avg)
prepare_variables_op.append(avg_op)
return tf.group(*prepare_variables_op)
|
[
"def",
"_prepare_variables",
"(",
"self",
")",
":",
"self",
".",
"_moving_averager",
"=",
"tf",
".",
"train",
".",
"ExponentialMovingAverage",
"(",
"decay",
"=",
"self",
".",
"_beta",
",",
"zero_debias",
"=",
"self",
".",
"_zero_debias",
")",
"# assert self._grad is not None and len(self._grad) > 0",
"# List for the returned Operations",
"prepare_variables_op",
"=",
"[",
"]",
"# Get per var g**2 and norm**2",
"self",
".",
"_grad_squared",
"=",
"[",
"]",
"self",
".",
"_grad_norm_squared",
"=",
"[",
"]",
"# Gradient squared",
"for",
"v",
",",
"g",
"in",
"zip",
"(",
"self",
".",
"_vars",
",",
"self",
".",
"_grad",
")",
":",
"if",
"g",
"is",
"None",
":",
"continue",
"with",
"tf",
".",
"colocate_with",
"(",
"v",
")",
":",
"self",
".",
"_grad_squared",
".",
"append",
"(",
"tf",
".",
"square",
"(",
"g",
")",
")",
"# Norm squared.",
"self",
".",
"_grad_norm_squared",
"=",
"[",
"tf",
".",
"reduce_sum",
"(",
"g_sq",
")",
"for",
"g_sq",
"in",
"self",
".",
"_grad_squared",
"]",
"if",
"self",
".",
"_sparsity_debias",
":",
"avg_op_sparsity",
"=",
"self",
".",
"_grad_sparsity",
"(",
")",
"prepare_variables_op",
".",
"append",
"(",
"avg_op_sparsity",
")",
"# The following running average on squared norm of gradient",
"# is shared by grad_var and dist_to_opt",
"avg_op",
"=",
"self",
".",
"_moving_averager",
".",
"apply",
"(",
"self",
".",
"_grad_norm_squared",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"avg_op",
"]",
")",
":",
"self",
".",
"_grad_norm_squared_avg",
"=",
"[",
"self",
".",
"_moving_averager",
".",
"average",
"(",
"val",
")",
"for",
"val",
"in",
"self",
".",
"_grad_norm_squared",
"]",
"self",
".",
"_grad_norm_squared",
"=",
"tf",
".",
"add_n",
"(",
"self",
".",
"_grad_norm_squared",
")",
"self",
".",
"_grad_norm_squared_avg",
"=",
"tf",
".",
"add_n",
"(",
"self",
".",
"_grad_norm_squared_avg",
")",
"prepare_variables_op",
".",
"append",
"(",
"avg_op",
")",
"return",
"tf",
".",
"group",
"(",
"*",
"prepare_variables_op",
")"
] |
Prepare Variables for YellowFin.
Returns:
Grad**2, Norm, Norm**2, Mean(Norm**2) ops
|
[
"Prepare",
"Variables",
"for",
"YellowFin",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L308-L349
|
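Per variable, the preparation step above contributes sum(g**2); the per-variable norms are later collapsed with tf.add_n into one scalar. A numpy sketch of that aggregation (illustrative gradients):

import numpy as np

grads = [np.array([1.0, 2.0]), np.array([[1.0], [3.0]])]
grad_norm_squared = [float((g ** 2).sum()) for g in grads]
total = sum(grad_norm_squared)  # what tf.add_n produces
assert grad_norm_squared == [5.0, 10.0] and total == 15.0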
21,887
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/yellowfin.py
|
YellowFinOptimizer._get_cubic_root
|
def _get_cubic_root(self):
"""Get the cubic root."""
# We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
# where x = sqrt(mu).
# We substitute x, which is sqrt(mu), with x = y + 1.
# It gives y^3 + py = q
# where p = (D^2 h_min^2)/(2*C) and q = -p.
# We use the Vieta's substitution to compute the root.
# There is only one real solution y (which is in [0, 1] ).
# http://mathworld.wolfram.com/VietasSubstitution.html
assert_array = [
tf.Assert(
tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
[self._dist_to_opt_avg,]),
tf.Assert(
tf.logical_not(tf.is_nan(self._h_min)),
[self._h_min,]),
tf.Assert(
tf.logical_not(tf.is_nan(self._grad_var)),
[self._grad_var,]),
tf.Assert(
tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
[self._dist_to_opt_avg,]),
tf.Assert(
tf.logical_not(tf.is_inf(self._h_min)),
[self._h_min,]),
tf.Assert(
tf.logical_not(tf.is_inf(self._grad_var)),
[self._grad_var,])
]
with tf.control_dependencies(assert_array):
p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
y = w - p / 3.0 / w
x = y + 1
return x
|
python
|
def _get_cubic_root(self):
"""Get the cubic root."""
# We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
# where x = sqrt(mu).
# We substitute x, which is sqrt(mu), with x = y + 1.
# It gives y^3 + py = q
# where p = (D^2 h_min^2)/(2*C) and q = -p.
# We use the Vieta's substitution to compute the root.
# There is only one real solution y (which is in [0, 1] ).
# http://mathworld.wolfram.com/VietasSubstitution.html
assert_array = [
tf.Assert(
tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
[self._dist_to_opt_avg,]),
tf.Assert(
tf.logical_not(tf.is_nan(self._h_min)),
[self._h_min,]),
tf.Assert(
tf.logical_not(tf.is_nan(self._grad_var)),
[self._grad_var,]),
tf.Assert(
tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
[self._dist_to_opt_avg,]),
tf.Assert(
tf.logical_not(tf.is_inf(self._h_min)),
[self._h_min,]),
tf.Assert(
tf.logical_not(tf.is_inf(self._grad_var)),
[self._grad_var,])
]
with tf.control_dependencies(assert_array):
p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
y = w - p / 3.0 / w
x = y + 1
return x
|
[
"def",
"_get_cubic_root",
"(",
"self",
")",
":",
"# We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2",
"# where x = sqrt(mu).",
"# We substitute x, which is sqrt(mu), with x = y + 1.",
"# It gives y^3 + py = q",
"# where p = (D^2 h_min^2)/(2*C) and q = -p.",
"# We use the Vieta's substitution to compute the root.",
"# There is only one real solution y (which is in [0, 1] ).",
"# http://mathworld.wolfram.com/VietasSubstitution.html",
"assert_array",
"=",
"[",
"tf",
".",
"Assert",
"(",
"tf",
".",
"logical_not",
"(",
"tf",
".",
"is_nan",
"(",
"self",
".",
"_dist_to_opt_avg",
")",
")",
",",
"[",
"self",
".",
"_dist_to_opt_avg",
",",
"]",
")",
",",
"tf",
".",
"Assert",
"(",
"tf",
".",
"logical_not",
"(",
"tf",
".",
"is_nan",
"(",
"self",
".",
"_h_min",
")",
")",
",",
"[",
"self",
".",
"_h_min",
",",
"]",
")",
",",
"tf",
".",
"Assert",
"(",
"tf",
".",
"logical_not",
"(",
"tf",
".",
"is_nan",
"(",
"self",
".",
"_grad_var",
")",
")",
",",
"[",
"self",
".",
"_grad_var",
",",
"]",
")",
",",
"tf",
".",
"Assert",
"(",
"tf",
".",
"logical_not",
"(",
"tf",
".",
"is_inf",
"(",
"self",
".",
"_dist_to_opt_avg",
")",
")",
",",
"[",
"self",
".",
"_dist_to_opt_avg",
",",
"]",
")",
",",
"tf",
".",
"Assert",
"(",
"tf",
".",
"logical_not",
"(",
"tf",
".",
"is_inf",
"(",
"self",
".",
"_h_min",
")",
")",
",",
"[",
"self",
".",
"_h_min",
",",
"]",
")",
",",
"tf",
".",
"Assert",
"(",
"tf",
".",
"logical_not",
"(",
"tf",
".",
"is_inf",
"(",
"self",
".",
"_grad_var",
")",
")",
",",
"[",
"self",
".",
"_grad_var",
",",
"]",
")",
"]",
"with",
"tf",
".",
"control_dependencies",
"(",
"assert_array",
")",
":",
"p",
"=",
"self",
".",
"_dist_to_opt_avg",
"**",
"2",
"*",
"self",
".",
"_h_min",
"**",
"2",
"/",
"2",
"/",
"self",
".",
"_grad_var",
"w3",
"=",
"(",
"-",
"tf",
".",
"sqrt",
"(",
"p",
"**",
"2",
"+",
"4.0",
"/",
"27.0",
"*",
"p",
"**",
"3",
")",
"-",
"p",
")",
"/",
"2.0",
"w",
"=",
"tf",
".",
"sign",
"(",
"w3",
")",
"*",
"tf",
".",
"pow",
"(",
"tf",
".",
"abs",
"(",
"w3",
")",
",",
"1.0",
"/",
"3.0",
")",
"y",
"=",
"w",
"-",
"p",
"/",
"3.0",
"/",
"w",
"x",
"=",
"y",
"+",
"1",
"return",
"x"
] |
Get the cubic root.
|
[
"Get",
"the",
"cubic",
"root",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L351-L387
|
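The comments above reduce the tuning problem to y^3 + p*y + p = 0 with p = D^2 h_min^2 / (2 C), solved via Vieta's substitution. A pure-python numeric check that the returned x = y + 1 really satisfies that cubic (p is an illustrative value):

import math

def cubic_root_x(p):
  # Mirrors the Vieta's-substitution arithmetic in _get_cubic_root.
  w3 = (-math.sqrt(p ** 2 + 4.0 / 27.0 * p ** 3) - p) / 2.0
  w = math.copysign(abs(w3) ** (1.0 / 3.0), w3)  # real cube root
  y = w - p / 3.0 / w
  return y + 1

p = 1.7
y = cubic_root_x(p) - 1
assert abs(y ** 3 + p * y + p) < 1e-9  # y solves the cubic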
21,888
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/yellowfin.py
|
YellowFinOptimizer._get_lr_tensor
|
def _get_lr_tensor(self):
"""Get lr minimizing the surrogate.
Returns:
The lr_t.
"""
lr = tf.squared_difference(1.0, tf.sqrt(self._mu)) / self._h_min
return lr
|
python
|
def _get_lr_tensor(self):
"""Get lr minimizing the surrogate.
Returns:
The lr_t.
"""
lr = tf.squared_difference(1.0, tf.sqrt(self._mu)) / self._h_min
return lr
|
[
"def",
"_get_lr_tensor",
"(",
"self",
")",
":",
"lr",
"=",
"tf",
".",
"squared_difference",
"(",
"1.0",
",",
"tf",
".",
"sqrt",
"(",
"self",
".",
"_mu",
")",
")",
"/",
"self",
".",
"_h_min",
"return",
"lr"
] |
Get lr minimizing the surrogate.
Returns:
The lr_t.
|
[
"Get",
"lr",
"minimizing",
"the",
"surrogate",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L389-L396
|
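The tuned rate above is lr_t = (1 - sqrt(mu_t))^2 / h_min, the minimizer of the one-step quadratic surrogate for a given momentum. A quick numeric instance (illustrative values):

import math

mu, h_min = 0.81, 2.0
lr = (1.0 - math.sqrt(mu)) ** 2 / h_min
assert abs(lr - 0.005) < 1e-12  # (1 - 0.9)^2 / 2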
21,889
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/yellowfin.py
|
YellowFinOptimizer._get_mu_tensor
|
def _get_mu_tensor(self):
"""Get the min mu which minimize the surrogate.
Returns:
The mu_t.
"""
root = self._get_cubic_root()
dr = self._h_max / self._h_min
mu = tf.maximum(
root**2, ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2)
return mu
|
python
|
def _get_mu_tensor(self):
"""Get the min mu which minimize the surrogate.
Returns:
The mu_t.
"""
root = self._get_cubic_root()
dr = self._h_max / self._h_min
mu = tf.maximum(
root**2, ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2)
return mu
|
[
"def",
"_get_mu_tensor",
"(",
"self",
")",
":",
"root",
"=",
"self",
".",
"_get_cubic_root",
"(",
")",
"dr",
"=",
"self",
".",
"_h_max",
"/",
"self",
".",
"_h_min",
"mu",
"=",
"tf",
".",
"maximum",
"(",
"root",
"**",
"2",
",",
"(",
"(",
"tf",
".",
"sqrt",
"(",
"dr",
")",
"-",
"1",
")",
"/",
"(",
"tf",
".",
"sqrt",
"(",
"dr",
")",
"+",
"1",
")",
")",
"**",
"2",
")",
"return",
"mu"
] |
Get the min mu which minimizes the surrogate.
Returns:
The mu_t.
|
[
"Get",
"the",
"min",
"mu",
"which",
"minimize",
"the",
"surrogate",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L398-L408
|
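The momentum above is the larger of the squared cubic root and the classic heavy-ball bound ((sqrt(dr) - 1) / (sqrt(dr) + 1))^2, where dr = h_max / h_min is the curvature dynamic range. A numeric instance where the heavy-ball term dominates (illustrative values):

import math

h_max, h_min, root = 9.0, 1.0, 0.3
dr = h_max / h_min
mu = max(root ** 2, ((math.sqrt(dr) - 1) / (math.sqrt(dr) + 1)) ** 2)
assert abs(mu - 0.25) < 1e-12  # ((3 - 1) / (3 + 1))^2 beats 0.09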
21,890
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/yellowfin.py
|
YellowFinOptimizer._yellowfin
|
def _yellowfin(self):
"""YellowFin auto-tuning optimizer based on momentum SGD.
Returns:
YF ops
(Curvature range,
Grad_variance,
Dist_to_opt,
Single-Step,
Auto-Tuning)
"""
# List for the returned Operations.
yellowfin_ops = []
# Curvature range ops.
curv_range_ops = self._curvature_range()
yellowfin_ops += curv_range_ops
# Estimate of gradient Variance ops.
grad_var_ops = self._grad_variance()
yellowfin_ops += grad_var_ops
# Distance to optimum ops.
dist_to_opt_ops = self._dist_to_opt()
yellowfin_ops += dist_to_opt_ops
# Single-Step: minimizes the surrogate for the expected
# squared distance from the optimum of a local quadratic
# approximation after a single step while keeping all directions in the
# robust region.
self._mu = tf.identity(tf.cond(self._do_tune,
self._get_mu_tensor,
lambda: self._mu_var))
with tf.control_dependencies([self._mu]):
self._lr = tf.identity(tf.cond(self._do_tune,
self._get_lr_tensor,
lambda: self._lr_var))
# Tune learning rate and momentum.
with tf.control_dependencies([self._mu, self._lr]):
self._mu = self._beta * self._mu_var + (1 - self._beta) * self._mu
self._lr = self._beta * self._lr_var + (1 - self._beta) * self._lr
yellowfin_ops.append(tf.assign(self._mu_var, self._mu))
yellowfin_ops.append(tf.assign(self._lr_var, self._lr))
yellowfin_ops = tf.group(*yellowfin_ops)
return yellowfin_ops
|
python
|
def _yellowfin(self):
"""YellowFin auto-tuning optimizer based on momentum SGD.
Returns:
YF ops
(Curvature range,
Grad_variance,
Dist_to_opt,
Single-Step,
Auto-Tuning)
"""
# List for the returned Operations.
yellowfin_ops = []
# Curvature range ops.
curv_range_ops = self._curvature_range()
yellowfin_ops += curv_range_ops
# Estimate of gradient Variance ops.
grad_var_ops = self._grad_variance()
yellowfin_ops += grad_var_ops
# Distance to optimum ops.
dist_to_opt_ops = self._dist_to_opt()
yellowfin_ops += dist_to_opt_ops
# Single-Step: minimizes the surrogate for the expected
# squared distance from the optimum of a local quadratic
# approximation after a single step while keeping all directions in the
# robust region.
self._mu = tf.identity(tf.cond(self._do_tune,
self._get_mu_tensor,
lambda: self._mu_var))
with tf.control_dependencies([self._mu]):
self._lr = tf.identity(tf.cond(self._do_tune,
self._get_lr_tensor,
lambda: self._lr_var))
# Tune learning rate and momentum.
with tf.control_dependencies([self._mu, self._lr]):
self._mu = self._beta * self._mu_var + (1 - self._beta) * self._mu
self._lr = self._beta * self._lr_var + (1 - self._beta) * self._lr
yellowfin_ops.append(tf.assign(self._mu_var, self._mu))
yellowfin_ops.append(tf.assign(self._lr_var, self._lr))
yellowfin_ops = tf.group(*yellowfin_ops)
return yellowfin_ops
|
[
"def",
"_yellowfin",
"(",
"self",
")",
":",
"# List for the returned Operations.",
"yellowfin_ops",
"=",
"[",
"]",
"# Curvature range ops.",
"curv_range_ops",
"=",
"self",
".",
"_curvature_range",
"(",
")",
"yellowfin_ops",
"+=",
"curv_range_ops",
"# Estimate of gradient Variance ops.",
"grad_var_ops",
"=",
"self",
".",
"_grad_variance",
"(",
")",
"yellowfin_ops",
"+=",
"grad_var_ops",
"# Distance to optimum ops.",
"dist_to_opt_ops",
"=",
"self",
".",
"_dist_to_opt",
"(",
")",
"yellowfin_ops",
"+=",
"dist_to_opt_ops",
"# Single-Step: minimizes the surrogate for the expected",
"# squared distance from the optimum of a local quadratic",
"# approximation after a single step while keeping all directions in the",
"# robust region.",
"self",
".",
"_mu",
"=",
"tf",
".",
"identity",
"(",
"tf",
".",
"cond",
"(",
"self",
".",
"_do_tune",
",",
"self",
".",
"_get_mu_tensor",
",",
"lambda",
":",
"self",
".",
"_mu_var",
")",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"self",
".",
"_mu",
"]",
")",
":",
"self",
".",
"_lr",
"=",
"tf",
".",
"identity",
"(",
"tf",
".",
"cond",
"(",
"self",
".",
"_do_tune",
",",
"self",
".",
"_get_lr_tensor",
",",
"lambda",
":",
"self",
".",
"_lr_var",
")",
")",
"# Tune learning rate and momentum.",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"self",
".",
"_mu",
",",
"self",
".",
"_lr",
"]",
")",
":",
"self",
".",
"_mu",
"=",
"self",
".",
"_beta",
"*",
"self",
".",
"_mu_var",
"+",
"(",
"1",
"-",
"self",
".",
"_beta",
")",
"*",
"self",
".",
"_mu",
"self",
".",
"_lr",
"=",
"self",
".",
"_beta",
"*",
"self",
".",
"_lr_var",
"+",
"(",
"1",
"-",
"self",
".",
"_beta",
")",
"*",
"self",
".",
"_lr",
"yellowfin_ops",
".",
"append",
"(",
"tf",
".",
"assign",
"(",
"self",
".",
"_mu_var",
",",
"self",
".",
"_mu",
")",
")",
"yellowfin_ops",
".",
"append",
"(",
"tf",
".",
"assign",
"(",
"self",
".",
"_lr_var",
",",
"self",
".",
"_lr",
")",
")",
"yellowfin_ops",
"=",
"tf",
".",
"group",
"(",
"*",
"yellowfin_ops",
")",
"return",
"yellowfin_ops"
] |
YellowFin auto-tuning optimizer based on momentum SGD.
Returns:
YF ops
(Curvature range,
Grad_variance,
Dist_to_opt,
Single-Step,
Auto-Tuning)
|
[
"YellowFin",
"auto",
"-",
"tuning",
"optimizer",
"based",
"on",
"momentum",
"SGD",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L410-L454
|
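A minimal plain-Python sketch of the smoothing step near the end of `_yellowfin`: the freshly tuned momentum and learning rate are blended into the running values with an exponential moving average. Here `beta`, `mu_var`, and `lr_var` are scalar stand-ins for `self._beta`, `self._mu_var`, and `self._lr_var`, and the concrete numbers are illustrative only.

beta = 0.999                   # smoothing coefficient (stands in for self._beta)
mu_var, lr_var = 0.90, 0.010   # previously tuned momentum / learning rate
mu_new, lr_new = 0.95, 0.020   # values proposed by the single-step tuner

# Exponential moving average: the tuned values drift only slightly toward
# the new proposals, which keeps the optimizer stable between tuning steps.
mu = beta * mu_var + (1 - beta) * mu_new
lr = beta * lr_var + (1 - beta) * lr_new
print(mu, lr)  # 0.90005, 0.01001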
21,891
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/yellowfin.py
|
YellowFinOptimizer.apply_gradients
|
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Applying gradients and tune hyperparams with YellowFin.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
    name: Optional name for the returned operation. Defaults to the
name passed to the Optimizer constructor.
Returns:
(A group of operations)
Variable Update with Momentum ops,
    YellowFin ops (Curvature, Variance, Distance),
SingleStep and lr_mu tuning ops,
Step increment ops.
"""
self._grad, self._vars = zip(*[(g, t)
for g, t in grads_and_vars if g is not None])
# Var update with Momentum.
with tf.variable_scope("apply_updates"):
# Gradient Clipping?
if self._clip_thresh_var is not None:
self._grad, _ = tf.clip_by_global_norm(
self._grad, self._clip_thresh_var)
apply_grad_op = self._momentum_optimizer.apply_gradients(
zip(self._grad, self._vars),
global_step=global_step,
name=name)
else:
apply_grad_op = self._momentum_optimizer.apply_gradients(
zip(self._grad, self._vars),
global_step=global_step,
name=name)
# Begin lr and mu tuning.
with tf.variable_scope("prepare_yellowFin_variables"):
# the dependencies ideally only need to be after clip is done,
# i.e. depends on self._grads. However, the control_dependencies
# does not support indexed slice for sparse gradients.
# The alternative dependencies here might be slightly slower due
# to less parallelization.
with tf.control_dependencies([apply_grad_op,]):
prepare_variables_op = self._prepare_variables()
with tf.variable_scope("yellowfin"):
with tf.control_dependencies([prepare_variables_op]):
yellowfin_op = self._yellowfin()
# Update YellowFin step variable.
with tf.control_dependencies([yellowfin_op]):
self._increment_step_op = tf.assign_add(self._step, 1).op
return tf.group(apply_grad_op,
prepare_variables_op,
yellowfin_op,
self._increment_step_op)
|
python
|
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Applying gradients and tune hyperparams with YellowFin.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
    name: Optional name for the returned operation. Defaults to the
name passed to the Optimizer constructor.
Returns:
(A group of operations)
Variable Update with Momentum ops,
    YellowFin ops (Curvature, Variance, Distance),
SingleStep and lr_mu tuning ops,
Step increment ops.
"""
self._grad, self._vars = zip(*[(g, t)
for g, t in grads_and_vars if g is not None])
# Var update with Momentum.
with tf.variable_scope("apply_updates"):
# Gradient Clipping?
if self._clip_thresh_var is not None:
self._grad, _ = tf.clip_by_global_norm(
self._grad, self._clip_thresh_var)
apply_grad_op = self._momentum_optimizer.apply_gradients(
zip(self._grad, self._vars),
global_step=global_step,
name=name)
else:
apply_grad_op = self._momentum_optimizer.apply_gradients(
zip(self._grad, self._vars),
global_step=global_step,
name=name)
# Begin lr and mu tuning.
with tf.variable_scope("prepare_yellowFin_variables"):
# the dependencies ideally only need to be after clip is done,
# i.e. depends on self._grads. However, the control_dependencies
# does not support indexed slice for sparse gradients.
# The alternative dependencies here might be slightly slower due
# to less parallelization.
with tf.control_dependencies([apply_grad_op,]):
prepare_variables_op = self._prepare_variables()
with tf.variable_scope("yellowfin"):
with tf.control_dependencies([prepare_variables_op]):
yellowfin_op = self._yellowfin()
# Update YellowFin step variable.
with tf.control_dependencies([yellowfin_op]):
self._increment_step_op = tf.assign_add(self._step, 1).op
return tf.group(apply_grad_op,
prepare_variables_op,
yellowfin_op,
self._increment_step_op)
|
[
"def",
"apply_gradients",
"(",
"self",
",",
"grads_and_vars",
",",
"global_step",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"self",
".",
"_grad",
",",
"self",
".",
"_vars",
"=",
"zip",
"(",
"*",
"[",
"(",
"g",
",",
"t",
")",
"for",
"g",
",",
"t",
"in",
"grads_and_vars",
"if",
"g",
"is",
"not",
"None",
"]",
")",
"# Var update with Momentum.",
"with",
"tf",
".",
"variable_scope",
"(",
"\"apply_updates\"",
")",
":",
"# Gradient Clipping?",
"if",
"self",
".",
"_clip_thresh_var",
"is",
"not",
"None",
":",
"self",
".",
"_grad",
",",
"_",
"=",
"tf",
".",
"clip_by_global_norm",
"(",
"self",
".",
"_grad",
",",
"self",
".",
"_clip_thresh_var",
")",
"apply_grad_op",
"=",
"self",
".",
"_momentum_optimizer",
".",
"apply_gradients",
"(",
"zip",
"(",
"self",
".",
"_grad",
",",
"self",
".",
"_vars",
")",
",",
"global_step",
"=",
"global_step",
",",
"name",
"=",
"name",
")",
"else",
":",
"apply_grad_op",
"=",
"self",
".",
"_momentum_optimizer",
".",
"apply_gradients",
"(",
"zip",
"(",
"self",
".",
"_grad",
",",
"self",
".",
"_vars",
")",
",",
"global_step",
"=",
"global_step",
",",
"name",
"=",
"name",
")",
"# Begin lr and mu tuning.",
"with",
"tf",
".",
"variable_scope",
"(",
"\"prepare_yellowFin_variables\"",
")",
":",
"# the dependencies ideally only need to be after clip is done,",
"# i.e. depends on self._grads. However, the control_dependencies",
"# does not support indexed slice for sparse gradients.",
"# The alternative dependencies here might be slightly slower due",
"# to less parallelization.",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"apply_grad_op",
",",
"]",
")",
":",
"prepare_variables_op",
"=",
"self",
".",
"_prepare_variables",
"(",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"yellowfin\"",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"prepare_variables_op",
"]",
")",
":",
"yellowfin_op",
"=",
"self",
".",
"_yellowfin",
"(",
")",
"# Update YellowFin step variable.",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"yellowfin_op",
"]",
")",
":",
"self",
".",
"_increment_step_op",
"=",
"tf",
".",
"assign_add",
"(",
"self",
".",
"_step",
",",
"1",
")",
".",
"op",
"return",
"tf",
".",
"group",
"(",
"apply_grad_op",
",",
"prepare_variables_op",
",",
"yellowfin_op",
",",
"self",
".",
"_increment_step_op",
")"
] |
Apply gradients and tune hyperparams with YellowFin.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
    name: Optional name for the returned operation. Defaults to the
name passed to the Optimizer constructor.
Returns:
(A group of operations)
Variable Update with Momentum ops,
    YellowFin ops (Curvature, Variance, Distance),
SingleStep and lr_mu tuning ops,
Step increment ops.
|
[
"Applying",
"gradients",
"and",
"tune",
"hyperparams",
"with",
"YellowFin",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L460-L519
|
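The optional clipping branch in `apply_gradients` relies on `tf.clip_by_global_norm`. A small NumPy sketch of what that operation does, with a hypothetical `clip_thresh` standing in for `self._clip_thresh_var`:

import numpy as np

def clip_by_global_norm(grads, clip_thresh):
  """Scale all gradients together so their joint L2 norm is at most clip_thresh."""
  global_norm = np.sqrt(sum(np.sum(g ** 2) for g in grads))
  scale = min(1.0, clip_thresh / max(global_norm, 1e-12))
  return [g * scale for g in grads], global_norm

grads = [np.array([3.0, 4.0]), np.array([0.0, 12.0])]   # global norm = sqrt(9+16+144) = 13
clipped, norm = clip_by_global_norm(grads, clip_thresh=1.0)
print(norm)  # 13.0; every gradient is scaled by the same factor 1/13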
21,892
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/yellowfin.py
|
YellowFinOptimizer.compute_gradients
|
def compute_gradients(self,
loss,
var_list,
global_step=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None):
"""Compute gradients through momentum optimizer.
Args:
loss: A Tensor containing the value to minimize.
var_list: Optional list or tuple of tf.Variable to update
to minimize loss. Defaults to the list of variables collected
in the graph under the key GraphKey.TRAINABLE_VARIABLES.
global_step: Optional Variable to increment by one after the
variables have been updated.
gate_gradients: How to gate the computation of gradients.
Can be GATE_NONE, GATE_OP, or GATE_GRAPH.
aggregation_method: Specifies the method used to combine
gradient terms. Valid values are defined in the class AggregationMethod.
colocate_gradients_with_ops: If True, try collocating gradients with
the corresponding op.
    name: Optional name for the returned operation. Defaults to the name
passed to the Optimizer constructor.
grad_loss: Optional. A Tensor holding the gradient computed for loss.
Returns:
A list of (gradient, variable) pairs. Variable is always present,
but gradient can be None.
"""
del global_step, name # Unused for now.
return self._momentum_optimizer.compute_gradients(
loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
|
python
|
def compute_gradients(self,
loss,
var_list,
global_step=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None):
"""Compute gradients through momentum optimizer.
Args:
loss: A Tensor containing the value to minimize.
var_list: Optional list or tuple of tf.Variable to update
to minimize loss. Defaults to the list of variables collected
in the graph under the key GraphKey.TRAINABLE_VARIABLES.
global_step: Optional Variable to increment by one after the
variables have been updated.
gate_gradients: How to gate the computation of gradients.
Can be GATE_NONE, GATE_OP, or GATE_GRAPH.
aggregation_method: Specifies the method used to combine
gradient terms. Valid values are defined in the class AggregationMethod.
colocate_gradients_with_ops: If True, try collocating gradients with
the corresponding op.
    name: Optional name for the returned operation. Defaults to the name
passed to the Optimizer constructor.
grad_loss: Optional. A Tensor holding the gradient computed for loss.
Returns:
A list of (gradient, variable) pairs. Variable is always present,
but gradient can be None.
"""
del global_step, name # Unused for now.
return self._momentum_optimizer.compute_gradients(
loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
|
[
"def",
"compute_gradients",
"(",
"self",
",",
"loss",
",",
"var_list",
",",
"global_step",
"=",
"None",
",",
"gate_gradients",
"=",
"GATE_OP",
",",
"aggregation_method",
"=",
"None",
",",
"colocate_gradients_with_ops",
"=",
"False",
",",
"name",
"=",
"None",
",",
"grad_loss",
"=",
"None",
")",
":",
"del",
"global_step",
",",
"name",
"# Unused for now.",
"return",
"self",
".",
"_momentum_optimizer",
".",
"compute_gradients",
"(",
"loss",
",",
"var_list",
"=",
"var_list",
",",
"gate_gradients",
"=",
"gate_gradients",
",",
"aggregation_method",
"=",
"aggregation_method",
",",
"colocate_gradients_with_ops",
"=",
"colocate_gradients_with_ops",
",",
"grad_loss",
"=",
"grad_loss",
")"
] |
Compute gradients through momentum optimizer.
Args:
loss: A Tensor containing the value to minimize.
var_list: Optional list or tuple of tf.Variable to update
to minimize loss. Defaults to the list of variables collected
in the graph under the key GraphKey.TRAINABLE_VARIABLES.
global_step: Optional Variable to increment by one after the
variables have been updated.
gate_gradients: How to gate the computation of gradients.
Can be GATE_NONE, GATE_OP, or GATE_GRAPH.
aggregation_method: Specifies the method used to combine
gradient terms. Valid values are defined in the class AggregationMethod.
colocate_gradients_with_ops: If True, try collocating gradients with
the corresponding op.
    name: Optional name for the returned operation. Defaults to the name
passed to the Optimizer constructor.
grad_loss: Optional. A Tensor holding the gradient computed for loss.
Returns:
A list of (gradient, variable) pairs. Variable is always present,
but gradient can be None.
|
[
"Compute",
"gradients",
"through",
"momentum",
"optimizer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L521-L560
|
21,893
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/yellowfin.py
|
YellowFinOptimizer.minimize
|
def minimize(self,
loss,
global_step=None,
var_list=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None):
"""Adapted from TensorFlow Optimizer base class member function.
Add operations to minimize `loss` by updating `var_list`.
This method simply combines calls `compute_gradients()` and
`apply_gradients()`. If you want to process the gradient before applying
them call `tf.gradients()` and `self.apply_gradients()` explicitly instead
of using this function.
Args:
loss: A Tensor containing the value to minimize.
global_step: Optional Variable to increment by one after the variables
have been updated.
var_list: Optional list or tuple of Variable objects to update to
minimize loss. Defaults to the list of variables collected in
the graph under the key GraphKeys.TRAINABLE_VARIABLES.
gate_gradients: How to gate the computation of gradients.
Can be GATE_NONE, GATE_OP, or GATE_GRAPH.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class AggregationMethod.
colocate_gradients_with_ops: If True, try collocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A Tensor holding the gradient computed for loss.
Returns:
An Operation that updates the variables in var_list.
If global_step was not None, that operation also increments global_step.
Raises:
ValueError: if no gradients are provided for any variable.
"""
grads_and_vars = self._momentum_optimizer.compute_gradients(
loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for ops"
" that do not support gradients, between variables %s and loss %s." %
([str(v) for _, v in grads_and_vars], loss))
for g, v in grads_and_vars:
print("g ", g)
print("v ", v)
return self.apply_gradients(grads_and_vars,
global_step=global_step,
name=name)
|
python
|
def minimize(self,
loss,
global_step=None,
var_list=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None):
"""Adapted from TensorFlow Optimizer base class member function.
Add operations to minimize `loss` by updating `var_list`.
This method simply combines calls `compute_gradients()` and
`apply_gradients()`. If you want to process the gradient before applying
them call `tf.gradients()` and `self.apply_gradients()` explicitly instead
of using this function.
Args:
loss: A Tensor containing the value to minimize.
global_step: Optional Variable to increment by one after the variables
have been updated.
var_list: Optional list or tuple of Variable objects to update to
minimize loss. Defaults to the list of variables collected in
the graph under the key GraphKeys.TRAINABLE_VARIABLES.
gate_gradients: How to gate the computation of gradients.
Can be GATE_NONE, GATE_OP, or GATE_GRAPH.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class AggregationMethod.
colocate_gradients_with_ops: If True, try collocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A Tensor holding the gradient computed for loss.
Returns:
An Operation that updates the variables in var_list.
If global_step was not None, that operation also increments global_step.
Raises:
ValueError: if no gradients are provided for any variable.
"""
grads_and_vars = self._momentum_optimizer.compute_gradients(
loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for ops"
" that do not support gradients, between variables %s and loss %s." %
([str(v) for _, v in grads_and_vars], loss))
for g, v in grads_and_vars:
print("g ", g)
print("v ", v)
return self.apply_gradients(grads_and_vars,
global_step=global_step,
name=name)
|
[
"def",
"minimize",
"(",
"self",
",",
"loss",
",",
"global_step",
"=",
"None",
",",
"var_list",
"=",
"None",
",",
"gate_gradients",
"=",
"GATE_OP",
",",
"aggregation_method",
"=",
"None",
",",
"colocate_gradients_with_ops",
"=",
"False",
",",
"name",
"=",
"None",
",",
"grad_loss",
"=",
"None",
")",
":",
"grads_and_vars",
"=",
"self",
".",
"_momentum_optimizer",
".",
"compute_gradients",
"(",
"loss",
",",
"var_list",
"=",
"var_list",
",",
"gate_gradients",
"=",
"gate_gradients",
",",
"aggregation_method",
"=",
"aggregation_method",
",",
"colocate_gradients_with_ops",
"=",
"colocate_gradients_with_ops",
",",
"grad_loss",
"=",
"grad_loss",
")",
"vars_with_grad",
"=",
"[",
"v",
"for",
"g",
",",
"v",
"in",
"grads_and_vars",
"if",
"g",
"is",
"not",
"None",
"]",
"if",
"not",
"vars_with_grad",
":",
"raise",
"ValueError",
"(",
"\"No gradients provided for any variable, check your graph for ops\"",
"\" that do not support gradients, between variables %s and loss %s.\"",
"%",
"(",
"[",
"str",
"(",
"v",
")",
"for",
"_",
",",
"v",
"in",
"grads_and_vars",
"]",
",",
"loss",
")",
")",
"for",
"g",
",",
"v",
"in",
"grads_and_vars",
":",
"print",
"(",
"\"g \"",
",",
"g",
")",
"print",
"(",
"\"v \"",
",",
"v",
")",
"return",
"self",
".",
"apply_gradients",
"(",
"grads_and_vars",
",",
"global_step",
"=",
"global_step",
",",
"name",
"=",
"name",
")"
] |
Adapted from TensorFlow Optimizer base class member function.
Add operations to minimize `loss` by updating `var_list`.
This method simply combines calls `compute_gradients()` and
`apply_gradients()`. If you want to process the gradient before applying
them call `tf.gradients()` and `self.apply_gradients()` explicitly instead
of using this function.
Args:
loss: A Tensor containing the value to minimize.
global_step: Optional Variable to increment by one after the variables
have been updated.
var_list: Optional list or tuple of Variable objects to update to
minimize loss. Defaults to the list of variables collected in
the graph under the key GraphKeys.TRAINABLE_VARIABLES.
gate_gradients: How to gate the computation of gradients.
Can be GATE_NONE, GATE_OP, or GATE_GRAPH.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class AggregationMethod.
colocate_gradients_with_ops: If True, try collocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A Tensor holding the gradient computed for loss.
Returns:
An Operation that updates the variables in var_list.
If global_step was not None, that operation also increments global_step.
Raises:
ValueError: if no gradients are provided for any variable.
|
[
"Adapted",
"from",
"TensorFlow",
"Optimizer",
"base",
"class",
"member",
"function",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L562-L622
|
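A hypothetical end-to-end usage sketch in TF1 graph mode, assuming `YellowFinOptimizer` is importable from `tensor2tensor.utils.yellowfin` and that its constructor works with default arguments; treat this as an illustration rather than a verbatim recipe from the repository.

import tensorflow as tf
from tensor2tensor.utils.yellowfin import YellowFinOptimizer  # assumed import path

w = tf.get_variable("w", shape=[10], initializer=tf.zeros_initializer())
loss = tf.reduce_sum(tf.square(w - 1.0))
global_step = tf.train.get_or_create_global_step()

opt = YellowFinOptimizer()  # lr and momentum are auto-tuned after each step
train_op = opt.minimize(loss, global_step=global_step)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for _ in range(5):
    sess.run(train_op)  # applies momentum update, then the YellowFin tuning ops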
21,894
|
tensorflow/tensor2tensor
|
tensor2tensor/models/bytenet.py
|
bytenet_internal
|
def bytenet_internal(inputs, targets, hparams):
"""ByteNet, main step used for training."""
with tf.variable_scope("bytenet"):
# Flatten inputs and extend length by 50%.
inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2)
extend_length = tf.to_int32(0.5 * tf.to_float(tf.shape(inputs)[1]))
inputs_shape = inputs.shape.as_list()
inputs = tf.pad(inputs, [[0, 0], [0, extend_length], [0, 0], [0, 0]])
inputs_shape[1] = None
inputs.set_shape(inputs_shape) # Don't lose the other shapes when padding.
# Pad inputs and targets to be the same length, divisible by 50.
inputs, targets = common_layers.pad_to_same_length(
inputs, targets, final_length_divisible_by=50)
final_encoder = residual_dilated_conv(inputs, hparams.num_block_repeat,
"SAME", "encoder", hparams)
shifted_targets = common_layers.shift_right(targets)
kernel = (hparams.kernel_height, hparams.kernel_width)
decoder_start = common_layers.conv_block(
tf.concat([final_encoder, shifted_targets], axis=3),
hparams.hidden_size, [((1, 1), kernel)],
padding="LEFT")
return residual_dilated_conv(decoder_start, hparams.num_block_repeat,
"LEFT", "decoder", hparams)
|
python
|
def bytenet_internal(inputs, targets, hparams):
"""ByteNet, main step used for training."""
with tf.variable_scope("bytenet"):
# Flatten inputs and extend length by 50%.
inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2)
extend_length = tf.to_int32(0.5 * tf.to_float(tf.shape(inputs)[1]))
inputs_shape = inputs.shape.as_list()
inputs = tf.pad(inputs, [[0, 0], [0, extend_length], [0, 0], [0, 0]])
inputs_shape[1] = None
inputs.set_shape(inputs_shape) # Don't lose the other shapes when padding.
# Pad inputs and targets to be the same length, divisible by 50.
inputs, targets = common_layers.pad_to_same_length(
inputs, targets, final_length_divisible_by=50)
final_encoder = residual_dilated_conv(inputs, hparams.num_block_repeat,
"SAME", "encoder", hparams)
shifted_targets = common_layers.shift_right(targets)
kernel = (hparams.kernel_height, hparams.kernel_width)
decoder_start = common_layers.conv_block(
tf.concat([final_encoder, shifted_targets], axis=3),
hparams.hidden_size, [((1, 1), kernel)],
padding="LEFT")
return residual_dilated_conv(decoder_start, hparams.num_block_repeat,
"LEFT", "decoder", hparams)
|
[
"def",
"bytenet_internal",
"(",
"inputs",
",",
"targets",
",",
"hparams",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"bytenet\"",
")",
":",
"# Flatten inputs and extend length by 50%.",
"inputs",
"=",
"tf",
".",
"expand_dims",
"(",
"common_layers",
".",
"flatten4d3d",
"(",
"inputs",
")",
",",
"axis",
"=",
"2",
")",
"extend_length",
"=",
"tf",
".",
"to_int32",
"(",
"0.5",
"*",
"tf",
".",
"to_float",
"(",
"tf",
".",
"shape",
"(",
"inputs",
")",
"[",
"1",
"]",
")",
")",
"inputs_shape",
"=",
"inputs",
".",
"shape",
".",
"as_list",
"(",
")",
"inputs",
"=",
"tf",
".",
"pad",
"(",
"inputs",
",",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"extend_length",
"]",
",",
"[",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
")",
"inputs_shape",
"[",
"1",
"]",
"=",
"None",
"inputs",
".",
"set_shape",
"(",
"inputs_shape",
")",
"# Don't lose the other shapes when padding.",
"# Pad inputs and targets to be the same length, divisible by 50.",
"inputs",
",",
"targets",
"=",
"common_layers",
".",
"pad_to_same_length",
"(",
"inputs",
",",
"targets",
",",
"final_length_divisible_by",
"=",
"50",
")",
"final_encoder",
"=",
"residual_dilated_conv",
"(",
"inputs",
",",
"hparams",
".",
"num_block_repeat",
",",
"\"SAME\"",
",",
"\"encoder\"",
",",
"hparams",
")",
"shifted_targets",
"=",
"common_layers",
".",
"shift_right",
"(",
"targets",
")",
"kernel",
"=",
"(",
"hparams",
".",
"kernel_height",
",",
"hparams",
".",
"kernel_width",
")",
"decoder_start",
"=",
"common_layers",
".",
"conv_block",
"(",
"tf",
".",
"concat",
"(",
"[",
"final_encoder",
",",
"shifted_targets",
"]",
",",
"axis",
"=",
"3",
")",
",",
"hparams",
".",
"hidden_size",
",",
"[",
"(",
"(",
"1",
",",
"1",
")",
",",
"kernel",
")",
"]",
",",
"padding",
"=",
"\"LEFT\"",
")",
"return",
"residual_dilated_conv",
"(",
"decoder_start",
",",
"hparams",
".",
"num_block_repeat",
",",
"\"LEFT\"",
",",
"\"decoder\"",
",",
"hparams",
")"
] |
ByteNet, main step used for training.
|
[
"ByteNet",
"main",
"step",
"used",
"for",
"training",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/bytenet.py#L50-L74
|
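The length bookkeeping in `bytenet_internal` can be checked with a small hypothetical helper, assuming `pad_to_same_length` rounds the longer of the two lengths up to the requested multiple: inputs are first extended by 50%, then both sequences are padded to a common length divisible by 50.

def padded_length(input_len, target_len, divisor=50):
  """Hypothetical helper mirroring the length math in bytenet_internal."""
  extended = input_len + int(0.5 * input_len)  # extend inputs by 50%
  longest = max(extended, target_len)          # pad both to the longer length
  return -(-longest // divisor) * divisor      # round up to a multiple of divisor

print(padded_length(70, 40))   # inputs: 70 -> 105 extended -> both padded to 150
print(padded_length(20, 90))   # targets dominate -> both padded to 100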
21,895
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/snli.py
|
_download_and_parse_dataset
|
def _download_and_parse_dataset(tmp_dir, train):
"""Downloads and prepairs the dataset to be parsed by the data_generator."""
file_path = generator_utils.maybe_download(tmp_dir, _SNLI_ZIP, _SNLI_URL)
zip_ref = zipfile.ZipFile(file_path, 'r')
zip_ref.extractall(tmp_dir)
zip_ref.close()
file_name = 'train' if train else 'dev'
dataset_file_path = os.path.join(tmp_dir, _SNLI_DATA_PATH % file_name)
_parse_dataset(dataset_file_path, tmp_dir, train)
|
python
|
def _download_and_parse_dataset(tmp_dir, train):
"""Downloads and prepairs the dataset to be parsed by the data_generator."""
file_path = generator_utils.maybe_download(tmp_dir, _SNLI_ZIP, _SNLI_URL)
zip_ref = zipfile.ZipFile(file_path, 'r')
zip_ref.extractall(tmp_dir)
zip_ref.close()
file_name = 'train' if train else 'dev'
dataset_file_path = os.path.join(tmp_dir, _SNLI_DATA_PATH % file_name)
_parse_dataset(dataset_file_path, tmp_dir, train)
|
[
"def",
"_download_and_parse_dataset",
"(",
"tmp_dir",
",",
"train",
")",
":",
"file_path",
"=",
"generator_utils",
".",
"maybe_download",
"(",
"tmp_dir",
",",
"_SNLI_ZIP",
",",
"_SNLI_URL",
")",
"zip_ref",
"=",
"zipfile",
".",
"ZipFile",
"(",
"file_path",
",",
"'r'",
")",
"zip_ref",
".",
"extractall",
"(",
"tmp_dir",
")",
"zip_ref",
".",
"close",
"(",
")",
"file_name",
"=",
"'train'",
"if",
"train",
"else",
"'dev'",
"dataset_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"_SNLI_DATA_PATH",
"%",
"file_name",
")",
"_parse_dataset",
"(",
"dataset_file_path",
",",
"tmp_dir",
",",
"train",
")"
] |
Downloads and prepares the dataset to be parsed by the data_generator.
|
[
"Downloads",
"and",
"prepairs",
"the",
"dataset",
"to",
"be",
"parsed",
"by",
"the",
"data_generator",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/snli.py#L51-L60
|
21,896
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/snli.py
|
_get_tokens_and_tags
|
def _get_tokens_and_tags(parse_str):
"""Parse str to tokens and pos tags."""
tokens = []
parse_split = parse_str.split(' ')
for p in parse_split:
assert p.startswith('(') or p.endswith(')')
if p.endswith(')'):
token = p.replace(')', '')
tokens.append(token)
return tokens
|
python
|
def _get_tokens_and_tags(parse_str):
"""Parse str to tokens and pos tags."""
tokens = []
parse_split = parse_str.split(' ')
for p in parse_split:
assert p.startswith('(') or p.endswith(')')
if p.endswith(')'):
token = p.replace(')', '')
tokens.append(token)
return tokens
|
[
"def",
"_get_tokens_and_tags",
"(",
"parse_str",
")",
":",
"tokens",
"=",
"[",
"]",
"parse_split",
"=",
"parse_str",
".",
"split",
"(",
"' '",
")",
"for",
"p",
"in",
"parse_split",
":",
"assert",
"p",
".",
"startswith",
"(",
"'('",
")",
"or",
"p",
".",
"endswith",
"(",
"')'",
")",
"if",
"p",
".",
"endswith",
"(",
"')'",
")",
":",
"token",
"=",
"p",
".",
"replace",
"(",
"')'",
",",
"''",
")",
"tokens",
".",
"append",
"(",
"token",
")",
"return",
"tokens"
] |
Parse str to tokens and pos tags.
|
[
"Parse",
"str",
"to",
"tokens",
"and",
"pos",
"tags",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/snli.py#L63-L73
|
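A self-contained check of the leaf-token extraction: every whitespace-separated item of a binary parse either opens a constituent with '(' or closes a word with ')', and only the closing items carry surface tokens.

def _get_tokens_and_tags(parse_str):
  """Copy of the function above, for a standalone check."""
  tokens = []
  for p in parse_str.split(' '):
    assert p.startswith('(') or p.endswith(')')
    if p.endswith(')'):
      tokens.append(p.replace(')', ''))
  return tokens

print(_get_tokens_and_tags("(ROOT (NP (DT A) (JJ small) (NN dog)))"))
# -> ['A', 'small', 'dog']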
21,897
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/snli.py
|
_parse_dataset
|
def _parse_dataset(file_path, tmp_dir, train):
"""Convert the dataset in to a simpler format.
  This function creates two files: one that is processed to produce a vocab
  and another that is used to generate the data.
Args:
file_path: string, path to the file to parse.
tmp_dir: string, path to the directory to output the files.
train: bool, indicating if we are parsing the training set.
"""
input_path = file_path
file_name = 'train' if train else 'dev'
gen_output_path = os.path.join(tmp_dir, file_name + '.txt')
example_output_path = os.path.join(tmp_dir, _EXAMPLES_FILE)
print('input path: ' + input_path)
print('gen_output_path: ' + gen_output_path)
print('example_output_path: ' + example_output_path)
input_file = tf.gfile.Open(input_path, mode='r')
examples = []
for counter, line in enumerate(input_file):
    if counter == 0:  # Ignore first line since it's a header.
continue
# Get the token and embedding vector.
line_split = line.split('\t')
parse1 = line_split[_PARSE1_INDEX]
parse2 = line_split[_PARSE2_INDEX]
consensus_label = line_split[_LABEL_INDEX]
tokens1 = _get_tokens_and_tags(parse1)
tokens2 = _get_tokens_and_tags(parse2)
tokens1_str = ' '.join(tokens1)
tokens2_str = ' '.join(tokens2)
if consensus_label != '-':
examples.append([tokens1_str, tokens2_str, consensus_label])
input_file.close()
# Output tab delimited file of lines of examples (sentence1, sentence2, label)
with tf.gfile.GFile(gen_output_path, 'w') as f:
for tokens1_str, tokens2_str, consensus_label in examples:
f.write('%s\t%s\t%s\n' % (tokens1_str, tokens2_str, consensus_label))
if train:
# Output file containing all the sentences for generating the vocab from.
with tf.gfile.GFile(example_output_path, 'w') as f:
for tokens1_str, tokens2_str, consensus_label in examples:
f.write('%s %s\n' % (tokens1_str, tokens2_str))
|
python
|
def _parse_dataset(file_path, tmp_dir, train):
"""Convert the dataset in to a simpler format.
  This function creates two files: one that is processed to produce a vocab
  and another that is used to generate the data.
Args:
file_path: string, path to the file to parse.
tmp_dir: string, path to the directory to output the files.
train: bool, indicating if we are parsing the training set.
"""
input_path = file_path
file_name = 'train' if train else 'dev'
gen_output_path = os.path.join(tmp_dir, file_name + '.txt')
example_output_path = os.path.join(tmp_dir, _EXAMPLES_FILE)
print('input path: ' + input_path)
print('gen_output_path: ' + gen_output_path)
print('example_output_path: ' + example_output_path)
input_file = tf.gfile.Open(input_path, mode='r')
examples = []
for counter, line in enumerate(input_file):
    if counter == 0:  # Ignore first line since it's a header.
continue
# Get the token and embedding vector.
line_split = line.split('\t')
parse1 = line_split[_PARSE1_INDEX]
parse2 = line_split[_PARSE2_INDEX]
consensus_label = line_split[_LABEL_INDEX]
tokens1 = _get_tokens_and_tags(parse1)
tokens2 = _get_tokens_and_tags(parse2)
tokens1_str = ' '.join(tokens1)
tokens2_str = ' '.join(tokens2)
if consensus_label != '-':
examples.append([tokens1_str, tokens2_str, consensus_label])
input_file.close()
# Output tab delimited file of lines of examples (sentence1, sentence2, label)
with tf.gfile.GFile(gen_output_path, 'w') as f:
for tokens1_str, tokens2_str, consensus_label in examples:
f.write('%s\t%s\t%s\n' % (tokens1_str, tokens2_str, consensus_label))
if train:
# Output file containing all the sentences for generating the vocab from.
with tf.gfile.GFile(example_output_path, 'w') as f:
for tokens1_str, tokens2_str, consensus_label in examples:
f.write('%s %s\n' % (tokens1_str, tokens2_str))
|
[
"def",
"_parse_dataset",
"(",
"file_path",
",",
"tmp_dir",
",",
"train",
")",
":",
"input_path",
"=",
"file_path",
"file_name",
"=",
"'train'",
"if",
"train",
"else",
"'dev'",
"gen_output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"file_name",
"+",
"'.txt'",
")",
"example_output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"_EXAMPLES_FILE",
")",
"print",
"(",
"'input path: '",
"+",
"input_path",
")",
"print",
"(",
"'gen_output_path: '",
"+",
"gen_output_path",
")",
"print",
"(",
"'example_output_path: '",
"+",
"example_output_path",
")",
"input_file",
"=",
"tf",
".",
"gfile",
".",
"Open",
"(",
"input_path",
",",
"mode",
"=",
"'r'",
")",
"examples",
"=",
"[",
"]",
"for",
"counter",
",",
"line",
"in",
"enumerate",
"(",
"input_file",
")",
":",
"if",
"counter",
"==",
"0",
":",
"# Ignore first line since its a header.",
"continue",
"# Get the token and embedding vector.",
"line_split",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"parse1",
"=",
"line_split",
"[",
"_PARSE1_INDEX",
"]",
"parse2",
"=",
"line_split",
"[",
"_PARSE2_INDEX",
"]",
"consensus_label",
"=",
"line_split",
"[",
"_LABEL_INDEX",
"]",
"tokens1",
"=",
"_get_tokens_and_tags",
"(",
"parse1",
")",
"tokens2",
"=",
"_get_tokens_and_tags",
"(",
"parse2",
")",
"tokens1_str",
"=",
"' '",
".",
"join",
"(",
"tokens1",
")",
"tokens2_str",
"=",
"' '",
".",
"join",
"(",
"tokens2",
")",
"if",
"consensus_label",
"!=",
"'-'",
":",
"examples",
".",
"append",
"(",
"[",
"tokens1_str",
",",
"tokens2_str",
",",
"consensus_label",
"]",
")",
"input_file",
".",
"close",
"(",
")",
"# Output tab delimited file of lines of examples (sentence1, sentence2, label)",
"with",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"gen_output_path",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"tokens1_str",
",",
"tokens2_str",
",",
"consensus_label",
"in",
"examples",
":",
"f",
".",
"write",
"(",
"'%s\\t%s\\t%s\\n'",
"%",
"(",
"tokens1_str",
",",
"tokens2_str",
",",
"consensus_label",
")",
")",
"if",
"train",
":",
"# Output file containing all the sentences for generating the vocab from.",
"with",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"example_output_path",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"tokens1_str",
",",
"tokens2_str",
",",
"consensus_label",
"in",
"examples",
":",
"f",
".",
"write",
"(",
"'%s %s\\n'",
"%",
"(",
"tokens1_str",
",",
"tokens2_str",
")",
")"
] |
Convert the dataset into a simpler format.
This function creates two files: one that is processed to produce a vocab
and another that is used to generate the data.
Args:
file_path: string, path to the file to parse.
tmp_dir: string, path to the directory to output the files.
train: bool, indicating if we are parsing the training set.
|
[
"Convert",
"the",
"dataset",
"in",
"to",
"a",
"simpler",
"format",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/snli.py#L76-L128
|
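The generated file holds one tab-delimited `(sentence1, sentence2, label)` example per line. A short sketch of the output format, using a made-up example row and a hypothetical output path:

examples = [('A dog runs .', 'An animal is moving .', 'entailment')]  # made-up row
with open('/tmp/train.txt', 'w') as f:  # hypothetical path
  for tokens1_str, tokens2_str, consensus_label in examples:
    f.write('%s\t%s\t%s\n' % (tokens1_str, tokens2_str, consensus_label))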
21,898
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/snli.py
|
_get_or_generate_vocab
|
def _get_or_generate_vocab(tmp_dir, vocab_filename, vocab_size):
"""Read or create vocabulary."""
vocab_filepath = os.path.join(tmp_dir, vocab_filename)
print('Vocab file written to: ' + vocab_filepath)
if tf.gfile.Exists(vocab_filepath):
gs = text_encoder.SubwordTextEncoder(vocab_filepath)
return gs
example_file = os.path.join(tmp_dir, _EXAMPLES_FILE)
gs = text_encoder.SubwordTextEncoder()
token_counts = tokenizer.corpus_token_counts(
example_file, corpus_max_lines=1000000)
gs = gs.build_to_target_size(
vocab_size, token_counts, min_val=1, max_val=1e3)
gs.store_to_file(vocab_filepath)
return gs
|
python
|
def _get_or_generate_vocab(tmp_dir, vocab_filename, vocab_size):
"""Read or create vocabulary."""
vocab_filepath = os.path.join(tmp_dir, vocab_filename)
print('Vocab file written to: ' + vocab_filepath)
if tf.gfile.Exists(vocab_filepath):
gs = text_encoder.SubwordTextEncoder(vocab_filepath)
return gs
example_file = os.path.join(tmp_dir, _EXAMPLES_FILE)
gs = text_encoder.SubwordTextEncoder()
token_counts = tokenizer.corpus_token_counts(
example_file, corpus_max_lines=1000000)
gs = gs.build_to_target_size(
vocab_size, token_counts, min_val=1, max_val=1e3)
gs.store_to_file(vocab_filepath)
return gs
|
[
"def",
"_get_or_generate_vocab",
"(",
"tmp_dir",
",",
"vocab_filename",
",",
"vocab_size",
")",
":",
"vocab_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"vocab_filename",
")",
"print",
"(",
"'Vocab file written to: '",
"+",
"vocab_filepath",
")",
"if",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"vocab_filepath",
")",
":",
"gs",
"=",
"text_encoder",
".",
"SubwordTextEncoder",
"(",
"vocab_filepath",
")",
"return",
"gs",
"example_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"_EXAMPLES_FILE",
")",
"gs",
"=",
"text_encoder",
".",
"SubwordTextEncoder",
"(",
")",
"token_counts",
"=",
"tokenizer",
".",
"corpus_token_counts",
"(",
"example_file",
",",
"corpus_max_lines",
"=",
"1000000",
")",
"gs",
"=",
"gs",
".",
"build_to_target_size",
"(",
"vocab_size",
",",
"token_counts",
",",
"min_val",
"=",
"1",
",",
"max_val",
"=",
"1e3",
")",
"gs",
".",
"store_to_file",
"(",
"vocab_filepath",
")",
"return",
"gs"
] |
Read or create vocabulary.
|
[
"Read",
"or",
"create",
"vocabulary",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/snli.py#L131-L146
|
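A hypothetical call sketch, assuming `_get_or_generate_vocab` and the t2t `text_encoder` module are in scope; the vocab file name below is made up. The helper caches the vocab on disk, so a second call with the same arguments reloads it instead of rebuilding.

encoder = _get_or_generate_vocab(
    tmp_dir='/tmp/snli', vocab_filename='vocab.subword', vocab_size=2**15)
ids = encoder.encode('A small dog runs .')  # SubwordTextEncoder API
print(encoder.decode(ids))                  # round-trips the sentence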
21,899
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/wikisum/get_references_web_single_group.py
|
shard
|
def shard(items, num_shards):
"""Split items into num_shards groups."""
sharded = []
num_per_shard = len(items) // num_shards
start = 0
for _ in range(num_shards):
sharded.append(items[start:start + num_per_shard])
start += num_per_shard
remainder = len(items) % num_shards
start = len(items) - remainder
for i in range(remainder):
sharded[i].append(items[start + i])
assert sum([len(fs) for fs in sharded]) == len(items)
return sharded
|
python
|
def shard(items, num_shards):
"""Split items into num_shards groups."""
sharded = []
num_per_shard = len(items) // num_shards
start = 0
for _ in range(num_shards):
sharded.append(items[start:start + num_per_shard])
start += num_per_shard
remainder = len(items) % num_shards
start = len(items) - remainder
for i in range(remainder):
sharded[i].append(items[start + i])
assert sum([len(fs) for fs in sharded]) == len(items)
return sharded
|
[
"def",
"shard",
"(",
"items",
",",
"num_shards",
")",
":",
"sharded",
"=",
"[",
"]",
"num_per_shard",
"=",
"len",
"(",
"items",
")",
"//",
"num_shards",
"start",
"=",
"0",
"for",
"_",
"in",
"range",
"(",
"num_shards",
")",
":",
"sharded",
".",
"append",
"(",
"items",
"[",
"start",
":",
"start",
"+",
"num_per_shard",
"]",
")",
"start",
"+=",
"num_per_shard",
"remainder",
"=",
"len",
"(",
"items",
")",
"%",
"num_shards",
"start",
"=",
"len",
"(",
"items",
")",
"-",
"remainder",
"for",
"i",
"in",
"range",
"(",
"remainder",
")",
":",
"sharded",
"[",
"i",
"]",
".",
"append",
"(",
"items",
"[",
"start",
"+",
"i",
"]",
")",
"assert",
"sum",
"(",
"[",
"len",
"(",
"fs",
")",
"for",
"fs",
"in",
"sharded",
"]",
")",
"==",
"len",
"(",
"items",
")",
"return",
"sharded"
] |
Split items into num_shards groups.
|
[
"Split",
"items",
"into",
"num_shards",
"groups",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/get_references_web_single_group.py#L87-L102
|
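A standalone worked example of the sharding scheme: items are split into `num_shards` equal slices, and the `len(items) % num_shards` leftover items are appended one each to the first shards.

def shard(items, num_shards):
  """Copy of the function above, for a standalone check."""
  sharded = []
  num_per_shard = len(items) // num_shards
  start = 0
  for _ in range(num_shards):
    sharded.append(items[start:start + num_per_shard])
    start += num_per_shard
  remainder = len(items) % num_shards
  start = len(items) - remainder
  for i in range(remainder):
    sharded[i].append(items[start + i])
  return sharded

print(shard(list(range(10)), 3))
# -> [[0, 1, 2, 9], [3, 4, 5], [6, 7, 8]]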