| desc | decl | bodies |
|---|---|---|
'Test that initializers are not mutated.'
| def testInitializerMutation(self):
| initializers = {'b': tf.constant_initializer(0)}
initializers_copy = dict(initializers)
conv1 = snt.Conv3D(output_channels=1, kernel_shape=3, stride=1, name='conv1', initializers=initializers)
conv1(tf.placeholder(tf.float32, [1, 10, 10, 10, 2]))
self.assertAllEqual(initializers, initializers_copy)
|
'Run through for something with a known answer using SAME padding.'
| @parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationSame(self, use_bias):
| conv1 = snt.Conv3D(output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME, name='conv1', use_bias=use_bias, initializers=create_constant_initializers(1.0, 1.0, use_bias))
out = conv1(tf.constant(np.ones([1, 5, 5, 5, 1], dtype=np.float32)))
expected_out = np.asarray([9, 13, 13, 13, 9, 13, 19, 19, 19, ... |
'Run through for something with a known answer using snt.VALID padding.'
| @parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testComputationValid(self, use_bias):
| conv1 = snt.Conv3D(output_channels=1, kernel_shape=3, stride=1, padding=snt.VALID, name='conv1', use_bias=use_bias, initializers=create_constant_initializers(1.0, 1.0, use_bias))
out = conv1(tf.constant(np.ones([1, 5, 5, 5, 1], dtype=np.float32)))
expected_out = np.asarray(([28] * 27)).reshape((3, 3, 3))
... |
'Sharing is working.'
| @parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testSharing(self, use_bias):
| conv1 = snt.Conv3D(output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME, use_bias=use_bias, name='conv1')
x = np.random.randn(1, 5, 5, 5, 1)
x1 = tf.constant(x, dtype=np.float32)
x2 = tf.constant(x, dtype=np.float32)
out1 = conv1(x1)
out2 = conv1(x2)
with self.test_session():
... |
'Set up some variables to re-use in multiple tests.'
| def setUp(self):
| super(Conv3DTransposeTest, self).setUp()
self.batch_size = 7
self.in_depth = 7
self.in_height = 7
self.in_width = 11
self.in_channels = 4
self.out_channels = 10
self.kernel_shape_d = 5
self.kernel_shape_h = 5
self.kernel_shape_w = 7
self.stride_d = 1
self.stride_h = 1
... |
'Tests if output shapes are valid.'
| @parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testOutputShapeConsistency(self, use_bias):
| inputs = tf.placeholder(tf.float32, shape=self.in_shape)
conv1 = snt.Conv3DTranspose(name='conv3d_1', output_channels=self.out_channels, output_shape=self.out_shape, kernel_shape=self.kernel_shape, padding=self.padding, stride=1, use_bias=use_bias)
outputs = conv1(inputs)
self.assertTrue(outputs.get_sha... |
'Tests if output shapes are valid when specified as an integer.'
| @parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testOutputShapeInteger(self, use_bias):
| inputs = tf.zeros(shape=[3, 5, 5, 5, 2], dtype=tf.float32)
inputs_2 = tf.zeros(shape=[3, 5, 7, 5, 2], dtype=tf.float32)
conv1 = snt.Conv3DTranspose(name='conv3d_1', output_channels=10, output_shape=10, kernel_shape=5, padding=snt.SAME, stride=2, use_bias=use_bias)
outputs = conv1(inputs)
outputs_2 =... |
'Tests if the correct output shapes are set up in the transposed module.'
| @parameterized.NamedParameters(('WithBias', True), ('WithoutBias', False))
def testTransposition(self, use_bias):
| net = snt.Conv3DTranspose(name='conv3d_3', output_channels=self.out_channels, output_shape=self.out_shape, kernel_shape=self.kernel_shape, padding=self.padding, stride=1, use_bias=use_bias)
net_transpose = net.transpose()
input_to_net = tf.placeholder(tf.float32, shape=self.in_shape)
err = 'Variables ... |
'Check that differing reduction indices give the correct output shape.'
| def testReductionIndices(self):
| inputs = tf.placeholder(tf.float32, shape=[None, 64, 32, 3])
bn1 = snt.BatchNorm(axis=[0], offset=False)
bn1(inputs, is_training=True)
self.assertEqual(bn1.moving_mean.get_shape(), (1, 64, 32, 3))
bn2 = snt.BatchNorm(axis=[0, 1], offset=False)
bn2(inputs, is_training=True)
self.assertEqual(b... |
'Test that using moving_mean as shift improves statistics.'
| def testShiftImproveStatistics(self):
| (_, _, inputs) = self._get_inputs()
bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.1)
out1 = bn(inputs, is_training=True)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
out_v = sess.run(out1)
self.assertAllClose(np.zeros([7, 6]), out_v, ... |
'The correct statistics are being computed for double connection.
Connected in parallel, it\'s ill-defined what order the updates will happen
in. A double update could happen, or two sequential updates. E.g. If
decay_rate is 0.9, the start value is 1.0, and the target value is 0.0, the
value could progress as
1.00 -> 0... | @parameterized.NamedParameters(('Float32', tf.float32), ('Float64', tf.float64))
def testCheckStatsDouble(self, dtype):
| (v, _, inputs) = self._get_inputs(dtype)
bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.9)
with tf.name_scope('net1'):
bn(inputs, is_training=True)
with tf.name_scope('net2'):
bn(inputs, is_training=True)
update_ops_1 = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS, 'ne... |
'The correct normalization is being used for different Python flags.'
| def testCheckStatsPython(self):
| (v, input_v, inputs) = self._get_inputs()
bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.5)
out1 = bn(inputs, is_training=True, test_local_stats=True)
out2 = bn(inputs, is_training=False, test_local_stats=True)
out3 = bn(inputs, is_training=False, test_local_stats=False)
update_ops =... |
'The correct normalization is being used for different TF flags.'
| @parameterized.NamedParameters(('UseUpdateCollection', tf.GraphKeys.UPDATE_OPS), ('UseDifferentUpdateCollection', 'my_update_ops'), ('UseControlDependencies', None))
def testCheckStatsInGraph(self, update_ops_collection):
| (v, input_v, inputs) = self._get_inputs()
bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.5, update_ops_collection=update_ops_collection)
is_training = tf.placeholder(tf.bool)
test_local_stats = tf.placeholder(tf.bool)
out = bn(inputs, is_training=is_training, test_local_stats=test_local_... |
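The batch-norm tests above exercise the moving-average update ops that `snt.BatchNorm` registers. A minimal sketch of the usual TF1 training pattern, assuming an `inputs` tensor and a `loss` derived from the normalized output already exist:

```python
import tensorflow as tf
import sonnet as snt

bn = snt.BatchNorm()                # default update_ops_collection is tf.GraphKeys.UPDATE_OPS
net = bn(inputs, is_training=True)  # `inputs` assumed to exist; `loss` assumed to depend on `net`

# Run the moving-average updates before each training step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
```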
'Check that the correct number of variables are made when sharing.'
| def testSharing(self):
| inputs1 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
inputs2 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
bn = snt.BatchNorm(offset=True, scale=True)
bn(inputs1, is_training=True)
bn(inputs2, is_training=False)
self.assertEqual(len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLE... |
'Demonstrate that updates inside a cond fail.'
| def testUpdatesInsideCond(self):
| (_, input_v, inputs) = self._get_inputs()
bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.5)
condition = tf.placeholder(tf.bool)
cond = tf.cond(condition, (lambda : bn(inputs, is_training=True)), (lambda : inputs))
init = tf.global_variables_initializer()
with self.test_session() as s... |
'Check that the input batch_size can change.'
| def testVariableBatchSize(self):
| inputs_shape = [10, 10]
inputs = tf.placeholder(tf.float32, shape=([None] + inputs_shape))
bn = snt.BatchNorm(offset=False, scale=False)
out = bn(inputs, is_training=False, test_local_stats=False)
init = tf.global_variables_initializer()
update_ops = tuple(tf.get_collection(tf.GraphKeys.UPDATE_O... |
'See `__init__` of `LSTM` and `BatchNormLSTM` for docs.'
| def __init__(self, hidden_size, forget_bias=1.0, initializers=None, partitioners=None, regularizers=None, use_peepholes=False, use_batch_norm_h=False, use_batch_norm_x=False, use_batch_norm_c=False, use_layer_norm=False, max_unique_stats=1, hidden_clip_value=None, cell_clip_value=None, name='lstm'):
| super(_BaseLSTM, self).__init__(name=name)
self._hidden_size = hidden_size
self._forget_bias = forget_bias
self._use_peepholes = use_peepholes
self._max_unique_stats = max_unique_stats
self._use_batch_norm_h = use_batch_norm_h
self._use_batch_norm_x = use_batch_norm_x
self._use_batch_nor... |
'Wraps this RNNCore with the additional control input to the `BatchNorm`s.
Example usage:
lstm = snt.BatchNormLSTM(4)
is_training = tf.placeholder(tf.bool)
rnn_input = ...
my_rnn = rnn.rnn(lstm.with_batch_norm_control(is_training), rnn_input)
Args:
is_training: Boolean that indicates whether we are in
training mode or ... | def with_batch_norm_control(self, is_training, test_local_stats=True):
| return _BaseLSTM.CellWithExtraInput(self, is_training=is_training, test_local_stats=test_local_stats)
|
'Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
w_gates: weight for gates
b_gates: bias of gates
w_f_diag: weight for prev_cell -> forget gate peephole
w_i_diag: weight for prev_cell -> input gate peephole
w_o_diag: weight for prev_cell -> output ga... | @classmethod
def get_possible_initializer_keys(cls, use_peepholes=False, use_batch_norm_h=False, use_batch_norm_x=False, use_batch_norm_c=False):
| possible_keys = cls.POSSIBLE_INITIALIZER_KEYS.copy()
if (not use_peepholes):
possible_keys.difference_update({cls.W_F_DIAG, cls.W_I_DIAG, cls.W_O_DIAG})
if (not use_batch_norm_h):
possible_keys.remove(cls.GAMMA_H)
if (not use_batch_norm_x):
possible_keys.remove(cls.GAMMA_X)
i... |
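A minimal sketch of how this classmethod is typically used, assuming `snt.LSTM` exposes it with the flags shown in the declaration above and that the key strings follow the docstring:

```python
import tensorflow as tf
import sonnet as snt

# Ask which initializer keys are valid for a peephole LSTM...
keys = snt.LSTM.get_possible_initializer_keys(use_peepholes=True)
print(keys)

# ...and supply an initializer for one of them.
lstm = snt.LSTM(hidden_size=16, use_peepholes=True,
                initializers={'b_gates': tf.constant_initializer(1.0)})
```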
'Connects the LSTM module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as inputs and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ f... | def _build(self, inputs, prev_state, is_training=None, test_local_stats=True):
| use_batch_norm = (self._use_batch_norm_c or self._use_batch_norm_h)
use_batch_norm = (use_batch_norm or self._use_batch_norm_x)
if (use_batch_norm and (is_training is None)):
raise ValueError('Boolean is_training flag must be explicitly specified when using batch normal... |
'Initialize the variables used for the `BatchNorm`s (if any).'
| def _create_batch_norm_variables(self, dtype):
| gamma_initializer = tf.constant_initializer(0.1)
if self._use_batch_norm_h:
self._gamma_h = tf.get_variable(self.GAMMA_H, shape=[(4 * self._hidden_size)], dtype=dtype, initializer=self._initializers.get(self.GAMMA_H, gamma_initializer), partitioner=self._partitioners.get(self.GAMMA_H), regularizer=self.... |
'Initialize the variables used for the gates.'
| def _create_gate_variables(self, input_shape, dtype):
| if (len(input_shape) != 2):
raise ValueError('Rank of shape must be {} not: {}'.format(2, len(input_shape)))
input_size = input_shape.dims[1].value
b_shape = [(4 * self._hidden_size)]
equiv_input_size = (self._hidden_size + input_size)
initializer = basic.create_linear_i... |
'Initialize the variables used for the peephole connections.'
| def _create_peephole_variables(self, dtype):
| self._w_f_diag = tf.get_variable(self.W_F_DIAG, shape=[self._hidden_size], dtype=dtype, initializer=self._initializers.get(self.W_F_DIAG), partitioner=self._partitioners.get(self.W_F_DIAG), regularizer=self._regularizers.get(self.W_F_DIAG))
self._w_i_diag = tf.get_variable(self.W_I_DIAG, shape=[self._hidden_siz... |
'Builds the default start state tensor of zeros.
Args:
batch_size: An int, float or scalar Tensor representing the batch size.
dtype: The data type to use for the state.
trainable: Boolean that indicates whether to learn the initial state.
trainable_initializers: An optional pair of initializers for the
initial hidden ... | def initial_state(self, batch_size, dtype=tf.float32, trainable=False, trainable_initializers=None, trainable_regularizers=None, name=None):
| if (self._max_unique_stats == 1):
return super(_BaseLSTM, self).initial_state(batch_size, dtype=dtype, trainable=trainable, trainable_initializers=trainable_initializers, trainable_regularizers=trainable_regularizers, name=name)
else:
with tf.name_scope(self._initial_state_scope(name)):
... |
'Tuple of `tf.TensorShape`s indicating the size of state tensors.'
| @property
def state_size(self):
| if (self._max_unique_stats == 1):
return (tf.TensorShape([self._hidden_size]), tf.TensorShape([self._hidden_size]))
else:
return (tf.TensorShape([self._hidden_size]), tf.TensorShape([self._hidden_size]), tf.TensorShape(1))
|
'`tf.TensorShape` indicating the size of the core output.'
| @property
def output_size(self):
| return tf.TensorShape([self._hidden_size])
|
'Boolean indicating whether peephole connections are used.'
| @property
def use_peepholes(self):
| return self._use_peepholes
|
'Boolean indicating whether batch norm for hidden -> gates is enabled.'
| @property
def use_batch_norm_h(self):
| return self._use_batch_norm_h
|
'Boolean indicating whether batch norm for input -> gates is enabled.'
| @property
def use_batch_norm_x(self):
| return self._use_batch_norm_x
|
'Boolean indicating whether batch norm for cell -> output is enabled.'
| @property
def use_batch_norm_c(self):
| return self._use_batch_norm_c
|
'Boolean indicating whether layer norm is enabled.'
| @property
def use_layer_norm(self):
| return self._use_layer_norm
|
'Create an IndexedStatsBatchNorm.
Args:
max_unique_stats: number of different indices to have statistics for;
indices beyond this will use the final statistics.
name: Name of the module.'
| def __init__(self, max_unique_stats, name=None):
| super(_BaseLSTM.IndexedStatsBatchNorm, self).__init__(name=name)
self._max_unique_stats = max_unique_stats
|
'Add the IndexedStatsBatchNorm module to the graph.
Args:
inputs: Tensor to apply batch norm to.
index: Scalar TensorFlow int32 value to select the batch norm index.
is_training: Boolean to indicate to `snt.BatchNorm` if we are
currently training.
test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch
normal... | def _build(self, inputs, index, is_training, test_local_stats):
| def create_batch_norm():
return batch_norm.BatchNorm(offset=False, scale=False)(inputs, is_training, test_local_stats)
if (self._max_unique_stats > 1):
pred_fn_pairs = [(tf.equal(i, index), create_batch_norm) for i in xrange((self._max_unique_stats - 1))]
out = tf.case(pred_fn_pairs, cre... |
'Construct the CellWithExtraInput.
Args:
cell: The RNNCell to wrap (typically a snt.RNNCore).
*args: Extra arguments to pass to __call__.
**kwargs: Extra keyword arguments to pass to __call__.'
| def __init__(self, cell, *args, **kwargs):
| self._cell = cell
self._args = args
self._kwargs = kwargs
|
'Tuple indicating the size of nested state tensors.'
| @property
def state_size(self):
| return self._cell.state_size
|
'`tf.TensorShape` indicating the size of the core output.'
| @property
def output_size(self):
| return self._cell.output_size
|
'Construct LSTM.
Args:
hidden_size: (int) Hidden size dimensionality.
forget_bias: (float) Bias for the forget activation.
initializers: Dict containing ops to initialize the weights.
This dictionary may contain any of the keys returned by
`LSTM.get_possible_initializer_keys`.
The gamma and beta variables control batch... | @deprecation.deprecated_args('2017-09-18', 'Please switch from LSTM to BatchNormLSTM if you need batch norm functionality.', 'use_batch_norm_h', 'use_batch_norm_x', 'use_batch_norm_c', 'max_unique_stats')
def __init__(self, hidden_size, forget_bias=1.0, initializers=None, partitioners=N... | super(LSTM, self).__init__(hidden_size, forget_bias=forget_bias, initializers=initializers, partitioners=partitioners, regularizers=regularizers, use_peepholes=use_peepholes, use_batch_norm_h=use_batch_norm_h, use_batch_norm_x=use_batch_norm_x, use_batch_norm_c=use_batch_norm_c, use_layer_norm=use_layer_norm, max_u... |
'Construct `BatchNormLSTM`.
Args:
hidden_size: (int) Hidden size dimensionality.
forget_bias: (float) Bias for the forget activation.
initializers: Dict containing ops to initialize the weights.
This dictionary may contain any of the keys returned by
`BatchNormLSTM.get_possible_initializer_keys`.
The gamma and beta var... | def __init__(self, hidden_size, forget_bias=1.0, initializers=None, partitioners=None, regularizers=None, use_peepholes=False, use_batch_norm_h=True, use_batch_norm_x=False, use_batch_norm_c=False, max_unique_stats=1, hidden_clip_value=None, cell_clip_value=None, name='batch_norm_lstm'):
| if (not any([use_batch_norm_h, use_batch_norm_x, use_batch_norm_c])):
raise ValueError('At least one use_batch_norm_* option is required for BatchNormLSTM')
super(BatchNormLSTM, self).__init__(hidden_size, forget_bias=forget_bias, initializers=initializers, partitioners=partition... |
'Construct ConvLSTM.
Args:
conv_ndims: Convolution dimensionality (1, 2 or 3).
input_shape: Shape of the input as tuple, excluding the batch size.
output_channels: Number of output channels of the conv LSTM.
kernel_shape: Sequence of kernel sizes (of size 2), or integer that is
used to define kernel size in all dimensi... | def __init__(self, conv_ndims, input_shape, output_channels, kernel_shape, stride=1, padding=conv.SAME, use_bias=True, skip_connection=False, forget_bias=1.0, initializers=None, partitioners=None, regularizers=None, name='conv_lstm'):
| super(ConvLSTM, self).__init__(name=name)
self._conv_class = self._get_conv_class(conv_ndims)
if (skip_connection and (stride != 1)):
raise ValueError('`stride` needs to be 1 when using skip connection')
if (conv_ndims != (len(input_shape) - 1)):
raise ValueError(... |
'Tuple of `tf.TensorShape`s indicating the size of state tensors.'
| @property
def state_size(self):
| hidden_size = tf.TensorShape((self._input_shape[:(-1)] + (self._output_channels,)))
return (hidden_size, hidden_size)
|
'`tf.TensorShape` indicating the size of the core output.'
| @property
def output_size(self):
| return tf.TensorShape((self._input_shape[:(-1)] + (self._total_output_channels,)))
|
'Construct Conv1DLSTM. See `snt.ConvLSTM` for more details.'
| def __init__(self, name='conv_1d_lstm', **kwargs):
| super(Conv1DLSTM, self).__init__(conv_ndims=1, name=name, **kwargs)
|
'Construct Conv2DLSTM. See `snt.ConvLSTM` for more details.'
| def __init__(self, name='conv_2d_lstm', **kwargs):
| super(Conv2DLSTM, self).__init__(conv_ndims=2, name=name, **kwargs)
|
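A minimal usage sketch for the convolutional LSTM cores above; the shapes are assumptions chosen for illustration:

```python
import tensorflow as tf
import sonnet as snt

# 2D conv LSTM over 32x32 feature maps with 3 input and 8 output channels.
conv_lstm = snt.Conv2DLSTM(input_shape=(32, 32, 3),
                           output_channels=8,
                           kernel_shape=(3, 3))
state = conv_lstm.initial_state(batch_size=1)
outputs, state = conv_lstm(tf.zeros([1, 32, 32, 3]), state)
```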
'Construct GRU.
Args:
hidden_size: (int) Hidden size dimensionality.
initializers: Dict containing ops to initialize the weights. This
dict may contain any of the keys returned by
`GRU.get_possible_initializer_keys`.
partitioners: Optional dict containing partitioners to partition
the weights and biases. As a default, ... | def __init__(self, hidden_size, initializers=None, partitioners=None, regularizers=None, name='gru'):
| super(GRU, self).__init__(name=name)
self._hidden_size = hidden_size
self._initializers = util.check_initializers(initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(regu... |
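A minimal sketch of a single GRU step; batch size and feature size are assumptions:

```python
import tensorflow as tf
import sonnet as snt

gru = snt.GRU(hidden_size=64)
state = gru.initial_state(batch_size=3)              # zero-filled state by default
output, next_state = gru(tf.zeros([3, 10]), state)   # one step on 10-d inputs
```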
'Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
wz: weight for input -> update cell
uz: weight for prev_state -> update cell
bz: bias for update_cell
wr: weight for input -> reset cell
ur: weight for prev_state -> reset cell
br: bias for reset cell
wh... | @classmethod
def get_possible_initializer_keys(cls):
| return super(GRU, cls).get_possible_initializer_keys(cls)
|
'Connects the GRU module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as inputs and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ fo... | def _build(self, inputs, prev_state):
| input_size = inputs.get_shape()[1]
weight_shape = (input_size, self._hidden_size)
u_shape = (self._hidden_size, self._hidden_size)
bias_shape = (self._hidden_size,)
self._wz = tf.get_variable(GRU.WZ, weight_shape, dtype=inputs.dtype, initializer=self._initializers.get(GRU.WZ), partitioner=self._part... |
'Initialize ConstantZero module.
Args:
output_rank: int. Rank of value returned by build(). The default value (2)
imitates the output of the Linear module.
name: string. Name of module.'
| def __init__(self, output_rank=2, name='constant_zero'):
| super(ConstantZero, self).__init__(name=name)
self._output_rank = output_rank
|
'Attach ConstantZero module to graph.
Args:
inputs: [batch_size, input_size]-shaped Tensor of dtype float32.
Returns:
A Tensor with rank output_rank where the first dimension has length
batch_size and all others have length 1.'
| def _build(self, inputs):
| assert (inputs.get_shape().as_list()[(-1)] is not None)
batch_size = tf.shape(inputs)[0]
result_shape = ([batch_size] + ([1] * (self._output_rank - 1)))
return tf.zeros(result_shape, dtype=inputs.dtype)
|
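A minimal sketch using the `ConstantZero` helper defined above; with the default `output_rank=2` it imitates the output rank of a Linear module but always returns zeros:

```python
import tensorflow as tf

zero_module = ConstantZero(output_rank=2)
inputs = tf.placeholder(tf.float32, [None, 8])
out = zero_module(inputs)  # shape [batch_size, 1], dtype matches `inputs`
```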
'Constructs a GridWarper module and initializes the source grid params.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; fo... | def __init__(self, source_shape, output_shape, num_coeff, name, **kwargs):
| super(GridWarper, self).__init__(name=name)
self._source_shape = tuple(source_shape)
self._output_shape = tuple(output_shape)
if (len(self._output_shape) > len(self._source_shape)):
raise base.Error('Output domain dimensionality ({}) must be equal or smaller than so... |
'Generates matrix of features, of size `[num_coeff, num_points]`.'
| @abc.abstractmethod
def _create_features(self, **kwargs):
| pass
|
'Returns number of coefficients of warping function.'
| @property
def n_coeff(self):
| return self._n_coeff
|
'Returns a list of features used to compute the grid warp.'
| @property
def psi(self):
| return self._psi
|
'Returns a tuple containing the shape of the source signal.'
| @property
def source_shape(self):
| return self._source_shape
|
'Returns a tuple containing the shape of the output grid.'
| @property
def output_shape(self):
| return self._output_shape
|
'Constructs an AffineGridWarper.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H... | def __init__(self, source_shape, output_shape, constraints=None, name='affine_grid_warper'):
| self._source_shape = tuple(source_shape)
self._output_shape = tuple(output_shape)
num_dim = len(source_shape)
if isinstance(constraints, AffineWarpConstraints):
self._constraints = constraints
elif (constraints is None):
self._constraints = AffineWarpConstraints.no_constraints(num_di... |
'Creates all the matrices needed to compute the output warped grids.'
| def _create_features(self, constraints):
| affine_warp_constraints = constraints
if (not isinstance(affine_warp_constraints, AffineWarpConstraints)):
affine_warp_constraints = AffineWarpConstraints(affine_warp_constraints)
mask = affine_warp_constraints.mask
psi = _create_affine_features(output_shape=self._output_shape, source_shape=self... |
'Assembles the module network and adds it to the graph.
The internal computation graph is assembled according to the set of
constraints provided at construction time.
Args:
inputs: Tensor containing a batch of transformation parameters.
Returns:
A batch of warped grids.
Raises:
Error: If the input tensor size is not co... | def _build(self, inputs):
| input_shape = tf.shape(inputs)
input_dtype = inputs.dtype.as_numpy_dtype
batch_size = tf.expand_dims(input_shape[0], 0)
number_of_params = inputs.get_shape()[1]
if (number_of_params != self._constraints.num_free_params):
raise base.Error('Input size is not consistent with c... |
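A minimal sketch of building and connecting an `AffineGridWarper`; the shapes are assumptions, and an unconstrained 2D affine transform has 6 free parameters, matching the `num_free_params` check above:

```python
import tensorflow as tf
import sonnet as snt

warper = snt.AffineGridWarper(source_shape=(10, 10), output_shape=(8, 8))
params = tf.placeholder(tf.float32, [None, 6])  # one affine transform per example
grid = warper(params)                           # batch of warped grids over the source domain
```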
'Returns a `sonnet` module to compute inverse affine transforms.
The function first assembles a network that given the constraints of the
current AffineGridWarper and a set of input parameters, retrieves the
coefficients of the corresponding inverse affine transform, then feeds its
output into a new AffineGridWarper se... | def inverse(self, name=None):
| if (self._num_coeff != 6):
raise tf.errors.UnimplementedError('AffineGridWarper currently supports inversion only for the 2D case.')
def _affine_grid_warper_inverse(inputs):
'Assembles network to compute inverse affine transformation.\n\n ... |
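Continuing the `AffineGridWarper` sketch above, `inverse()` returns a new Sonnet module that computes the inverse 2D warp from the same parameterization:

```python
inverse_warper = warper.inverse()
inverse_grid = inverse_warper(params)
```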
'Creates a constraint definition for an affine transformation.
Args:
constraints: A doubly-nested iterable of shape `[N, N+1]` defining
constraints on the entries of a matrix that represents an affine
transformation in `N` dimensions. A numeric value bakes in a constraint
on the corresponding entry in the transformation... | def __init__(self, constraints=((((None,) * 3),) * 2)):
| try:
self._constraints = tuple((tuple(x) for x in constraints))
except TypeError:
raise TypeError('constraints must be a nested iterable.')
self._num_dim = len(self._constraints)
expected_num_cols = (self._num_dim + 1)
if any(((len(x) != expected_num_cols) for x in sel... |
'Computes a boolean mask from the user defined constraints.'
| def _calc_mask(self):
| mask = []
for row in self._constraints:
mask.append(tuple(((x is None) for x in row)))
return tuple(mask)
|
'Computes the number of non-constrained parameters.'
| def _calc_num_free_params(self):
| return sum((row.count(None) for row in self._constraints))
|
'Returns the list of constraints for the i-th row of the affine matrix.'
| def __getitem__(self, i):
| return self._constraints[i]
|
'Combines two constraints, raising an error if they are not compatible.'
| def _combine(self, x, y):
| if ((x is None) or (y is None)):
return (x or y)
if (x != y):
raise ValueError('Incompatible set of constraints provided.')
return x
|
'Combines two sets of constraints into a coherent single set.'
| def __and__(self, rhs):
| return self.combine_with(rhs)
|
'Combines two sets of constraints into a coherent single set.'
| def combine_with(self, additional_constraints):
| x = additional_constraints
if (not isinstance(additional_constraints, AffineWarpConstraints)):
x = AffineWarpConstraints(additional_constraints)
new_constraints = []
for (left, right) in zip(self._constraints, x.constraints):
new_constraints.append([self._combine(x, y) for (x, y) in zip(... |
'Empty set of constraints for a num_dim-dimensional affine transform.'
| @classmethod
def no_constraints(cls, num_dim=2):
| return cls(((((None,) * (num_dim + 1)),) * num_dim))
|
'Assigns constraints on translation components of affine transform in 2d.'
| @classmethod
def translation_2d(cls, x=None, y=None):
| return cls([[None, None, x], [None, None, y]])
|
'Assigns constraints on translation components of affine transform in 3d.'
| @classmethod
def translation_3d(cls, x=None, y=None, z=None):
| return cls([[None, None, None, x], [None, None, None, y], [None, None, None, z]])
|
'Assigns constraints on scaling components of affine transform in 2d.'
| @classmethod
def scale_2d(cls, x=None, y=None):
| return cls([[x, None, None], [None, y, None]])
|
'Assigns constraints on scaling components of affine transform in 3d.'
| @classmethod
def scale_3d(cls, x=None, y=None, z=None):
| return cls([[x, None, None, None], [None, y, None, None], [None, None, z, None]])
|
'Assigns constraints on shear components of affine transform in 2d.'
| @classmethod
def shear_2d(cls, x=None, y=None):
| return cls([[None, x, None], [y, None, None]])
|
'Assigns constraints on shear components of affine transform in 3d.'
| @classmethod
def no_shear_3d(cls):
| return cls([[None, 0, 0, None], [0, None, 0, None], [0, 0, None, None]])
|
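A minimal sketch combining the constraint helpers above; `&` maps to `combine_with`, and entries constrained by either operand stop being free parameters:

```python
import sonnet as snt

constraints = (snt.AffineWarpConstraints.translation_2d(x=0.0)
               & snt.AffineWarpConstraints.scale_2d(x=1.0))
print(constraints.num_free_params)  # 4: six 2D affine entries minus the two fixed ones

warper = snt.AffineGridWarper((10, 10), (8, 8), constraints=constraints)
```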
'Defines the name scope of the initial_state ops.'
| def _initial_state_scope(self, name):
| return (name if name else ('%s_initial_state' % self.scope_name))
|
'Builds the default start state for an RNNCore.
Args:
batch_size: An int, or scalar int32 Tensor representing the batch size.
dtype: The data type to use for the state.
trainable: Boolean that indicates whether to learn the initial state.
Note that intializers and regularizers will be ignored if
`trainable=False`.
trai... | def initial_state(self, batch_size, dtype=tf.float32, trainable=False, trainable_initializers=None, trainable_regularizers=None, name=None, **unused_kwargs):
| with tf.name_scope(self._initial_state_scope(name)):
if (not trainable):
return self.zero_state(batch_size, dtype)
else:
return trainable_initial_state(batch_size, self.state_size, dtype, initializers=trainable_initializers, regularizers=trainable_regularizers, name=self._ini... |
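A minimal sketch of a trainable initial state for an RNNCore such as `snt.LSTM`; sizes are assumptions:

```python
import tensorflow as tf
import sonnet as snt

lstm = snt.LSTM(hidden_size=32)
init_state = lstm.initial_state(batch_size=4, trainable=True)  # creates learnable variables
output, next_state = lstm(tf.zeros([4, 10]), init_state)
```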
'size(s) of state(s) used by this cell.
It can be represented by an Integer, a TensorShape or a tuple of Integers
or TensorShapes.'
| @property
def state_size(self):
| raise NotImplementedError('Abstract method')
|
'Integer or TensorShape: size of outputs produced by this cell.'
| @property
def output_size(self):
| raise NotImplementedError('Abstract method')
|
'Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
If `state_size` is an int or TensorShape, then the return value is a
`N-D` tensor of shape `[batch_size x state_size]` filled with zeros.
If `state_size` is ... | def zero_state(self, batch_size, dtype):
| with tf.name_scope((type(self).__name__ + 'ZeroState'), values=[batch_size]):
return rnn_cell_impl._zero_state_tensors(self.state_size, batch_size, dtype)
|
'Constructs the Module that introduces a trainable state in the graph.
It receives an initial state that will be used as the initial values for the
trainable variables that the module contains, and optionally a mask that
indicates the parts of the initial state that should be learnable.
Args:
initial_state: tensor or ar... | def __init__(self, initial_state, mask=None, name='trainable_initial_state'):
| super(TrainableInitialState, self).__init__(name=name)
warnings.simplefilter('always', DeprecationWarning)
warnings.warn('Use the trainable flag in initial_state instead.', DeprecationWarning, stacklevel=2)
if (mask is not None):
flat_mask = nest.flatten(mask)
if (not a... |
'Connects the module to the graph.
Returns:
The learnable state, which has the same type, structure and shape as
the `initial_state` passed to the constructor.'
| def _build(self):
| flat_initial_state = nest.flatten(self._initial_state)
if (self._mask is not None):
flat_mask = nest.flatten(self._mask)
flat_learnable_state = [_single_learnable_state(state, state_id=i, learnable=mask) for (i, (state, mask)) in enumerate(zip(flat_initial_state, flat_mask))]
else:
f... |
'Tests if calling __init__ without named args raises a ValueError.'
| def testInitNoNamedArgs(self):
| with self.assertRaises(ValueError):
NoInitIdentityModule('foobar')
|
'Tests if calling __init__ without a string name raises a TypeError.'
| def testInitInvalidTypeArgs(self):
| with self.assertRaises(TypeError):
NoInitIdentityModule(name=123)
|
'Tests if calling __init__ with no args uses correct defaults.'
| def testInitNoArgs(self):
| module = NoInitIdentityModule()
self.assertEqual(module.module_name, 'no_init_identity_module')
|
'Tests if a __call__ with no __init__ raises an error.'
| def testInitNoSuper(self):
| module = NoSuperInitIdentityModule()
with self.assertRaises(base.NotInitializedError):
module(tf.constant([1]))
|
'Initialize AttentiveRead module.
Args:
attention_logit_mod: Module that produces logit corresponding to a memory
slot\'s compatibility. Must map a [batch_size * memory_size,
memory_word_size + query_word_size]-shaped Tensor to a
[batch_size * memory_size, 1] shape Tensor.
name: string. Name for module.'
| def __init__(self, attention_logit_mod, name='attention'):
| super(AttentiveRead, self).__init__(name=name)
self._attention_logit_mod = attention_logit_mod
|
'Perform a differentiable read.
Args:
memory: [batch_size, memory_size, memory_word_size]-shaped Tensor of
dtype float32. This represents, for each example and memory slot, a
single embedding to attend over.
query: [batch_size, query_word_size]-shaped Tensor of dtype float32.
Represents, for each example, a single embe... | def _build(self, memory, query, memory_mask=None):
| if (len(memory.get_shape()) != 3):
raise base.IncompatibleShapeError('memory must have shape [batch_size, memory_size, memory_word_size].')
if (len(query.get_shape()) != 2):
raise base.IncompatibleShapeError('query must have shape [batch_size, query_word_size].')... |
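A minimal sketch of `AttentiveRead` with assumed shapes; a `snt.Linear(1)` satisfies the requirement that the logit module maps `[batch_size * memory_size, memory_word_size + query_word_size]` to `[batch_size * memory_size, 1]`:

```python
import tensorflow as tf
import sonnet as snt

attention = snt.AttentiveRead(snt.Linear(output_size=1))
memory = tf.placeholder(tf.float32, [None, 20, 16])  # [batch, memory_size, memory_word_size]
query = tf.placeholder(tf.float32, [None, 16])       # [batch, query_word_size]
attended = attention(memory, query)                  # differentiable read over the memory slots
```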
'Construct a Basic RNN core.
Args:
hidden_size: hidden size dimensionality.
activation: activation function to use.
initializers: optional dict containing ops to initialize the weights. This
dictionary may contain the keys \'in_to_hidden\' and/or
\'hidden_to_hidden\'.
partitioners: optional dict containing ops to parti... | def __init__(self, hidden_size, activation=tf.tanh, initializers=None, partitioners=None, regularizers=None, name='vanilla_rnn'):
| super(VanillaRNN, self).__init__(name=name)
self._hidden_size = hidden_size
self._activation = activation
self._initializers = util.check_initializers(initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._re... |
'Connects the VanillaRNN module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as input_ and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may di... | def _build(self, input_, prev_state):
| self._in_to_hidden_linear = basic.Linear(self._hidden_size, name='in_to_hidden', initializers=self._initializers.get('in_to_hidden'), partitioners=self._partitioners.get('in_to_hidden'), regularizers=self._regularizers.get('in_to_hidden'))
self._hidden_to_hidden_linear = basic.Linear(self._hidden_size, name='hi... |
'Construct a Deep RNN core.
Args:
cores: iterable of modules or ops.
skip_connections: a boolean that indicates whether to use skip
connections. This means that the input is fed to all the layers, after
being concatenated with the output of the previous layer. The output
of the module will be the concatenation of all t... | def __init__(self, cores, skip_connections=True, concat_final_output_if_skip=True, name='deep_rnn'):
| super(DeepRNN, self).__init__(name=name)
if (not isinstance(cores, collections.Iterable)):
raise ValueError('Cores should be an iterable object.')
self._cores = tuple(cores)
self._skip_connections = skip_connections
self._concat_final_output_if_skip = concat_final_output_if_sk... |
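A minimal sketch of a `DeepRNN` with skip connections, where the input is concatenated with each intermediate output before being fed to the next core; sizes are assumptions:

```python
import tensorflow as tf
import sonnet as snt

deep_rnn = snt.DeepRNN([snt.LSTM(16), snt.LSTM(16)], skip_connections=True)
state = deep_rnn.initial_state(batch_size=2)
output, next_state = deep_rnn(tf.zeros([2, 8]), state)
```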
'Checks the output_sizes of the cores of the DeepRNN module.
Raises:
ValueError: if the outputs of the cores cannot be concatenated along their
first dimension.'
| def _check_cores_output_sizes(self):
| for core_sizes in zip(*tuple(_get_flat_core_sizes(self._cores))):
first_core_list = core_sizes[0][1:]
for (i, core_list) in enumerate(core_sizes[1:]):
if (core_list[1:] != first_core_list):
raise ValueError(('The outputs of the provided cores are not ... |
'Connects the DeepRNN module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as input_ and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may diffe... | def _build(self, inputs, prev_state):
| current_input = inputs
next_states = []
outputs = []
recurrent_idx = 0
for (i, core) in enumerate(self._cores):
if (self._skip_connections and (i > 0)):
flat_input = (nest.flatten(inputs), nest.flatten(current_input))
flat_input = [tf.concat(input_, 1) for input_ in z... |
'Builds the default start state for a DeepRNN.
Args:
batch_size: An int, float or scalar Tensor representing the batch size.
dtype: The data type to use for the state.
trainable: Boolean that indicates whether to learn the initial state.
trainable_initializers: An initializer function or nested structure of
functions w... | def initial_state(self, batch_size, dtype=tf.float32, trainable=False, trainable_initializers=None, trainable_regularizers=None, name=None):
| initial_state = []
if (trainable_initializers is None):
trainable_initializers = ([None] * self._num_recurrent)
if (trainable_regularizers is None):
trainable_regularizers = ([None] * self._num_recurrent)
num_initializers = len(trainable_initializers)
if (num_initializers != self._nu... |
'Construct a Basic RNN core.
Args:
model: callable that computes the next state.
name: name of the module.
Raises:
TypeError: if model is not a callable object or if it is an RNNCore.
AttributeError: if model does not have an output_size attribute.'
| def __init__(self, model, name='model_rnn'):
| super(ModelRNN, self).__init__(name=name)
if (not callable(model)):
raise TypeError('Model must be callable.')
if isinstance(model, rnn_core.RNNCore):
raise TypeError('Model should not be an RNNCore.')
try:
self._output_size = model.output_size
except ... |
'Connects the ModelRNN module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as input_ and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may diff... | def _build(self, inputs, prev_state):
| next_state = self._model(prev_state)
return (next_state, next_state)
|
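A minimal sketch of `ModelRNN` wrapping a feed-forward module; per `_build` above, the per-step inputs are ignored and the model's output becomes both the output and the next state:

```python
import tensorflow as tf
import sonnet as snt

core = snt.ModelRNN(snt.Linear(output_size=5))           # snt.Linear exposes `output_size`
prev_state = tf.zeros([2, 5])
output, next_state = core(tf.zeros([2, 3]), prev_state)  # both equal model(prev_state)
```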
'Tests ACT using an LSTM for the core.'
| @parameterized.Parameters((13, 11, 7, 3, 5), (3, 3, 3, 1, 5), (1, 1, 1, 1, 1))
def testACTLSTM(self, input_size, hidden_size, output_size, seq_len, batch_size):
| lstm = gated_rnn.LSTM(hidden_size)
def get_hidden_state(state):
(hidden, unused_cell) = state
return hidden
self._testACT(input_size, hidden_size, output_size, seq_len, batch_size, lstm, get_hidden_state)
|
'Tests ACT using an LSTM for the core.'
| @parameterized.Parameters((13, 11, 7, 3, 5), (3, 3, 3, 1, 5), (1, 1, 1, 1, 1))
def testACTVanilla(self, input_size, hidden_size, output_size, seq_len, batch_size):
| vanilla = basic_rnn.VanillaRNN(hidden_size)
def get_state(state):
return state
self._testACT(input_size, hidden_size, output_size, seq_len, batch_size, vanilla, get_state)
|
'Constructs a new `BlockTriangularMatrix` module.
Args:
block_shape: tuple, 2-dimensional tuple indicating the shape of each
individual block.
block_rows: int, the number of blocks in each row (and column) of the
output matrix.
include_diagonal: boolean, indicates whether or not blocks on the diagonal
entries should be... | def __init__(self, block_shape, block_rows, include_diagonal=True, include_off_diagonal=True, upper=False, name='block_triangular_matrix'):
| super(BlockTriangularMatrix, self).__init__(name=name)
if ((not include_diagonal) and (not include_off_diagonal)):
raise ValueError('Arguments include_diagonal and include_off_diagonal cannot both be False.')
self._block_shape = tuple(block_shape)
self._block_rows = block_ro... |
'The total number of blocks in the output matrix.'
| @property
def num_blocks(self):
| return self._num_blocks
|
'The number of entries of each block.'
| @property
def block_size(self):
| return (self._block_shape[0] * self._block_shape[1])
|
'The shape of each block.'
| @property
def block_shape(self):
| return self._block_shape
|
'The shape of the output matrix.'
| @property
def output_shape(self):
| return ((self._block_shape[0] * self._block_rows), (self._block_shape[1] * self._block_rows))
|