desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'Module constructor.
Args:
idx: Indexes of the tensors to select. If `idx` is an integer, then
a `Tensor` is returned. If `idx` is a (nested) list/tuple, then a
(nested) tuple of `Tensor` is returned.
name: Name of the module.
Raises:
TypeError: If `idx` is not an list, tuple or integer.'
| def __init__(self, idx, name='select_input'):
| super(SelectInput, self).__init__(name=name)
self._check_type(idx)
self._idx = idx
|
'Connects the module into the graph.
Args:
*inputs: `Tensor` variables to select.
Returns:
Subset of `inputs` in an arbitrarily nested configuration.
Raises:
ValueError: If any entry of `idx` is out of bounds with respect to the
size of `inputs`.'
| def _build(self, *inputs):
| return self._select(inputs, self._idx)
|
'Test the minimum input size calculator.'
| def testCalcMinSize(self):
| net = snt.nets.AlexNet(mode=snt.nets.AlexNet.MINI)
self.assertEqual(net._calc_min_size([(None, (3, 1), None)]), 3)
self.assertEqual(net._calc_min_size([(None, (3, 1), (3, 2))]), 5)
self.assertEqual(net._calc_min_size([(None, (3, 1), (3, 2)), (None, (3, 2), (5, 2))]), 25)
|
'Test that each mode can be instantiated.'
| def testModes(self):
| modes = [snt.nets.AlexNet.FULL, snt.nets.AlexNet.HALF, snt.nets.AlexNet.MINI]
keep_prob = tf.placeholder(tf.float32)
for mode in modes:
net = snt.nets.AlexNet(name='net_{}'.format(mode), mode=mode)
input_shape = [None, net._min_size, net._min_size, 3]
inputs = tf.placeholder(tf.float... |
'Test that batch norm can be instantiated.'
| def testBatchNorm(self):
| net = snt.nets.AlexNet(mode=snt.nets.AlexNet.FULL, use_batch_norm=True)
input_shape = [net._min_size, net._min_size, 3]
inputs = tf.placeholder(tf.float32, shape=([None] + input_shape))
output = net(inputs, is_training=True)
with self.test_session() as sess:
sess.run(tf.global_variables_init... |
'An exception should be raised if trying to use dropout when testing.'
| def testNoDropoutInTesting(self):
| net = snt.nets.AlexNet(mode=snt.nets.AlexNet.FULL)
input_shape = [net._min_size, net._min_size, 3]
inputs = tf.placeholder(tf.float32, shape=([None] + input_shape))
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
output = net(inputs, keep_prob, is_training=False)
with self.test_session(... |
'Check that an error is raised if the input image is too small.'
| def testInputTooSmall(self):
| keep_prob = tf.placeholder(tf.float32)
net = snt.nets.AlexNet(mode=snt.nets.AlexNet.FULL)
input_shape = [None, net._min_size, net._min_size, 1]
inputs = tf.placeholder(tf.float32, shape=input_shape)
net(inputs, keep_prob, is_training=True)
with self.assertRaisesRegexp(snt.IncompatibleShapeError,... |
'Check that the correct number of variables are made when sharing.'
| def testSharing(self):
| net = snt.nets.AlexNet(mode=snt.nets.AlexNet.MINI)
inputs1 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
inputs2 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
keep_prob1 = tf.placeholder(tf.float32)
keep_prob2 = tf.placeholder(tf.float32)
net(inputs1, keep_prob1, is_training=True... |
'Initialize Dilation module and test images.
Args:
num_output_classes: int. Number of output classes the dilation module
should predict per pixel.
depth: None or int. Input depth of image. If None, same as
num_output_classes.'
| def setUpWithNumOutputClasses(self, num_output_classes, depth=None):
| self._num_output_classes = num_output_classes
self._model_size = 'basic'
self._module = snt.nets.Dilation(num_output_classes=self._num_output_classes, model_size=self._model_size)
self._batch_size = 1
self._height = self._width = 5
self._depth = (depth or num_output_classes)
self._rng = np.r... |
'Constructs a `ConvNet2D` module.
By default, neither batch normalization nor activation are applied to the
output of the final layer.
Args:
output_channels: Iterable of output channels, as defined in
`conv.Conv2D`. Output channels can be defined either as number or via a
callable. In the latter case, since the functio... | def __init__(self, output_channels, kernel_shapes, strides, paddings, activation=tf.nn.relu, activate_final=False, initializers=None, partitioners=None, regularizers=None, use_batch_norm=False, use_bias=True, batch_norm_config=None, name='conv_net_2d'):
| if (not isinstance(output_channels, collections.Iterable)):
raise TypeError('output_channels must be iterable')
output_channels = tuple(output_channels)
if (not isinstance(kernel_shapes, collections.Iterable)):
raise TypeError('kernel_shapes must be iterable')
kernel_sh... |
'Instantiates all the convolutional modules used in the network.'
| def _instantiate_layers(self):
| with self._enter_variable_scope():
self._layers = tuple((conv.Conv2D(name='conv_2d_{}'.format(i), output_channels=self._output_channels[i], kernel_shape=self._kernel_shapes[i], stride=self._strides[i], padding=self._paddings[i], use_bias=self._use_bias[i], initializers=self._initializers, partitioners=self.... |
'Assembles the `ConvNet2D` and connects it to the graph.
Args:
inputs: A 4D Tensor of shape `[batch_size, input_height, input_width,
input_channels]`.
is_training: Boolean to indicate to `snt.BatchNorm` if we are
currently training. Must be specified explicitly if `use_batchnorm` is
`True`.
test_local_stats: Boolean to... | def _build(self, inputs, is_training=None, test_local_stats=True):
| if (self._use_batch_norm and (is_training is None)):
raise ValueError('Boolean is_training flag must be explicitly specified when using batch normalization.')
self._input_shape = tuple(inputs.get_shape().as_list())
net = inputs
final_index = (len(self._layers) - 1)
... |
'Returns a tuple containing the convolutional layers of the network.'
| @property
def layers(self):
| return self._layers
|
'Returns shape of input `Tensor` passed at last call to `build`.'
| @property
def input_shape(self):
| self._ensure_is_connected()
return self._input_shape
|
'Returns transposed version of this network.
Args:
transpose_constructor: A method that creates an instance of the transposed
network type. The method must accept the same kwargs as this methods
with the exception of the `transpose_constructor` argument.
name: Optional string specifying the name of the transposed modul... | def _transpose(self, transpose_constructor, name=None, output_channels=None, kernel_shapes=None, strides=None, paddings=None, activation=None, activate_final=None, initializers=None, partitioners=None, regularizers=None, use_batch_norm=None, use_bias=None, batch_norm_config=None):
| if (output_channels is None):
output_channels = []
for layer in reversed(self._layers):
output_channels.append((lambda l=layer: l.input_shape[(-1)]))
elif (len(output_channels) != len(self._layers)):
raise ValueError('Iterable output_channels length must match ... |
'Returns transposed version of this network.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
output_channels: Optional iterable of numbers of output channels.
kernel_shapes: Optional iterable of kernel sizes. The ... | def transpose(self, name=None, output_channels=None, kernel_shapes=None, strides=None, paddings=None, activation=None, activate_final=None, initializers=None, partitioners=None, regularizers=None, use_batch_norm=None, use_bias=None, batch_norm_config=None):
| output_shapes = []
for layer in reversed(self._layers):
output_shapes.append((lambda l=layer: l.input_shape[1:(-1)]))
transpose_constructor = functools.partial(ConvNet2DTranspose, output_shapes=output_shapes)
return self._transpose(transpose_constructor=transpose_constructor, name=name, output_c... |
'Constructs a `ConvNetTranspose2D` module.
`output_{shapes,channels}` can be defined either as iterable of
{iterables,integers} or via a callable. In the latter case, since the
function invocation is deferred to graph construction time, the user
must only ensure that entries can be called returning meaningful values wh... | def __init__(self, output_channels, output_shapes, kernel_shapes, strides, paddings, activation=tf.nn.relu, activate_final=False, initializers=None, partitioners=None, regularizers=None, use_batch_norm=False, use_bias=True, batch_norm_config=None, name='conv_net_2d_transpose'):
| if (not isinstance(output_channels, collections.Iterable)):
raise TypeError('output_channels must be iterable')
output_channels = tuple(output_channels)
num_layers = len(output_channels)
if (not isinstance(output_shapes, collections.Iterable)):
raise TypeError('output_shapes ... |
'Instantiates all the convolutional modules used in the network.'
| def _instantiate_layers(self):
| with self._enter_variable_scope():
self._layers = tuple((conv.Conv2DTranspose(name='conv_2d_transpose_{}'.format(i), output_channels=self._output_channels[i], output_shape=self._output_shapes[i], kernel_shape=self._kernel_shapes[i], stride=self._strides[i], padding=self._paddings[i], initializers=self._init... |
'Returns transposed version of this network.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
output_channels: Optional iterable of numbers of output channels.
kernel_shapes: Optional iterable of kernel sizes. The ... | def transpose(self, name=None, output_channels=None, kernel_shapes=None, strides=None, paddings=None, activation=None, activate_final=None, initializers=None, partitioners=None, regularizers=None, use_batch_norm=None, use_bias=None, batch_norm_config=None):
| return self._transpose(transpose_constructor=ConvNet2D, name=name, output_channels=output_channels, kernel_shapes=kernel_shapes, strides=strides, paddings=paddings, activation=activation, activate_final=activate_final, initializers=initializers, partitioners=partitioners, regularizers=regularizers, use_batch_norm=u... |
'Constructs AlexNet.
Args:
mode: Construction mode of network: `AlexNet.FULL`, `AlexNet.HALF` or
`AlexNet.MINI`.
use_batch_norm: Whether to use batch normalization between the output of
a layer and the activation function.
batch_norm_config: Optional mapping of additional configuration for the
`snt.BatchNorm` modules.
... | def __init__(self, mode=HALF, use_batch_norm=False, batch_norm_config=None, initializers=None, partitioners=None, regularizers=None, name='alex_net'):
| super(AlexNet, self).__init__(name=name)
self._mode = mode
self._use_batch_norm = use_batch_norm
if (batch_norm_config is not None):
if (not isinstance(batch_norm_config, collections.Mapping)):
raise TypeError('`batch_norm_config` must be a mapping, e.g. `dict`.')
... |
'Calculates the minimum size of the input layer.
Given a set of convolutional layers, calculate the minimum value of
the `input_height` and `input_width`, i.e. such that the output has
size 1x1. Assumes snt.VALID padding.
Args:
conv_layers: List of tuples `(output_channels, (kernel_size, stride),
(pooling_size, pooling... | def _calc_min_size(self, conv_layers):
| input_size = 1
for (_, conv_params, max_pooling) in reversed(conv_layers):
if (max_pooling is not None):
(kernel_size, stride) = max_pooling
input_size = ((input_size * stride) + (kernel_size - stride))
if (conv_params is not None):
(kernel_size, stride) = con... |
'Connects the AlexNet module into the graph.
The is_training flag only controls the batch norm settings, if `False` it
does not force no dropout by overriding any input `keep_prob`. To avoid any
confusion this may cause, if `is_training=False` and `keep_prob` would cause
dropout to be applied, an error is thrown.
Args:... | def _build(self, inputs, keep_prob=None, is_training=None, test_local_stats=True):
| if ((self._use_batch_norm or (keep_prob is not None)) and (is_training is None)):
raise ValueError('Boolean is_training flag must be explicitly specified when using batch normalization or dropout.')
input_shape = inputs.get_shape().as_list()
if ((input_shape[1] < ... |
'Returns integer specifying the minimum width and height for the input.
Note that the input can be non-square, but both the width and height must
be >= this number in size.
Returns:
The minimum size as an integer.'
| @property
def min_input_size(self):
| return self._min_size
|
'Returns list containing convolutional modules of network.
Returns:
A list containing the Conv2D modules.'
| @property
def conv_modules(self):
| self._ensure_is_connected()
return self._conv_modules
|
'Returns list containing linear modules of network.
Returns:
A list containing the Linear modules.'
| @property
def linear_modules(self):
| self._ensure_is_connected()
return self._linear_modules
|
'Creates a dilation module.
Args:
num_output_classes: Int. Number of output classes to predict for
each pixel in an image.
initializers: Optional dict containing ops to initialize filters (with key
\'w\') or biases (with key \'b\'). The default initializer makes this module
equivalent to the identity.
regularizers: Opt... | def __init__(self, num_output_classes, initializers=None, regularizers=None, model_size='basic', name='dilation'):
| super(Dilation, self).__init__(name=name)
self._num_output_classes = num_output_classes
self._model_size = model_size
self._initializers = util.check_initializers(initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(regularizers, self.POSSIBLE_INITIALIZER_KEYS)
|
'Build dilation module.
Args:
images: Tensor of shape [batch_size, height, width, depth]
and dtype float32. Represents a set of images with an arbitrary depth.
Note that when using the default initializer, depth must equal
num_output_classes.
Returns:
Tensor of shape [batch_size, height, width, num_output_classes] and ... | def _build(self, images):
| num_classes = self._num_output_classes
if (len(images.get_shape()) != 4):
raise base.IncompatibleShapeError("'images' must have shape [batch_size, height, width, depth].")
if (self.WEIGHTS not in self._initializers):
if (self._model_size == self.BASIC):
self.... |
'Create a dilated convolution layer.
Args:
output_channels: int. Number of output channels for each pixel.
dilation_rate: int. Represents how many pixels each stride offset will
move. A value of 1 indicates a standard convolution.
apply_relu: bool. If True, a ReLU non-linearlity is added.
name: string. Name for layer.
... | def _dilated_conv_layer(self, output_channels, dilation_rate, apply_relu, name):
| layer_components = [conv.Conv2D(output_channels, [3, 3], initializers=self._initializers, regularizers=self._regularizers, rate=dilation_rate, name=('dilated_conv_' + name))]
if apply_relu:
layer_components.append((lambda net: tf.nn.relu(net, name=('relu_' + name))))
return sequential.Sequential(lay... |
'Tests for regressions in variable names.'
| def testVariableMap(self):
| use_bias = True
var_names_w = [u'mlp/linear_0/w:0', u'mlp/linear_1/w:0', u'mlp/linear_2/w:0']
var_names_b = [u'mlp/linear_0/b:0', u'mlp/linear_1/b:0', u'mlp/linear_2/b:0']
correct_variable_names = set((var_names_w + var_names_b))
mlp = snt.nets.MLP(name=self.module_name, output_sizes=self.output_siz... |
'Constructs an MLP module.
Args:
output_sizes: An iterable of output dimensionalities as defined in
`basic.Linear`. Output size can be defined either as number or via a
callable. In the latter case, since the function invocation is deferred
to graph construction time, the user must only ensure that entries can
be calle... | def __init__(self, output_sizes, activation=tf.nn.relu, activate_final=False, initializers=None, partitioners=None, regularizers=None, use_bias=True, custom_getter=None, name='mlp'):
| super(MLP, self).__init__(custom_getter=custom_getter, name=name)
if (not isinstance(output_sizes, collections.Iterable)):
raise TypeError('output_sizes must be iterable')
output_sizes = tuple(output_sizes)
if (not output_sizes):
raise ValueError('output_sizes must not ... |
'Instantiates all the linear modules used in the network.
Layers are instantiated in the constructor, as opposed to the build
function, because MLP implements the Transposable interface, and the
transpose function can be called before the module is actually connected
to the graph and build is called.
Notice that this i... | def _instantiate_layers(self):
| with self._enter_variable_scope():
self._layers = [basic.Linear(self._output_sizes[i], name='linear_{}'.format(i), initializers=self._initializers, partitioners=self._partitioners, regularizers=self._regularizers, use_bias=self.use_bias) for i in xrange(self._num_layers)]
|
'Assembles the `MLP` and connects it to the graph.
Args:
inputs: A 2D Tensor of size `[batch_size, input_size]`.
Returns:
A 2D Tensor of size `[batch_size, output_sizes[-1]]`.'
| def _build(self, inputs):
| self._input_shape = tuple(inputs.get_shape().as_list())
net = inputs
final_index = (self._num_layers - 1)
for layer_id in xrange(self._num_layers):
net = self._layers[layer_id](net)
if ((final_index != layer_id) or self._activate_final):
net = self._activation(net)
return... |
'Returns a tuple containing the linear layers of the `MLP`.'
| @property
def layers(self):
| return self._layers
|
'Returns a tuple of all output sizes of all the layers.'
| @property
def output_sizes(self):
| return tuple([(l() if callable(l) else l) for l in self._output_sizes])
|
'Returns the size of the module output, not including the batch dimension.
This allows the MLP to be used inside a DeepRNN.
Returns:
The scalar size of the module output.'
| @property
def output_size(self):
| last_size = self._output_sizes[(-1)]
return (last_size() if callable(last_size) else last_size)
|
'Returns the intializers dictionary.'
| @property
def initializers(self):
| return self._initializers
|
'Returns the partitioners dictionary.'
| @property
def partitioners(self):
| return self._partitioners
|
'Returns the regularizers dictionary.'
| @property
def regularizers(self):
| return self._regularizers
|
'Returns shape of input `Tensor` passed at last call to `build`.'
| @property
def input_shape(self):
| self._ensure_is_connected()
return self._input_shape
|
'Returns transposed `MLP`.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
activate_final: Optional boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
Retu... | def transpose(self, name=None, activate_final=None):
| if (name is None):
name = (self.module_name + '_transpose')
if (activate_final is None):
activate_final = self.activate_final
output_sizes = [(lambda l=layer: l.input_shape[1]) for layer in self._layers]
output_sizes.reverse()
return MLP(name=name, output_sizes=output_sizes, activati... |
'Tests if .transpose correctly chooses the default parameters.
Args:
module: The conv net class.
param_name: The name of the parameter to test.'
| @parameterized.Parameters(*itertools.product([snt.nets.ConvNet2D, partial(snt.nets.ConvNet2DTranspose, output_shapes=[[100, 100]])], ['kernel_shapes', 'strides', 'paddings', 'activation', 'initializers', 'partitioners', 'regularizers', 'use_bias', 'batch_norm_config']))
def testTransposeDefaultParameter(self, module, p... | expected_reversed = ['kernel_shapes', 'strides', 'paddings', 'use_bias']
model = module(output_channels=[2, 3, 4], kernel_shapes=[[3, 3], [5, 5], [7, 7]], strides=[[1, 1], [2, 2], [3, 3]], paddings=[snt.SAME, snt.SAME, snt.VALID], use_batch_norm=[True, True, False], use_bias=[True, True, False])
transpose_m... |
'Tests if .transpose correctly passes through the given parameters.
Args:
module: The conv net class.
param_name_and_value: Tuple consisting of the parameter name and value.'
| @parameterized.Parameters(*itertools.product([snt.nets.ConvNet2D, partial(snt.nets.ConvNet2DTranspose, output_shapes=[[100, 100]])], [('kernel_shapes', [[3, 3], [3, 3], [3, 3]]), ('strides', [[1, 1], [1, 1], [1, 1]]), ('paddings', [snt.SAME, snt.SAME, snt.SAME]), ('activation', tf.nn.tanh), ('initializers', {}), ('part... | (param_name, param_value) = param_name_and_value
model = module(output_channels=[2, 3, 4], kernel_shapes=[[3, 3], [5, 5], [7, 7]], strides=[[1, 1], [2, 2], [3, 3]], paddings=[snt.SAME, snt.SAME, snt.VALID], use_batch_norm=[True, True, False], use_bias=[True, True, False])
transpose_model = model.transpose(*... |
'Tests for regressions in variable names.'
| def testVariableMap(self):
| use_bias = True
use_batch_norm = True
var_names_w = [u'conv_net_2d/conv_2d_0/w:0', u'conv_net_2d/conv_2d_1/w:0', u'conv_net_2d/conv_2d_2/w:0']
var_names_b = [u'conv_net_2d/conv_2d_0/b:0', u'conv_net_2d/conv_2d_1/b:0', u'conv_net_2d/conv_2d_2/b:0']
var_names_bn = [u'conv_net_2d/batch_norm_0/beta:0', ... |
'Returns concrete test functions for a test and a list of parameters.
The naming_type is used to determine the name of the concrete
functions as reported by the unittest framework. If naming_type is
_FIRST_ARG, the testcases must be tuples, and the first element must
have a string representation that is a valid Python ... | def __init__(self, test_method, testcases, naming_type):
| self._test_method = test_method
self.testcases = testcases
self._naming_type = naming_type
self.__name__ = _ParameterizedTestIter.__name__
|
'Returns the descriptive ID of the test.
This is used internally by the unittesting framework to get a name
for the test to be used in reports.
Returns:
The test id.'
| def id(self):
| return ('%s.%s%s' % (_StrClass(self.__class__), self._OriginalName(), self._id_suffix.get(self._testMethodName, '')))
|
'Creates a TokenDataSource instance.
Args:
data_file: file object containing text data to be tokenized.
vocab_data_file: file object containing text data used to initialize
the vocabulary.'
| def __init__(self, data_file, vocab_data_file):
| def reading_function(f):
return list(f.read().replace('\n', self.CHAR_EOS))
self._vocab_dict = {}
self._inv_vocab_dict = {}
token_list = reading_function(vocab_data_file)
self.vocab_size = 0
for token in (self.DEFAULT_START_TOKENS + token_list):
if (token not in self._vocab_dict)... |
'Produces the list of integer indices corresponding to a token list.'
| def tokenize(self, token_list):
| return [self._vocab_dict.get(token, self._vocab_dict[self.UNK]) for token in token_list]
|
'Produces a human-readable representation of the token list.'
| def decode(self, token_list):
| return ''.join([self._inv_vocab_dict[token] for token in token_list])
|
'Initializes a TinyShakespeare sequence data object.
Args:
num_steps: sequence_length.
batch_size: batch size.
subset: \'train\', \'valid\' or \'test\'.
random: boolean indicating whether to do random sampling of sequences.
Default is false (sequential sampling).
dtype: type of generated tensors (both observations and ... | def __init__(self, num_steps=1, batch_size=1, subset='train', random=False, dtype=tf.float32, name='tiny_shakespeare_dataset'):
| if (subset not in [self.TRAIN, self.VALID, self.TEST]):
raise ValueError(('subset should be %s, %s, or %s. Received %s instead.' % (self.TRAIN, self.VALID, self.TEST, subset)))
super(TinyShakespeareDataset, self).__init__(name=name)
self._vocab_file = gfile.Open(os.path.jo... |
'Returns a batch of sequences.
Returns:
obs: np.int32 array of size [Time, Batch]
target: np.int32 array of size [Time, Batch]'
| def _get_batch(self):
| batch_indices = np.mod(np.array([np.arange(head_index, ((head_index + self._num_steps) + 1)) for head_index in self._head_indices]), self._n_flat_elements)
obs = np.array([self._flat_data[indices[:self._num_steps]] for indices in batch_indices]).T
target = np.array([self._flat_data[indices[1:(self._num_step... |
'Returns a tuple containing observation and target one-hot tensors.'
| def _build(self):
| q = tf.FIFOQueue(self._queue_capacity, [self._dtype, self._dtype], shapes=([[self._num_steps, self._batch_size, self._vocab_size]] * 2))
(obs, target) = tf.py_func(self._get_batch, [], [tf.int32, tf.int32])
obs = self._one_hot(obs)
target = self._one_hot(target)
enqueue_op = q.enqueue([obs, target])... |
'Returns cost.
Args:
logits: model output.
target: target.
Returns:
Cross-entropy loss for a sequence of logits. The loss will be averaged
across time steps if time_average_cost was enabled at construction time.'
| def cost(self, logits, target):
| logits = tf.reshape(logits, [(self._num_steps * self._batch_size), (-1)])
target = tf.reshape(target, [(self._num_steps * self._batch_size), (-1)])
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=target)
loss = tf.reduce_sum(xent)
return (loss / self._batch_size)
|
'Returns a human-readable version of a one-hot encoding of words.
Args:
data: A tuple with (obs, target). `obs` is a numpy array with one-hot
encoding of words.
label_batch_entries: bool. Whether to add numerical label before each
batch element in the output string.
indices: List of int or None. Used to select a subset... | def to_human_readable(self, data, label_batch_entries=True, indices=None, sep='\n'):
| obs = data[0]
batch_size = obs.shape[1]
result = []
indices = (xrange(batch_size) if (not indices) else indices)
for b in indices:
index_seq = np.argmax(obs[:, b], axis=1)
prefix = ('b_{}: '.format(b) if label_batch_entries else '')
result.append((prefix + self._data_sourc... |
'Constructs a `TextModel`.
Args:
num_embedding: Size of embedding representation, used directly after the
one-hot encoded input.
num_hidden: Number of hidden units in each LSTM layer.
lstm_depth: Number of LSTM layers.
output_size: Size of the output layer on top of the DeepRNN.
use_dynamic_rnn: Whether to use dynamic ... | def __init__(self, num_embedding, num_hidden, lstm_depth, output_size, use_dynamic_rnn=True, use_skip_connections=True, name='text_model'):
| super(TextModel, self).__init__(name=name)
self._num_embedding = num_embedding
self._num_hidden = num_hidden
self._lstm_depth = lstm_depth
self._output_size = output_size
self._use_dynamic_rnn = use_dynamic_rnn
self._use_skip_connections = use_skip_connections
with self._enter_variable_s... |
'Builds the deep LSTM model sub-graph.
Args:
one_hot_input_sequence: A Tensor with the input sequence encoded as a
one-hot representation. Its dimensions should be `[truncation_length,
batch_size, output_size]`.
Returns:
Tuple of the Tensor of output logits for the batch, with dimensions
`[truncation_length, batch_size... | def _build(self, one_hot_input_sequence):
| input_shape = one_hot_input_sequence.get_shape()
batch_size = input_shape[1]
batch_embed_module = snt.BatchApply(self._embed_module)
input_sequence = batch_embed_module(one_hot_input_sequence)
input_sequence = tf.nn.relu(input_sequence)
initial_state = self._core.initial_state(batch_size)
if... |
'Builds sub-graph to generate a string, sampled from the model.
Args:
initial_logits: Starting logits to sampling from.
initial_state: Starting state for the RNN core.
sequence_length: Number of characters to sample.
Returns:
A Tensor of characters, with dimensions `[sequence_length, batch_size,
output_size]`.'
| @snt.experimental.reuse_vars
def generate_string(self, initial_logits, initial_state, sequence_length):
| current_logits = initial_logits
current_state = initial_state
generated_letters = []
for _ in range(sequence_length):
char_index = tf.squeeze(tf.multinomial(current_logits, 1))
char_one_hot = tf.one_hot(char_index, self._output_size, 1.0, 0.0)
generated_letters.append(char_one_ho... |
'Tests reproducibility of Torch results.'
| def testResults(self):
| problem = problems.simple()
optimizer = meta.MetaOptimizer(net=dict(net='CoordinateWiseDeepLSTM', net_options={'layers': (), 'initializer': 'zeros'}))
minimize_ops = optimizer.meta_minimize(problem, 5)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
(cost, f... |
'Tests different variable->net mappings in multi-optimizer problem.'
| @parameterized.expand([(None, {'net': {'net': 'CoordinateWiseDeepLSTM', 'net_options': {'layers': (1, 1)}}}), ([('net', ['x_0', 'x_1'])], {'net': {'net': 'CoordinateWiseDeepLSTM', 'net_options': {'layers': (1,)}}}), ([('net1', ['x_0']), ('net2', ['x_1'])], {'net1': {'net': 'CoordinateWiseDeepLSTM', 'net_options': {'lay... | problem = problems.simple_multi_optimizer(num_dims=2)
optimizer = meta.MetaOptimizer(**net_config)
minimize_ops = optimizer.meta_minimize(problem, 3, net_assignments=net_assignments)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
train(sess, minimize_ops, 1... |
'Tests second derivatives for simple problem.'
| def testSecondDerivatives(self):
| problem = problems.simple()
optimizer = meta.MetaOptimizer(net=dict(net='CoordinateWiseDeepLSTM', net_options={'layers': ()}))
minimize_ops = optimizer.meta_minimize(problem, 3, second_derivatives=True)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
train(s... |
'Tests L2L applied to problem with convolutions.'
| def testConvolutional(self):
| kernel_shape = 4
def convolutional_problem():
conv = snt.Conv2D(output_channels=1, kernel_shape=kernel_shape, stride=1, name='conv')
output = conv(tf.random_normal((100, 100, 3, 10)))
return tf.reduce_sum(output)
net_config = {'conv': {'net': 'KernelDeepLSTM', 'net_options': {'kernel... |
'Tests L2L applied to problem with while loop.'
| def testWhileLoopProblem(self):
| def while_loop_problem():
x = tf.get_variable('x', shape=[], initializer=tf.ones_initializer())
(_, x_squared) = tf.while_loop(cond=(lambda t, _: (t < 1)), body=(lambda t, x: ((t + 1), (x * x))), loop_vars=(0, x), name='loop')
return x_squared
optimizer = meta.MetaOptimizer(net=dict(net=... |
'Tests saving and loading a meta-optimizer.'
| def testSaveAndLoad(self):
| layers = (2, 3)
net_options = {'layers': layers, 'initializer': 'zeros'}
num_unrolls = 2
num_epochs = 1
problem = problems.simple()
with tf.Graph().as_default() as g1:
optimizer = meta.MetaOptimizer(net=dict(net='CoordinateWiseDeepLSTM', net_options=net_options))
minimize_ops = o... |
'Tests the network contains trainable variables.'
| def testTrainable(self):
| shape = [10, 5]
gradients = tf.random_normal(shape)
net = networks.CoordinateWiseDeepLSTM(layers=(1,))
state = net.initial_state_for_inputs(gradients)
net(gradients, state)
variables = snt.get_variables_in_module(net)
self.assertEqual(len(variables), 4)
|
'Tests zero updates when last layer is initialized to zero.'
| @parameterized.expand([['zeros'], [{'w': 'zeros', 'b': 'zeros', 'bad': 'bad'}], [{'w': tf.zeros_initializer(), 'b': np.array([0])}], [{'linear': {'w': tf.zeros_initializer(), 'b': 'zeros'}}]])
def testResults(self, initializer):
| shape = [10]
gradients = tf.random_normal(shape)
net = networks.CoordinateWiseDeepLSTM(layers=(1, 1), initializer=initializer)
state = net.initial_state_for_inputs(gradients)
(update, _) = net(gradients, state)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())... |
'Tests the network contains trainable variables.'
| def testTrainable(self):
| kernel_shape = [5, 5]
shape = (kernel_shape + [2, 2])
gradients = tf.random_normal(shape)
net = networks.KernelDeepLSTM(layers=(1,), kernel_shape=kernel_shape)
state = net.initial_state_for_inputs(gradients)
net(gradients, state)
variables = snt.get_variables_in_module(net)
self.assertEq... |
'Tests zero updates when last layer is initialized to zero.'
| @parameterized.expand([['zeros'], [{'w': 'zeros', 'b': 'zeros', 'bad': 'bad'}], [{'w': tf.zeros_initializer(), 'b': np.array([0])}], [{'linear': {'w': tf.zeros_initializer(), 'b': 'zeros'}}]])
def testResults(self, initializer):
| kernel_shape = [5, 5]
shape = (kernel_shape + [2, 2])
gradients = tf.random_normal(shape)
net = networks.KernelDeepLSTM(layers=(1, 1), kernel_shape=kernel_shape, initializer=initializer)
state = net.initial_state_for_inputs(gradients)
(update, _) = net(gradients, state)
with self.test_sessio... |
'Tests the network doesn\'t contain trainable variables.'
def testNonTrainable(self):
    """Tests the network doesn't contain trainable variables."""
    grads = tf.random_normal([10, 5])
    net = networks.Sgd()
    initial_state = net.initial_state_for_inputs(grads)
    net(grads, initial_state)
    # Plain SGD has no learned parameters, so the module owns no variables.
    self.assertEqual(len(snt.get_variables_in_module(net)), 0)
|
'Tests network produces zero updates with learning rate equal to zero.'
| def testResults(self):
| shape = [10]
learning_rate = 0.01
gradients = tf.random_normal(shape)
net = networks.Sgd(learning_rate=learning_rate)
state = net.initial_state_for_inputs(gradients)
(update, _) = net(gradients, state)
with self.test_session() as sess:
(gradients_np, update_np) = sess.run([gradients,... |
'Tests the network doesn\'t contain trainable variables.'
def testNonTrainable(self):
    """Tests the network doesn't contain trainable variables."""
    grads = tf.random_normal([10, 5])
    net = networks.Adam()
    initial_state = net.initial_state_for_inputs(grads)
    net(grads, initial_state)
    # Adam keeps its moment estimates in the RNN state, not in variables.
    self.assertEqual(len(snt.get_variables_in_module(net)), 0)
|
'Tests network produces zero updates with learning rate equal to zero.'
| def testZeroLearningRate(self):
| shape = [10]
gradients = tf.random_normal(shape)
net = networks.Adam(learning_rate=0)
state = net.initial_state_for_inputs(gradients)
(update, _) = net(gradients, state)
with self.test_session() as sess:
update_np = sess.run(update)
self.assertAllEqual(update_np, np.zeros(shape))... |
'Creates a MetaOptimizer.
Args:
**kwargs: A set of keyword arguments mapping network identifiers (the
keys) to parameters that will be passed to networks.Factory (see docs
for more info). These can be used to assign different optimizee
parameters to different optimizers (see net_assignments in the
meta_loss method).'
def __init__(self, **kwargs):
    """Creates a MetaOptimizer.

    Args:
      **kwargs: A set of keyword arguments mapping network identifiers (the
        keys) to parameters that will be passed to networks.Factory (see docs
        for more info). These can be used to assign different optimizee
        parameters to different optimizers (see net_assignments in the
        meta_loss method).
    """
    self._nets = None
    # Fall back to a single coordinatewise DeepLSTM when no configuration
    # is supplied by the caller.
    default_config = {
        'coordinatewise': {
            'net': 'CoordinateWiseDeepLSTM',
            'net_options': {
                'layers': (20, 20),
                'preprocess_name': 'LogAndSign',
                'preprocess_options': {'k': 5},
                'scale': 0.01,
            },
        },
    }
    self._config = kwargs if kwargs else default_config
|
'Save meta-optimizer.'
| def save(self, sess, path=None):
| result = {}
for (k, net) in self._nets.items():
if (path is None):
filename = None
key = k
else:
filename = os.path.join(path, '{}.l2l'.format(k))
key = filename
net_vars = networks.save(net, sess, filename=filename)
result[key] = n... |
'Returns an operator computing the meta-loss.
Args:
make_loss: Callable which returns the optimizee loss; note that this
should create its ops in the default graph.
len_unroll: Number of steps to unroll.
net_assignments: variable to optimizer mapping. If not None, it should be
a list of (k, names) tuples, where k is a ... | def meta_loss(self, make_loss, len_unroll, net_assignments=None, second_derivatives=False):
| (x, constants) = _get_variables(make_loss)
print('Optimizee variables')
print([op.name for op in x])
print('Problem variables')
print([op.name for op in constants])
(nets, net_keys, subsets) = _make_nets(x, self._config, net_assignments)
self._nets = nets
state = []
with tf.nam... |
'Returns an operator minimizing the meta-loss.
Args:
make_loss: Callable which returns the optimizee loss; note that this
should create its ops in the default graph.
len_unroll: Number of steps to unroll.
learning_rate: Learning rate for the Adam optimizer.
**kwargs: keyword arguments forwarded to meta_loss.
Returns:
def meta_minimize(self, make_loss, len_unroll, learning_rate=0.01, **kwargs):
    """Returns an operator minimizing the meta-loss.

    Args:
      make_loss: Callable which returns the optimizee loss; note that this
        should create its ops in the default graph.
      len_unroll: Number of steps to unroll.
      learning_rate: Learning rate for the Adam optimizer.
      **kwargs: keyword arguments forwarded to meta_loss.
    """
    loss_info = self.meta_loss(make_loss, len_unroll, **kwargs)
    # Optimize the meta-loss itself with a standard Adam optimizer.
    minimize_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_info.loss)
    # Keep the remaining fields of the loss info, swapping in the train step.
    return MetaStep(minimize_step, *loss_info[1:])
|
'Initial state given inputs.'
@abc.abstractmethod
def initial_state_for_inputs(self, inputs, **kwargs):
    """Initial state given inputs.

    Subclasses return the recurrent/optimizer state matching `inputs`.
    """
    pass
|
'Creates an instance of `StandardDeepLSTM`.
Args:
output_size: Output sizes of the final linear layer.
layers: Output sizes of LSTM layers.
preprocess_name: Gradient preprocessing class name (in `l2l.preprocess` or
tf modules). Default is `tf.identity`.
preprocess_options: Gradient preprocessing options.
scale: Gradien... | def __init__(self, output_size, layers, preprocess_name='identity', preprocess_options=None, scale=1.0, initializer=None, name='deep_lstm'):
| super(StandardDeepLSTM, self).__init__(name=name)
self._output_size = output_size
self._scale = scale
if hasattr(preprocess, preprocess_name):
preprocess_class = getattr(preprocess, preprocess_name)
self._preprocess = preprocess_class(**preprocess_options)
else:
self._preproc... |
'Connects the `StandardDeepLSTM` module into the graph.
Args:
inputs: 2D `Tensor` ([batch_size, input_size]).
prev_state: `DeepRNN` state.
Returns:
`Tensor` shaped as `inputs`.'
def _build(self, inputs, prev_state):
    """Connects the `StandardDeepLSTM` module into the graph.

    Args:
      inputs: 2D `Tensor` ([batch_size, input_size]).
      prev_state: `DeepRNN` state.

    Returns:
      `Tensor` shaped as `inputs`.
    """
    # Preprocess the gradients; preprocessing may expand the last dimension.
    preprocessed = self._preprocess(tf.expand_dims(inputs, -1))
    batch_size = preprocessed.get_shape().as_list()[0]
    flat = tf.reshape(preprocessed, [batch_size, -1])
    rnn_output, next_state = self._rnn(flat, prev_state)
    # Project back to the output size and rescale the update.
    scaled_output = self._linear(rnn_output) * self._scale
    return scaled_output, next_state
|
'Creates an instance of `CoordinateWiseDeepLSTM`.
Args:
name: Module name.
**kwargs: Additional `DeepLSTM` args.'
def __init__(self, name='cw_deep_lstm', **kwargs):
    """Creates an instance of `CoordinateWiseDeepLSTM`.

    Args:
      name: Module name.
      **kwargs: Additional `DeepLSTM` args.
    """
    # Each coordinate is updated independently, so the underlying DeepLSTM
    # produces a single scalar output per coordinate (output_size=1).
    super(CoordinateWiseDeepLSTM, self).__init__(1, name=name, **kwargs)
|
'Connects the CoordinateWiseDeepLSTM module into the graph.
Args:
inputs: Arbitrarily shaped `Tensor`.
prev_state: `DeepRNN` state.
Returns:
`Tensor` shaped as `inputs`.'
def _build(self, inputs, prev_state):
    """Connects the CoordinateWiseDeepLSTM module into the graph.

    Args:
      inputs: Arbitrarily shaped `Tensor`.
      prev_state: `DeepRNN` state.

    Returns:
      `Tensor` shaped as `inputs`.
    """
    original_shape = inputs.get_shape().as_list()
    flat_inputs = self._reshape_inputs(inputs)
    # Delegate the actual computation to the parent StandardDeepLSTM on the
    # flattened coordinates, then restore the caller's shape.
    flat_output, next_state = super(CoordinateWiseDeepLSTM, self)._build(
        flat_inputs, prev_state)
    return tf.reshape(flat_output, original_shape), next_state
|
'Creates an instance of `KernelDeepLSTM`.
Args:
kernel_shape: Kernel shape (2D `tuple`).
name: Module name.
**kwargs: Additional `DeepLSTM` args.'
def __init__(self, kernel_shape, name='kernel_deep_lstm', **kwargs):
    """Creates an instance of `KernelDeepLSTM`.

    Args:
      kernel_shape: Kernel shape (2D `tuple`).
      name: Module name.
      **kwargs: Additional `DeepLSTM` args.
    """
    self._kernel_shape = kernel_shape
    # The LSTM emits one update per kernel element (flattened kernel).
    super(KernelDeepLSTM, self).__init__(
        np.prod(kernel_shape), name=name, **kwargs)
|
'Connects the KernelDeepLSTM module into the graph.
Args:
inputs: 4D `Tensor` (convolutional filter).
prev_state: `DeepRNN` state.
Returns:
`Tensor` shaped as `inputs`.'
| def _build(self, inputs, prev_state):
| input_shape = inputs.get_shape().as_list()
reshaped_inputs = self._reshape_inputs(inputs)
build_fn = super(KernelDeepLSTM, self)._build
(output, next_state) = build_fn(reshaped_inputs, prev_state)
transposed_output = tf.transpose(output, [1, 0])
return (tf.reshape(transposed_output, input_shape)... |
'Batch size given inputs.'
def initial_state_for_inputs(self, inputs, **kwargs):
    """Batch size given inputs.

    The initial state is sized for the reshaped (flattened) inputs rather
    than the raw 4D kernel tensor.
    """
    flat = self._reshape_inputs(inputs)
    return super(KernelDeepLSTM, self).initial_state_for_inputs(flat, **kwargs)
|
'Creates an instance of the Identity optimizer network.
Args:
learning_rate: constant learning rate to use.
name: Module name.'
def __init__(self, learning_rate=0.001, name='sgd'):
    """Creates an instance of the SGD optimizer network.

    Args:
      learning_rate: constant learning rate to use.
      name: Module name.
    """
    super(Sgd, self).__init__(name=name)
    self._learning_rate = learning_rate
|
'Creates an instance of Adam.'
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, name='adam'):
    """Creates an instance of Adam.

    Args:
      learning_rate: Constant step size.
      beta1: Exponential decay rate for the first-moment estimate.
      beta2: Exponential decay rate for the second-moment estimate.
      epsilon: Small constant for numerical stability.
      name: Module name.
    """
    super(Adam, self).__init__(name=name)
    self._learning_rate = learning_rate
    self._beta1 = beta1
    self._beta2 = beta2
    self._epsilon = epsilon
|
'Connects the Adam module into the graph.'
| def _build(self, g, prev_state):
| b1 = self._beta1
b2 = self._beta2
g_shape = g.get_shape().as_list()
g = tf.reshape(g, ((-1), 1))
(t, m, v) = prev_state
t_next = (t + 1)
m_next = _update_adam_estimate(m, g, b1)
m_hat = _debias_adam_estimate(m_next, b1, t_next)
v_next = _update_adam_estimate(v, tf.square(g), b2)
... |
'Tests L2L applied to simple problem.'
| def testSimple(self):
| problem = problems.simple()
optimizer = meta.MetaOptimizer(net=dict(net='CoordinateWiseDeepLSTM', net_options={'layers': (), 'initializer': 'zeros'}))
minimize_ops = optimizer.meta_minimize(problem, 20, learning_rate=0.01)
with self.test_session() as sess:
sess.run(tf.global_variables_initialize... |
'Connects the LogAndSign module into the graph.
Args:
gradients: `Tensor` of gradients with shape `[d_1, ..., d_n]`.
Returns:
`Tensor` with shape `[d_1, ..., d_n-1, 2 * d_n]`. The first `d_n` elements
along the nth dimension correspond to the log output and the remaining
`d_n` elements to the sign output.'
| def _build(self, gradients):
| eps = np.finfo(gradients.dtype.as_numpy_dtype).eps
ndims = gradients.get_shape().ndims
log = tf.log((tf.abs(gradients) + eps))
clamped_log = Clamp(min_value=(-1.0))((log / self._k))
sign = Clamp(min_value=(-1.0), max_value=1.0)((gradients * np.exp(self._k)))
return tf.concat([clamped_log, sign],... |
'You should not call this constructor directly, rather use the convenience functions
that are in track.py. For example, call track.track_from_filename
Let\'s always get the bucket `audio_summary`'
def __init__(self, identifier, md5, properties):
    """You should not call this constructor directly; rather use the
    convenience functions in track.py (e.g. track.track_from_filename).

    Args:
      identifier: The track's id.
      md5: MD5 hash of the audio file.
      properties: dict of additional attributes to set on this instance.
    """
    super(TrackProxy, self).__init__()
    self.id = identifier
    self.md5 = md5
    self.analysis_url = None
    self._object_type = 'track'
    # Merge fetched properties last, so keys in `properties` can add to (or
    # override) the attributes set above.
    self.__dict__.update(properties)
|
'Create a catalog object (get a catalog by ID or get or create one given by name and type)
Args:
id (str): A catalog id or name
Kwargs:
type (str): \'song\' or \'artist\', specifying the catalog type
Returns:
A catalog object
Example:
>>> c = catalog.Catalog(\'my_songs\', type=\'song\')
>>> c.id
def __init__(self, id, type=None, **kwargs):
    """Create a catalog object (get a catalog by ID, or get-or-create one
    given by name and type).

    Args:
      id (str): A catalog id or name.
      type (str): 'song' or 'artist', specifying the catalog type.

    Returns:
      A catalog object.
    """
    super(Catalog, self).__init__(id, type, **kwargs)
|
'Update a catalog object
Args:
items (list): A list of dicts describing update data and action codes (see api docs)
Kwargs:
Returns:
A ticket id
Example:
>>> c = catalog.Catalog(\'my_songs\', type=\'song\')
>>> items
[{\'action\': \'update\',
\'item\': {\'artist_name\': \'dAn ThE aUtOmAtOr\',
\'disc_number\': 1,
def update(self, items):
    """Update a catalog object.

    Args:
      items (list): A list of dicts describing update data and action codes
        (see api docs).

    Returns:
      A ticket id (str).
    """
    # Serialize items with dthandler so datetime values survive json.dumps.
    payload = {'data': json.dumps(items, default=dthandler)}
    return self.post_attribute('update', data=payload)['ticket']
|
'Check the status of a catalog update
Args:
ticket (str): A string representing a ticket ID
Kwargs:
Returns:
A dictionary representing ticket status
Example:
>>> ticket
u\'7dcad583f2a38e6689d48a792b2e4c96\'
>>> c.status(ticket)
{u\'ticket_status\': u\'complete\', u\'update_info\': []}'
def status(self, ticket):
    """Check the status of a catalog update.

    Args:
      ticket (str): A string representing a ticket ID.

    Returns:
      A dictionary representing ticket status.
    """
    return self.get_attribute_simple('status', ticket=ticket)
|
'Check the status of a catalog update
Args:
Kwargs:
Returns:
A dictionary representing ticket status
Example:
>>> c
<catalog - test_song_catalog>
>>> c.profile()
{u\'id\': u\'CAGPXKK12BB06F9DE9\',
u\'name\': u\'test_song_catalog\',
u\'pending_tickets\': [],
u\'resolved\': 2,
u\'total\': 4,
u\'type\': u\'song\'}'
def get_profile(self):
    """Check the profile of a catalog.

    Returns:
      A dictionary describing the catalog (id, name, type, pending
      tickets, etc.), i.e. the 'catalog' entry of the API response.
    """
    return self.get_attribute('profile')['catalog']
|
'Returns data from the catalog; also expanded for the requested buckets.
This method is provided for backwards-compatibility
Args:
Kwargs:
buckets (list): A list of strings specifying which buckets to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result se... | def read_items(self, buckets=None, results=15, start=0, item_ids=None):
| warnings.warn('catalog.read_items() is depreciated. Please use catalog.get_item_dicts() instead.')
kwargs = {}
kwargs['bucket'] = (buckets or [])
kwargs['item_id'] = (item_ids or [])
response = self.get_attribute('read', results=results, start=start, **kwargs)
rval = ResultList... |
'Returns data from the catalog; also expanded for the requested buckets
Args:
Kwargs:
buckets (list): A list of strings specifying which buckets to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of dicts representing objects in th... | def get_item_dicts(self, buckets=None, results=15, start=0, item_ids=None):
| kwargs = {}
kwargs['bucket'] = (buckets or [])
kwargs['item_id'] = (item_ids or [])
response = self.get_attribute('read', results=results, start=start, **kwargs)
rval = ResultList(response['catalog']['items'])
if item_ids:
rval.start = 0
rval.total = len(response['catalog']['item... |
'Returns feed (news, blogs, reviews, audio, video) for the catalog artists; response depends on requested buckets
Args:
Kwargs:
buckets (list): A list of strings specifying which feed items to retrieve
results (int): An integer number of results to return
start (int): An integer starting value for the result set
def get_feed(self, buckets=None, since=None, results=15, start=0):
    """Returns feed (news, blogs, reviews, audio, video) for the catalog
    artists; response depends on requested buckets.

    Args:
      buckets (list): A list of strings specifying which feed items to
        retrieve.
      since: Only return items newer than this (omitted when falsy).
      results (int): An integer number of results to return.
      start (int): An integer starting value for the result set.

    Returns:
      A ResultList of feed items.
    """
    query = {'bucket': buckets or []}
    if since:
        query['since'] = since
    response = self.get_attribute('feed', results=results, start=start, **query)
    return ResultList(response['feed'])
|
'Deletes the entire catalog
Args:
Kwargs:
Returns:
The deleted catalog\'s id.
Example:
>>> c
<catalog - test_song_catalog>
>>> c.delete()
{u\'id\': u\'CAXGUPY12BB087A21D\'}'
def delete(self):
    """Deletes the entire catalog.

    Returns:
      A dict containing the deleted catalog's id.
    """
    return self.post_attribute('delete')
|
'Retrieve the detailed analysis for the track, if available.
Raises Exception if unable to create the detailed analysis.'
| def get_analysis(self):
| if self.analysis_url:
try:
try:
json_string = urllib2.urlopen(self.analysis_url).read()
except urllib2.HTTPError:
param_dict = dict(id=self.id)
new_track = _profile(param_dict, DEFAULT_ASYNC_TIMEOUT)
if (new_track and ne... |
'Artist class
Args:
id (str): an artist ID
Returns:
An artist object
Example:
>>> a = artist.Artist(\'ARH6W4X1187B99274F\', buckets=[\'hotttnesss\'])
>>> a.hotttnesss
0.80098515900997658'
def __init__(self, id, **kwargs):
    """Artist class.

    Args:
      id (str): an artist ID.
      **kwargs: forwarded to the parent constructor (e.g. buckets).

    Returns:
      An artist object.
    """
    super(Artist, self).__init__(id, **kwargs)
|
'Get a list of audio documents found on the web related to an artist
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
... | def get_audio(self, results=15, start=0, cache=True):
| if (cache and ('audio' in self.cache) and (results == 15) and (start == 0)):
return self.cache['audio']
else:
response = self.get_attribute('audio', results=results, start=start)
if ((results == 15) and (start == 0)):
self.cache['audio'] = ResultList(response['audio'], 0, res... |
'Get a list of artist biographies
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
results (int): An integer number of results to return
start (int): An integer starting value for the result set
license (str): A string specifying the desir... | def get_biographies(self, results=15, start=0, license=None, cache=True):
| if (cache and ('biographies' in self.cache) and (results == 15) and (start == 0) and (license == None)):
return self.cache['biographies']
else:
response = self.get_attribute('biographies', results=results, start=start, license=license)
if ((results == 15) and (start == 0) and (license ==... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.