desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
@property
def input_size(self):
    """The expected length of the input vector."""
    return self.block_size * self.num_blocks
def _left_zero_blocks(self, r):
    """Number of blocks with zeros from the left in block row `r`."""
    if not self._include_off_diagonal:
        # Diagonal-only layout: everything left of the diagonal is zero.
        return r
    elif not self._upper:
        # Lower-triangular content starts at the first column.
        return 0
    elif self._include_diagonal:
        return r
    else:
        # Strictly-upper layout: the diagonal block itself is zero too.
        return r + 1
def _right_zero_blocks(self, r):
    """Number of blocks with zeros from the right in block row `r`."""
    if not self._include_off_diagonal:
        # Diagonal-only layout: everything right of the diagonal is zero.
        return self._block_rows - r - 1
    elif self._upper:
        # Upper-triangular content runs to the last column.
        return 0
    elif self._include_diagonal:
        return self._block_rows - r - 1
    else:
        # Strictly-lower layout: the diagonal block itself is zero too.
        return self._block_rows - r
def _content_blocks(self, r):
    """Number of content blocks in block row `r`."""
    # Whatever is not zeroed on the left or right is content.
    zeros = self._left_zero_blocks(r) + self._right_zero_blocks(r)
    return self._block_rows - zeros
def __init__(self, block_shape, block_rows, name='block_diagonal_matrix'):
    """Constructs a new `BlockDiagonalMatrix` module.

    Args:
      block_shape: tuple, 2-dimensional tuple indicating the shape of each
        individual block.
      block_rows: int, the number of blocks in each row (and column) of the
        output matrix.
      name: string, name of the module.
    """
    # A block-diagonal matrix is the parent's block-triangular layout with
    # only the diagonal blocks enabled.
    super(BlockDiagonalMatrix, self).__init__(
        block_shape=block_shape,
        block_rows=block_rows,
        include_diagonal=True,
        include_off_diagonal=False,
        name=name)
# NOTE(review): the two entries below (Conv2D.__init__ and Conv2D._build) are
# truncated in this extracted dump -- their docstrings and bodies end mid-line
# with an ellipsis, so the full original code is not recoverable from this
# chunk. The text is left byte-for-byte as found; it is not runnable as-is.
'Constructs a Conv2D module. See the following documentation for an explanation of VALID versus SAME padding modes: https://www.tensorflow.org/api_guides/python/nn#Convolution Args: output_channels: Number of output channels. `output_channels` can be either a number or a callable. In the latter case, since the function...
def __init__(self, output_channels, kernel_shape, stride=1, rate=1, padding=SAME, use_bias=True, initializers=None, partitioners=None, regularizers=None, mask=None, data_format=DATA_FORMAT_NHWC, custom_getter=None, name='conv_2d'):
    super(Conv2D, self).__init__(custom_getter=custom_getter, name=name) self._output_channels = output_channels self._input_shape = None self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, 2, 'kernel') if (data_format not in SUPPORTED_DATA_FORMATS): raise ValueError('Invalid ...
'Connects the Conv2D module into the graph, with input Tensor `inputs`. If this is not the first time the module has been connected to the graph, the input Tensor provided here must have the same final 3 dimensions, in order for the existing variables to be the correct size for the multiplication. The batch size may di...
def _build(self, inputs):
    self._input_shape = tuple(inputs.get_shape().as_list()) if (len(self._input_shape) != 4): raise base.IncompatibleShapeError('Input Tensor must have shape (batch_size, input_height, input_width, input_channels) or (batch_size, input_channels, input_height, input_wid...
@property
def output_channels(self):
    """Returns the number of output channels."""
    # `output_channels` may be a callable whose evaluation was deferred to
    # graph-construction time; resolve and memoize it on first access.
    if callable(self._output_channels):
        self._output_channels = self._output_channels()
    return self._output_channels

@property
def kernel_shape(self):
    """Returns the kernel shape."""
    return self._kernel_shape

@property
def stride(self):
    """Returns the stride, with a 1 prepended and appended."""
    return (1,) + self._stride + (1,)

@property
def rate(self):
    """Returns the dilation rate."""
    return self._rate

@property
def padding(self):
    """Returns the padding algorithm."""
    return self._padding

@property
def w(self):
    """Returns the Variable containing the weight matrix."""
    self._ensure_is_connected()
    return self._w

@property
def b(self):
    """Returns the Variable containing the bias.

    Returns:
      Variable object containing the bias, from the most recent __call__.

    Raises:
      base.NotConnectedError: If the module has not been connected to the
        graph yet, meaning the variables do not exist.
      AttributeError: If the module does not use bias.
    """
    self._ensure_is_connected()
    if not self._use_bias:
        raise AttributeError(
            'No bias Variable in Conv2D Module when `use_bias=False`.')
    return self._b

@property
def has_bias(self):
    """Returns `True` if bias Variable is present in the module."""
    return self._use_bias

@property
def initializers(self):
    """Returns the initializers dictionary."""
    return self._initializers

@property
def partitioners(self):
    """Returns the partitioners dictionary."""
    return self._partitioners

@property
def regularizers(self):
    """Returns the regularizers dictionary."""
    return self._regularizers

@property
def mask(self):
    """Returns the mask."""
    return self._mask

@property
def data_format(self):
    """Returns the data format."""
    return self._data_format
# NOTE(review): the body of `clone` below is truncated in this extracted dump
# (ends mid-line with an ellipsis); the full original is not recoverable from
# this chunk, so the text is left byte-for-byte as found.
'Returns a cloned `Conv2D` module. Args: name: Optional string assigning name of cloned module. The default name is constructed by appending "_clone" to `self.module_name`. Returns: `Conv2D` module.'
def clone(self, name=None):
    if (name is None): name = (self.module_name + '_clone') return Conv2D(output_channels=self.output_channels, kernel_shape=self.kernel_shape, stride=self.stride, rate=self.rate, padding=self.padding, use_bias=self.has_bias, initializers=self.initializers, partitioners=self.partitioners, regularizers=self....
@property
def input_shape(self):
    """Returns the input shape (only available once connected)."""
    self._ensure_is_connected()
    return self._input_shape
# NOTE(review): the three entries below (Conv2D.transpose and
# Conv2DTranspose.__init__ / _build) are truncated in this extracted dump
# (they end mid-line with an ellipsis); the full original code is not
# recoverable from this chunk, so the text is left byte-for-byte as found.
'Returns matching `Conv2DTranspose` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv2DTranspose` module. Raises: base.NotSupportedError: If `rate` in any dimension > 1.'
def transpose(self, name=None):
    if any(((x > 1) for x in self._rate)): raise base.NotSupportedError('Cannot transpose a dilated convolution module.') if (name is None): name = (self.module_name + '_transpose') def output_shape(): if (self._data_format != DATA_FORMAT_NCHW): return self.inp...
'Constructs a `Conv2DTranspose module`. See the following documentation for an explanation of VALID versus SAME padding modes: https://www.tensorflow.org/api_guides/python/nn#Convolution Args: output_channels: Number of output channels. Can be either a number or a callable. In the latter case, since the function invoca...
def __init__(self, output_channels, output_shape=None, kernel_shape=None, stride=1, padding=SAME, use_bias=True, initializers=None, partitioners=None, regularizers=None, data_format=DATA_FORMAT_NHWC, custom_getter=None, name='conv_2d_transpose'):
    super(Conv2DTranspose, self).__init__(custom_getter=custom_getter, name=name) self._output_channels = output_channels if (output_shape is None): self._output_shape = None self._use_default_output_shape = True else: self._use_default_output_shape = False if callable(output...
'Connects the Conv2DTranspose module into the graph. If this is not the first time the module has been connected to the graph, the input Tensor provided here must have the same final 3 dimensions, in order for the existing variables to be the correct size for the multiplication. The batch size may differ for each conne...
def _build(self, inputs):
    self._input_shape = tuple(inputs.get_shape().as_list()) if (len(self._input_shape) != 4): raise base.IncompatibleShapeError('Input Tensor must have shape (batch_size, input_height, input_width, input_channels)') if (self._data_format == DATA_FORMAT_NCHW): input_channe...
@property
def output_channels(self):
    """Returns the number of output channels."""
    # Resolve and memoize a deferred (callable) channel count on first use.
    if callable(self._output_channels):
        self._output_channels = self._output_channels()
    return self._output_channels

@property
def kernel_shape(self):
    """Returns the kernel shape."""
    return self._kernel_shape

@property
def stride(self):
    """Returns the stride."""
    return self._stride

@property
def output_shape(self):
    """Returns the output shape."""
    # With no explicit shape the default is computed at connection time, so
    # the module must be connected before this can be answered.
    if self._output_shape is None:
        self._ensure_is_connected()
    # A deferred (callable) shape is resolved and memoized as a tuple.
    if callable(self._output_shape):
        self._output_shape = tuple(self._output_shape())
    return self._output_shape

@property
def padding(self):
    """Returns the padding algorithm."""
    return self._padding

@property
def w(self):
    """Returns the Variable containing the weight matrix."""
    self._ensure_is_connected()
    return self._w

@property
def b(self):
    """Returns the Variable containing the bias.

    Returns:
      Variable object containing the bias, from the most recent __call__.

    Raises:
      base.NotConnectedError: If the module has not been connected to the
        graph yet, meaning the variables do not exist.
      AttributeError: If the module does not use bias.
    """
    self._ensure_is_connected()
    if not self._use_bias:
        raise AttributeError(
            'No bias Variable in Conv2DTranspose Module when `use_bias=False`.')
    return self._b

@property
def has_bias(self):
    """Returns `True` if bias Variable is present in the module."""
    return self._use_bias

@property
def initializers(self):
    """Returns the initializers dictionary."""
    return self._initializers

@property
def partitioners(self):
    """Returns the partitioners dictionary."""
    return self._partitioners

@property
def regularizers(self):
    """Returns the regularizers dictionary."""
    return self._regularizers

@property
def input_shape(self):
    """Returns the input shape (only available once connected)."""
    self._ensure_is_connected()
    return self._input_shape
# NOTE(review): the three entries below (Conv2DTranspose.transpose and
# Conv1D.__init__ / _build) are truncated in this extracted dump (they end
# mid-line with an ellipsis); the full original code is not recoverable from
# this chunk, so the text is left byte-for-byte as found.
'Returns matching `Conv2D` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv2D` module.'
def transpose(self, name=None):
    if (name is None): name = (self.module_name + '_transpose') return Conv2D(output_channels=(lambda : self.input_shape[(-1)]), kernel_shape=self.kernel_shape, stride=self.stride[1:(-1)], padding=self.padding, use_bias=self._use_bias, initializers=self.initializers, partitioners=self.partitioners, regulari...
'Constructs a Conv1D module. See the following documentation for an explanation of VALID versus SAME padding modes: https://www.tensorflow.org/api_guides/python/nn#Convolution Args: output_channels: Number of output channels. `output_channels` can be either a number or a callable. In the latter case, since the function...
def __init__(self, output_channels, kernel_shape, stride=1, rate=1, padding=SAME, use_bias=True, initializers=None, partitioners=None, regularizers=None, custom_getter=None, name='conv_1d'):
    super(Conv1D, self).__init__(custom_getter=custom_getter, name=name) self._output_channels = output_channels self._input_shape = None self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, 1, 'kernel') if (isinstance(stride, collections.Iterable) and (len(stride) == 3)): self._s...
'Connects the Conv1D module into the graph, with input Tensor `inputs`. If this is not the first time the module has been connected to the graph, the input Tensor provided here must have the same final 2 dimensions, in order for the existing variables to be the correct size for the multiplication. The batch size may di...
def _build(self, inputs):
    self._input_shape = tuple(inputs.get_shape().as_list()) if (len(self._input_shape) != 3): raise base.IncompatibleShapeError('Input Tensor must have shape (batch_size, input_length, input_channels)') if (self._input_shape[2] is None): raise base.UnderspecifiedError('Numbe...
@property
def output_channels(self):
    """Returns the number of output channels."""
    # Resolve and memoize a deferred (callable) channel count on first use.
    if callable(self._output_channels):
        self._output_channels = self._output_channels()
    return self._output_channels

@property
def input_shape(self):
    """Returns the input shape (only available once connected)."""
    self._ensure_is_connected()
    return self._input_shape

@property
def kernel_shape(self):
    """Returns the kernel shape."""
    return self._kernel_shape

@property
def stride(self):
    """Returns the stride, with a 1 prepended and appended."""
    return (1,) + self._stride + (1,)

@property
def rate(self):
    """Returns the dilation rate."""
    return self._rate

@property
def padding(self):
    """Returns the padding algorithm."""
    return self._padding

@property
def w(self):
    """Returns the Variable containing the weight matrix."""
    # NOTE(review): unlike the 2D variants this accessor does not call
    # self._ensure_is_connected() first -- preserved as found.
    return self._w

@property
def b(self):
    """Returns the Variable containing the bias."""
    # NOTE(review): no connectivity / use_bias check here, unlike the 2D
    # variants -- preserved as found.
    return self._b

@property
def has_bias(self):
    """Returns `True` if bias Variable is present in the module."""
    return self._use_bias

@property
def initializers(self):
    """Returns the initializers dictionary."""
    return self._initializers

@property
def partitioners(self):
    """Returns the partitioners dictionary."""
    return self._partitioners

@property
def regularizers(self):
    """Returns the regularizers dictionary."""
    return self._regularizers
# NOTE(review): the three entries below (Conv1D.transpose and
# Conv1DTranspose.__init__ / _build) are truncated in this extracted dump
# (they end mid-line with an ellipsis); the full original code is not
# recoverable from this chunk, so the text is left byte-for-byte as found.
'Returns matching `Conv1DTranspose` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv1DTranspose` module. Raises: base.NotSupportedError: If `rate` in any dimension > 1.'
def transpose(self, name=None):
    if any(((x > 1) for x in self._rate)): raise base.NotSupportedError('Cannot transpose a dilated convolution module.') if (name is None): name = (self.module_name + '_transpose') return Conv1DTranspose(output_channels=(lambda : self.input_shape[(-1)]), output_shape=(lambda : se...
'Constructs a Conv1DTranspose module. See the following documentation for an explanation of VALID versus SAME padding modes: https://www.tensorflow.org/api_guides/python/nn#Convolution Args: output_channels: Number of output channels. Can be either a number or a callable. In the latter case, since the function invocati...
def __init__(self, output_channels, output_shape=None, kernel_shape=None, stride=1, padding=SAME, use_bias=True, initializers=None, partitioners=None, regularizers=None, custom_getter=None, name='conv_1d_transpose'):
    super(Conv1DTranspose, self).__init__(custom_getter=custom_getter, name=name) self._output_channels = output_channels if (output_shape is None): self._output_shape = None self._use_default_output_shape = True else: self._use_default_output_shape = False if callable(output...
'Connects the Conv1DTranspose module into the graph. If this is not the first time the module has been connected to the graph, the input Tensor provided here must have the same final 2 dimensions, in order for the existing variables to be the correct size for the multiplication. The batch size may differ for each conne...
def _build(self, inputs):
    self._input_shape = tuple(inputs.get_shape().as_list()) if (len(self._input_shape) != 3): raise base.IncompatibleShapeError('Input Tensor must have shape (batch_size, input_length, input_channels)') if (self._input_shape[2] is None): raise base.UnderspecifiedError('Numbe...
@property
def output_channels(self):
    """Returns the number of output channels."""
    # Resolve and memoize a deferred (callable) channel count on first use.
    if callable(self._output_channels):
        self._output_channels = self._output_channels()
    return self._output_channels

@property
def kernel_shape(self):
    """Returns the kernel shape."""
    return self._kernel_shape

@property
def stride(self):
    """Returns the stride."""
    return self._stride

@property
def output_shape(self):
    """Returns the output shape."""
    # With no explicit shape the default is computed at connection time, so
    # the module must be connected before this can be answered.
    if self._output_shape is None:
        self._ensure_is_connected()
    if callable(self._output_shape):
        # NOTE(review): the resolved value is not coerced to a tuple here,
        # unlike the matching Conv2DTranspose accessor -- preserved as found.
        self._output_shape = self._output_shape()
    return self._output_shape

@property
def input_shape(self):
    """Returns the input shape (only available once connected)."""
    self._ensure_is_connected()
    return self._input_shape

@property
def padding(self):
    """Returns the padding algorithm."""
    return self._padding

@property
def w(self):
    """Returns the Variable containing the weight matrix."""
    self._ensure_is_connected()
    return self._w

@property
def b(self):
    """Returns the Variable containing the bias.

    Returns:
      Variable object containing the bias, from the most recent __call__.

    Raises:
      base.NotConnectedError: If the module has not been connected to the
        graph yet, meaning the variables do not exist.
      AttributeError: If the module does not use bias.
    """
    self._ensure_is_connected()
    if not self._use_bias:
        raise AttributeError(
            'No bias Variable in Conv1DTranspose Module when `use_bias=False`.')
    return self._b

@property
def has_bias(self):
    """Returns `True` if bias Variable is present in the module."""
    return self._use_bias

@property
def initializers(self):
    """Returns the initializers dictionary."""
    return self._initializers

@property
def partitioners(self):
    """Returns the partitioners dictionary."""
    return self._partitioners

@property
def regularizers(self):
    """Returns the regularizers dictionary."""
    return self._regularizers
# NOTE(review): the body of `transpose` below is truncated in this extracted
# dump (ends mid-line with an ellipsis); the full original is not recoverable
# from this chunk, so the text is left byte-for-byte as found.
'Returns matching `Conv1D` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv1D` module.'
def transpose(self, name=None):
    if (name is None): name = (self.module_name + '_transpose') return Conv1D(output_channels=(lambda : self.input_shape[(-1)]), kernel_shape=self.kernel_shape, stride=(self._stride[2],), padding=self.padding, use_bias=self._use_bias, initializers=self.initializers, partitioners=self.partitioners, regulariz...
def __init__(self, output_channels, kernel_shape, stride=1, rate=1, use_bias=True, initializers=None, partitioners=None, regularizers=None, custom_getter=None, name='causal_conv_1d'):
    """Constructs a CausalConv1D module.

    All arguments are forwarded unchanged to the parent constructor; the
    padding is fixed to VALID (the caller cannot choose a padding mode).
    """
    super(CausalConv1D, self).__init__(
        output_channels=output_channels,
        kernel_shape=kernel_shape,
        stride=stride,
        rate=rate,
        padding=VALID,
        use_bias=use_bias,
        initializers=initializers,
        partitioners=partitioners,
        regularizers=regularizers,
        custom_getter=custom_getter,
        name=name)
# NOTE(review): the three entries below (CausalConv1D._build and
# InPlaneConv2D.__init__ / _build) are truncated in this extracted dump
# (they end mid-line with an ellipsis); the full original code is not
# recoverable from this chunk, so the text is left byte-for-byte as found.
'Connects the CausalConv1D module into the graph, with `inputs` as input. If this is not the first time the module has been connected to the graph, the input Tensor provided here must have the same final 2 dimensions, in order for the existing variables to be the correct size for the multiplication. The batch size may ...
def _build(self, inputs):
    self._input_shape = tuple(inputs.get_shape().as_list()) if (len(self._input_shape) != 3): raise base.IncompatibleShapeError('Input Tensor must have shape (batch_size, input_length, input_channels)') if (self._input_shape[2] is None): raise base.UnderspecifiedError('Numbe...
'Constructs an InPlaneConv2D module. See the following documentation for an explanation of VALID versus SAME padding modes: https://www.tensorflow.org/api_guides/python/nn#Convolution Args: kernel_shape: Iterable with 2 elements in the layout [filter_height, filter_width]; or integer that is used to define the list in ...
def __init__(self, kernel_shape, stride=1, padding=SAME, use_bias=True, initializers=None, partitioners=None, regularizers=None, custom_getter=None, name='in_plane_conv2d'):
    super(InPlaneConv2D, self).__init__(custom_getter=custom_getter, name=name) self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, 2, 'kernel') if (isinstance(stride, collections.Iterable) and (len(stride) == 4)): if (not (stride[0] == stride[3] == 1)): raise ValueError('Inv...
'Connects the module into the graph, with input Tensor `inputs`. Args: inputs: A 4D Tensor of shape: [batch_size, input_height, input_width, input_channels]. Returns: A 4D Tensor of shape: [batch_size, output_height, output_width, input_channels]. Raises: ValueError: If connecting the module into the graph any time aft...
def _build(self, inputs):
    self._input_shape = tuple(inputs.get_shape().as_list()) if (len(self._input_shape) != 4): raise base.IncompatibleShapeError('Input Tensor must have shape (batch_size, input_height, input_width, input_channels)') if (self._input_shape[3] is None): raise base.Incompatib...
@property
def input_channels(self):
    """Returns the number of input channels (only available once connected)."""
    self._ensure_is_connected()
    return self._input_channels

@property
def output_channels(self):
    """Returns the number of output channels i.e. number of input channels."""
    # In-plane convolution leaves the channel count unchanged, so this
    # reports the connected-time input channel count.
    self._ensure_is_connected()
    return self._input_channels

@property
def input_shape(self):
    """Returns the input shape (only available once connected)."""
    self._ensure_is_connected()
    return self._input_shape

@property
def kernel_shape(self):
    """Returns the kernel shape."""
    return self._kernel_shape

@property
def stride(self):
    """Returns the stride."""
    return self._stride

@property
def padding(self):
    """Returns the padding algorithm."""
    return self._padding

@property
def w(self):
    """Returns the Variable containing the weight matrix."""
    self._ensure_is_connected()
    return self._w

@property
def b(self):
    """Returns the Variable containing the bias.

    Returns:
      Variable object containing the bias, from the most recent __call__.

    Raises:
      base.NotConnectedError: If the module has not been connected to the
        graph yet, meaning the variables do not exist.
      AttributeError: If the module does not use bias.
    """
    self._ensure_is_connected()
    if not self._use_bias:
        raise AttributeError(
            'No bias Variable in InPlaneConv2D Module when `use_bias=False`.')
    return self._b

@property
def has_bias(self):
    """Returns `True` if bias Variable is present in the module."""
    return self._use_bias

@property
def initializers(self):
    """Returns the initializers dictionary."""
    return self._initializers

@property
def partitioners(self):
    """Returns the partitioners dictionary."""
    return self._partitioners

@property
def regularizers(self):
    """Returns the regularizers dictionary."""
    return self._regularizers
# NOTE(review): the two entries below (DepthwiseConv2D.__init__ and
# DepthwiseConv2D._build) are truncated in this extracted dump (they end
# mid-line with an ellipsis); the full original code is not recoverable from
# this chunk, so the text is left byte-for-byte as found.
'Constructs a DepthwiseConv2D module. See the following documentation for an explanation of VALID versus SAME padding modes: https://www.tensorflow.org/api_guides/python/nn#Convolution Args: channel_multiplier: Number of channels to expand convolution to. Must be an integer. Must be > 0. When `channel_multiplier` is se...
def __init__(self, channel_multiplier, kernel_shape, stride=1, padding=SAME, use_bias=True, initializers=None, partitioners=None, regularizers=None, custom_getter=None, name='conv_2d_depthwise'):
    super(DepthwiseConv2D, self).__init__(custom_getter=custom_getter, name=name) if ((not isinstance(channel_multiplier, numbers.Integral)) or (channel_multiplier < 1)): raise ValueError(('channel_multiplier (=%d), must be integer >= 1' % channel_multiplier)) self._channel_multiplier ...
'Connects the module into the graph, with input Tensor `inputs`. If this is not the first time the module has been connected to the graph, the input Tensor provided here must have the same final 3 dimensions, in order for the existing variables to be the correct size for the multiplication. The batch size may differ fo...
def _build(self, inputs):
    self._input_shape = tuple(inputs.get_shape().as_list()) if (len(self._input_shape) != 4): raise base.IncompatibleShapeError('Input Tensor must have shape (batch_size, input_height, input_width, input_channels)') if (self._input_shape[3] is None): raise base.Incompatib...
@property
def input_channels(self):
    """Returns the number of input channels (only available once connected)."""
    self._ensure_is_connected()
    return self._input_channels

@property
def output_channels(self):
    """Returns the number of output channels (only available once connected)."""
    self._ensure_is_connected()
    return self._output_channels

@property
def input_shape(self):
    """Returns the input shape (only available once connected)."""
    self._ensure_is_connected()
    return self._input_shape

@property
def kernel_shape(self):
    """Returns the kernel shape."""
    return self._kernel_shape

@property
def channel_multiplier(self):
    """Returns the channel multiplier."""
    return self._channel_multiplier

@property
def stride(self):
    """Returns the stride."""
    return self._stride

@property
def padding(self):
    """Returns the padding algorithm."""
    return self._padding

@property
def w(self):
    """Returns the Variable containing the weight matrix."""
    self._ensure_is_connected()
    return self._w

@property
def b(self):
    """Returns the Variable containing the bias.

    Returns:
      Variable object containing the bias, from the most recent __call__.

    Raises:
      base.NotConnectedError: If the module has not been connected to the
        graph yet, meaning the variables do not exist.
      AttributeError: If the module does not use bias.
    """
    self._ensure_is_connected()
    if not self._use_bias:
        raise AttributeError(
            'No bias Variable in DepthwiseConv2D Module when `use_bias=False`.')
    return self._b

@property
def has_bias(self):
    """Returns `True` if bias Variable is present in the module."""
    return self._use_bias

@property
def initializers(self):
    """Returns the initializers dictionary."""
    return self._initializers

@property
def partitioners(self):
    """Returns the partitioners dictionary."""
    return self._partitioners

@property
def regularizers(self):
    """Returns the regularizers dictionary."""
    return self._regularizers
# NOTE(review): the entry below (SeparableConv2D.__init__) is truncated in
# this extracted dump (docstring and body end mid-line with an ellipsis); the
# full original code is not recoverable from this chunk, so the text is left
# byte-for-byte as found.
'Constructs a SeparableConv2D module. See the following documentation for an explanation of VALID versus SAME padding modes: https://www.tensorflow.org/api_guides/python/nn#Convolution Args: output_channels: Number of output channels. Must be an integer. channel_multiplier: Number of channels to expand pointwise (depth...
def __init__(self, output_channels, channel_multiplier, kernel_shape, stride=1, padding=SAME, use_bias=True, initializers=None, partitioners=None, regularizers=None, custom_getter=None, name='Separable_conv2d'):
    super(SeparableConv2D, self).__init__(custom_getter=custom_getter, name=name) if ((not isinstance(output_channels, numbers.Integral)) or (output_channels < 1)): raise ValueError('output_channels (={}), must be integer >= 1'.format(output_channels)) self._output_channels = output_ch...