| | """Decoder self-attention layer definition."""
|
| | from typing import Optional, Tuple
|
| |
|
| | import torch
|
| | from torch import nn
|
| |
|
| |
|
class DecoderLayer(nn.Module):
    """Single decoder layer module.

    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` instance can be used as the argument.
        src_attn (torch.nn.Module): Inter-attention module instance.
            `MultiHeadedAttention` instance can be used as the argument.
            If `None` is passed, inter-attention is not used, as in
            CIF, GPT, and other decoder-only models.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward` instance can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool):
            True: use layer_norm before each sub-block.
            False: use layer_norm after each sub-block.
    """

    def __init__(
        self,
        size: int,
        self_attn: nn.Module,
        src_attn: Optional[nn.Module],
        feed_forward: nn.Module,
        dropout_rate: float,
        normalize_before: bool = True,
    ):
        """Construct a DecoderLayer object."""
        super().__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.norm1 = nn.LayerNorm(size, eps=1e-5)
        self.norm2 = nn.LayerNorm(size, eps=1e-5)
        self.norm3 = nn.LayerNorm(size, eps=1e-5)
        self.dropout = nn.Dropout(dropout_rate)
        self.normalize_before = normalize_before

    def forward(
        self,
        tgt: torch.Tensor,
        tgt_mask: torch.Tensor,
        memory: torch.Tensor,
        memory_mask: torch.Tensor,
        cache: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute decoded features.

        Args:
            tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size).
            tgt_mask (torch.Tensor): Mask for input tensor
                (#batch, maxlen_out, maxlen_out).
            memory (torch.Tensor): Encoded memory
                (#batch, maxlen_in, size).
            memory_mask (torch.Tensor): Encoded memory mask
                (#batch, maxlen_in).
            cache (torch.Tensor): Cached output of this layer for the
                already-decoded prefix (#batch, maxlen_out - 1, size).

        Returns:
            torch.Tensor: Output tensor (#batch, maxlen_out, size).
            torch.Tensor: Mask for output tensor
                (#batch, maxlen_out, maxlen_out).
            torch.Tensor: Encoded memory (#batch, maxlen_in, size).
            torch.Tensor: Encoded memory mask (#batch, maxlen_in).

        """
        residual = tgt
        if self.normalize_before:
            tgt = self.norm1(tgt)

        if cache is None:
            tgt_q = tgt
            tgt_q_mask = tgt_mask
        else:
            # Incremental decoding: only the newest position is queried;
            # the already-decoded positions are restored from the cache below.
            assert cache.shape == (
                tgt.shape[0],
                tgt.shape[1] - 1,
                self.size,
            ), f"{cache.shape} == {(tgt.shape[0], tgt.shape[1] - 1, self.size)}"
            tgt_q = tgt[:, -1:, :]
            residual = residual[:, -1:, :]
            tgt_q_mask = tgt_mask[:, -1:, :]

        x = residual + self.dropout(
            self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)[0])
        if not self.normalize_before:
            x = self.norm1(x)

        if self.src_attn is not None:
            residual = x
            if self.normalize_before:
                x = self.norm2(x)
            x = residual + self.dropout(
                self.src_attn(x, memory, memory, memory_mask)[0])
            if not self.normalize_before:
                x = self.norm2(x)

        residual = x
        if self.normalize_before:
            x = self.norm3(x)
        x = residual + self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm3(x)

        if cache is not None:
            x = torch.cat([cache, x], dim=1)

        return x, tgt_mask, memory, memory_mask
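

# --- Usage sketch (illustration only; not part of the original module) -----
# A minimal, hedged example of driving DecoderLayer, both for a full forward
# pass and for cache-based incremental decoding. The _ToyAttention and
# _ToyFeedForward classes are hypothetical stand-ins that only mimic the call
# signatures DecoderLayer expects; in practice, modules such as
# MultiHeadedAttention and PositionwiseFeedForward from the surrounding
# package would be passed instead. Mask shapes below are assumptions for this
# sketch; the stand-in attention ignores its mask argument.
if __name__ == "__main__":

    class _ToyAttention(nn.Module):
        """Stand-in attention called as attn(q, k, v, mask) -> (out, weights)."""

        def __init__(self, size: int):
            super().__init__()
            self.proj = nn.Linear(size, size)

        def forward(self, query, key, value, mask):
            # Ignores key/value/mask; only projects the query so shapes match.
            return self.proj(query), None

    class _ToyFeedForward(nn.Module):
        """Stand-in position-wise feed-forward: (B, T, size) -> (B, T, size)."""

        def __init__(self, size: int):
            super().__init__()
            self.net = nn.Sequential(
                nn.Linear(size, size * 4), nn.ReLU(), nn.Linear(size * 4, size))

        def forward(self, x):
            return self.net(x)

    size, batch, maxlen_out, maxlen_in = 8, 2, 5, 7
    layer = DecoderLayer(
        size=size,
        self_attn=_ToyAttention(size),
        src_attn=_ToyAttention(size),
        feed_forward=_ToyFeedForward(size),
        dropout_rate=0.1,
    )
    layer.eval()  # disable dropout so the sketch is deterministic

    tgt = torch.randn(batch, maxlen_out, size)
    tgt_mask = torch.ones(batch, maxlen_out, maxlen_out, dtype=torch.bool)
    memory = torch.randn(batch, maxlen_in, size)
    memory_mask = torch.ones(batch, 1, maxlen_in, dtype=torch.bool)

    # Full (non-incremental) forward pass over all target positions.
    x, _, _, _ = layer(tgt, tgt_mask, memory, memory_mask)
    print(x.shape)  # torch.Size([2, 5, 8])

    # Incremental decoding: the cache holds this layer's output for the
    # already-decoded positions, so only the newest position is recomputed.
    cache = None
    for t in range(1, maxlen_out + 1):
        step_mask = torch.ones(batch, t, t, dtype=torch.bool)
        out, _, _, _ = layer(tgt[:, :t, :], step_mask, memory, memory_mask, cache)
        cache = out  # (batch, t, size); fed back as the cache at the next step
    print(cache.shape)  # torch.Size([2, 5, 8])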