| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/compat.py
|
import torch
import functools
def custom_fwd(fwd=None, **kwargs):
"""
Helper decorator for ``forward`` methods of custom autograd functions (subclasses of
:class:`torch.autograd.Function`). See the :ref:`example page<amp-custom-examples>` for more detail.
Arguments:
cast_inputs (:class:`torch.dtype` or None, optional, default=None): If not ``None``,
when ``forward`` runs in an autocast-enabled region, casts incoming
floating-point CUDA Tensors to the target dtype (non-floating-point Tensors are not affected),
then executes ``forward`` with autocast disabled.
If ``None``, ``forward``'s internal ops execute with the current autocast state.
.. note::
If the decorated ``forward`` is called outside an autocast-enabled region,
:func:`custom_fwd<custom_fwd>` is a no-op and ``cast_inputs`` has no effect.
"""
if fwd is None:
if len(kwargs) == 0:
cast_inputs = None
else:
assert len(kwargs) == 1
cast_inputs = kwargs["cast_inputs"]
return functools.partial(custom_fwd, cast_inputs=cast_inputs)
if len(kwargs) == 0:
cast_inputs = None
else:
assert len(kwargs) == 1
cast_inputs = kwargs["cast_inputs"]
    @functools.wraps(fwd)
    def decorate_fwd(*args, **kwargs):
        # Compatibility fallback: unlike torch.cuda.amp.custom_fwd, this shim does not
        # cast inputs; ``cast_inputs`` is accepted for API compatibility but otherwise ignored.
        return fwd(*args, **kwargs)
    return decorate_fwd
def custom_bwd(bwd):
"""
Helper decorator for backward methods of custom autograd functions (subclasses of
:class:`torch.autograd.Function`).
Ensures that ``backward`` executes with the same autocast state as ``forward``.
See the :ref:`example page<amp-custom-examples>` for more detail.
"""
    @functools.wraps(bwd)
    def decorate_bwd(*args, **kwargs):
        # Compatibility fallback: simply calls ``bwd`` without touching the autocast state.
        return bwd(*args, **kwargs)
return decorate_bwd
def _decorator_helper(orig_fn, cast_fn, wrap_fn):
    # ``cast_fn`` and ``wrap_fn`` are kept for signature compatibility with the Apex helpers
    # but are unused in this fallback: the wrapped function runs unchanged.
    def wrapper(*args, **kwargs):
        return orig_fn(*args, **kwargs)
    return wrapper
def half_function(fn):
    return _decorator_helper(fn, None, None)
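# Hypothetical usage sketch (not part of the original module): the decorators above are
# drop-in stand-ins for torch.cuda.amp.custom_fwd / custom_bwd on older PyTorch builds,
# so a custom autograd Function can be written once and decorated either way. The class
# below is illustrative only.
class _ExampleScale(torch.autograd.Function):
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)  # accepted but ignored by this compat shim
    def forward(ctx, x, scale):
        ctx.scale = scale
        return x * scale
    @staticmethod
    @custom_bwd
    def backward(ctx, grad_output):
        return grad_output * ctx.scale, None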
| 2,177
| 31.507463
| 106
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/fused_adam.py
|
import torch
class MultiTensorApply(object):
available = False
warned = False
def __init__(self, chunk_size):
try:
import fused_optim
MultiTensorApply.available = True
self.chunk_size = chunk_size
except ImportError as err:
MultiTensorApply.available = False
MultiTensorApply.import_err = err
def check_avail(self):
        if not MultiTensorApply.available:
raise RuntimeError(
"Attempted to call MultiTensorApply method, but MultiTensorApply "
"is not available, possibly because Apex was installed without "
"--cpp_ext --cuda_ext. Original import error message:",
MultiTensorApply.import_err)
def __call__(self, op, noop_flag_buffer, tensor_lists, *args):
self.check_avail()
return op(self.chunk_size,
noop_flag_buffer,
tensor_lists,
*args)
multi_tensor_applier = MultiTensorApply(2048 * 32)
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This version of fused Adam implements 2 fusions.
* Fusion of the Adam update's elementwise operations
* A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
:class:`apex.optimizers.FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
or ``torch.optim.Adam`` with ``adam_w_mode=False``::
opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
...
opt.step()
:class:`apex.optimizers.FusedAdam` may be used with or without Amp. If you wish to use :class:`FusedAdam` with Amp,
you may choose any ``opt_level``::
opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
        model, opt = amp.initialize(model, opt, opt_level="O0")  # or "O1", "O2"
...
opt.step()
In general, ``opt_level="O1"`` is recommended.
.. warning::
A previous version of :class:`FusedAdam` allowed a number of additional arguments to ``step``. These additional arguments
are now deprecated and unnecessary.
    Adam was proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
        adam_w_mode (boolean, optional): whether to use decoupled weight decay
            (also known as AdamW) instead of L2 regularization. (default: True)
        set_grad_none (bool, optional): whether to set grads to None when the
            ``zero_grad()`` method is called. (default: True)
.. _Adam - A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, bias_correction=True,
betas=(0.9, 0.999), eps=1e-8, adam_w_mode=True,
weight_decay=0., amsgrad=False, set_grad_none=True):
if amsgrad:
raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
defaults = dict(lr=lr, bias_correction=bias_correction,
betas=betas, eps=eps, weight_decay=weight_decay)
super(FusedAdam, self).__init__(params, defaults)
self.adam_w_mode = 1 if adam_w_mode else 0
self.set_grad_none = set_grad_none
if multi_tensor_applier.available:
import fused_optim
# Skip buffer
self._dummy_overflow_buf = torch.cuda.IntTensor([0])
self.multi_tensor_adam = fused_optim.multi_tensor_adam
else:
raise RuntimeError('apex.optimizers.FusedAdam requires cuda extensions')
def zero_grad(self, set_to_none=True):
if set_to_none:
for group in self.param_groups:
for p in group['params']:
p.grad = None
else:
super(FusedAdam, self).zero_grad()
def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
"""
if any(p is not None for p in [grads, output_params, scale, grad_norms]):
raise RuntimeError(
'FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.')
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
            # assume the same step across the group for now to simplify things
            # a per-parameter step could be supported by making it a tensor, or by passing a list into the kernel
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
# create lists for multi-tensor apply
g_16, p_16, m_16, v_16 = [], [], [], []
g_32, p_32, m_32, v_32 = [], [], [], []
for p in group['params']:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError(
'FusedAdam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if p.dtype == torch.float16:
g_16.append(p.grad.data)
p_16.append(p.data)
m_16.append(state['exp_avg'])
v_16.append(state['exp_avg_sq'])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
p_32.append(p.data)
m_32.append(state['exp_avg'])
v_32.append(state['exp_avg_sq'])
else:
                    raise RuntimeError('FusedAdam only supports fp16 and fp32.')
if len(g_16) > 0:
multi_tensor_applier(self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_16, p_16, m_16, v_16],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
self.adam_w_mode,
bias_correction,
group['weight_decay'])
if len(g_32) > 0:
multi_tensor_applier(self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_32, p_32, m_32, v_32],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
self.adam_w_mode,
bias_correction,
group['weight_decay'])
return loss
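# Usage sketch (illustrative, not part of the original file): FusedAdam is meant as a
# drop-in replacement for torch.optim.AdamW (or torch.optim.Adam with adam_w_mode=False).
# It requires the fused_optim CUDA extension and CUDA parameters; the tiny model below is
# purely hypothetical.
def _fused_adam_usage_example():
    model = torch.nn.Linear(16, 16).cuda()
    optimizer = FusedAdam(model.parameters(), lr=1e-3, betas=(0.9, 0.999),
                          weight_decay=0.01, adam_w_mode=True)
    x = torch.randn(8, 16, device='cuda')
    loss = model(x).pow(2).mean()
    loss.backward()
    optimizer.step()                      # one fused multi-tensor launch per dtype bucket
    optimizer.zero_grad(set_to_none=True)
    return loss.item()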
| 8,746
| 44.557292
| 145
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/encdec_attention_func.py
|
"""
Encoder-Decoder multi-head attention.
Code is heavily adapted from apex
https://github.com/NVIDIA/apex/tree/master/apex/contrib/csrc/multihead_attn
"""
import torch
import torch.nn.functional as F
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .compat import custom_fwd, custom_bwd
try:
import encdec_multihead_attn_cuda
except (ModuleNotFoundError, ImportError) as e:
encdec_multihead_attn_cuda = None
def rotate_half(x):
x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in torch < 1.8.0
# only 1 term this time
def apply_rotary_pos_emb(q, cos, sin):
return (q * cos) + (rotate_half(q) * sin)
def rotate_backward(dx):
dx2, dx1 = dx[..., :dx.shape[-1] // 2], dx[..., dx.shape[-1] // 2:]
return torch.cat((dx1, -dx2), dim=dx1.ndim - 1)
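# Shape sketch for the rotary helpers above (illustrative, not part of the original file):
# cos/sin must broadcast against the [len, bsz*heads, head_dim] layout that the queries and
# keys use in the forward pass below; the frequency construction here is only an example.
def _rotary_usage_example():
    len_q, bsz, heads, head_dim = 5, 2, 4, 8
    q = torch.randn(len_q, bsz * heads, head_dim)
    inv_freq = 1. / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
    freqs = torch.einsum('i,j->ij', torch.arange(len_q).float(), inv_freq)
    emb = torch.cat((freqs, freqs), dim=-1)                     # [len_q, head_dim]
    cos, sin = emb.cos().unsqueeze(1), emb.sin().unsqueeze(1)   # [len_q, 1, head_dim]
    return apply_rotary_pos_emb(q, cos, sin)                    # same shape as q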
class EncdecAttnFunc(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, recompute, is_training, heads, inputs_q, inputs_kv,
input_weights_q, input_weights_kv, output_weights,
mask, dropout_prob,
incremental, incremental_cache,
rotary_pos_enc, pos_emb_q, pos_emb_k,
double_precision, return_coverage):
inputs_q = inputs_q.contiguous()
inputs_kv = inputs_kv.contiguous()
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([]).to(inputs_q.device)
head_dim = inputs_q.size(2) // heads
scale_t = torch.tensor([head_dim ** -0.5])
use_mask = (mask is not None)
        # the projection weights may be non-contiguous when they are factorized, so force contiguity here
input_weights_q = input_weights_q.contiguous()
input_weights_kv = input_weights_kv.contiguous()
output_weights = output_weights.contiguous()
bsz, len_q, len_k = inputs_q.size(1), inputs_q.size(0), inputs_kv.size(0)
ctx.incremental = incremental
ctx.fused_softmax_dropout = False
ctx.fused_all = False
ctx.len_q = len_q
ctx.len_k = len_k
ctx.double_precision = double_precision
ctx.return_coverage = return_coverage
ctx.recompute = recompute
ctx.rotary_pos_enc = rotary_pos_enc
if mask is not None:
# Self Attention Pad Mask
mask = mask.to(torch.bool)
if len(mask.shape) == 3:
mask = mask.unsqueeze(1) # for the head dimension
else:
mask = mask.unsqueeze(1).unsqueeze(2) # for the head and query dimension
if encdec_multihead_attn_cuda is not None and not incremental and len_k <= 2048 \
and inputs_q.type() == 'torch.cuda.HalfTensor' and not rotary_pos_enc:
input_lin_q_results, input_lin_kv_results, \
softmax_results, dropout_results, dropout_mask, \
matmul2_results, outputs \
= encdec_multihead_attn_cuda.forward(is_training, heads, inputs_q, inputs_kv,
input_weights_q, input_weights_kv,
output_weights, mask, dropout_prob)
sinq, cosq, = null_tensor, null_tensor
sink, cosk, = null_tensor, null_tensor
if not ctx.recompute:
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_q_results,
input_lin_kv_results,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
dropout_mask,
dropout_prob_t,
sinq, cosq, sink, cosk)
else:
ctx.save_for_backward(heads_t,
scale_t,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
dropout_mask,
dropout_prob_t,
mask,
sinq, cosq, sink, cosk)
ctx.fused_all = True
if return_coverage:
return outputs, softmax_results
else:
return (outputs,)
# Input Linear GEMM Q
# input1: (activations) [seql_q, bsz, embed_dim] -> [len_q * bsz, embed_dim]
# input2: (weights) [embed_dim, embed_dim]. transpose(0, 1)
# output: [len_q * bsz, embed_dim] -> [seql_q, bsz, embed_dim]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
input_lin_q_results = torch.mm(inputs_q.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2)),
input_weights_q.transpose(0, 1))
input_lin_q_results = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1), input_weights_q.size(0))
queries = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1) * heads, head_dim)
# Input Linear GEMM KV
# input1: (activations) [seql_k, bsz, embed_dim(1024)]
# input2: (weights) [embed_dim*2 (2048), embed_dim (1024)] (transpose [0,1])
# output: [seql_k, bsz, embed_dim*2]
# GEMM: ( (seql_k*seqs) x embed_dim ) x ( embed_dim x embed_dim*2 ) = (seql_k*seqs x embed_dim*2)
        # Slice out k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
if incremental and ('c_k' in incremental_cache and 'c_v' in incremental_cache):
keys = incremental_cache['c_k']
values = incremental_cache['c_v']
keys = keys.view(len_k, bsz * heads, head_dim)
values = values.view(len_k, bsz * heads, head_dim)
input_lin_kv_results = torch.stack([keys, values], dim=-2)
else:
input_lin_kv_results = torch.mm(inputs_kv.view(inputs_kv.size(0) * inputs_kv.size(1), inputs_kv.size(2)),
input_weights_kv.transpose(0, 1))
input_lin_kv_results = input_lin_kv_results.view(inputs_kv.size(0), inputs_kv.size(1),
input_weights_kv.size(0))
input_lin_kv_results = input_lin_kv_results.view(inputs_kv.size(0), inputs_kv.size(1) * heads, 2, head_dim)
keys = input_lin_kv_results[:, :, 0, :]
values = input_lin_kv_results[:, :, 1, :]
if incremental:
keys = keys.contiguous().view(len_k, bsz, heads * head_dim)
values = values.contiguous().view(len_k, bsz, heads * head_dim)
incremental_cache['c_k'] = keys
incremental_cache['c_v'] = values
keys = keys.view(len_k, bsz * heads, head_dim)
values = values.view(len_k, bsz * heads, head_dim)
# TODO: rotary pos encoding
if rotary_pos_enc:
assert pos_emb_q is not None and pos_emb_k is not None
cosq, sinq = pos_emb_q
queries = apply_rotary_pos_emb(queries, cosq, sinq)
cosk, sink = pos_emb_k
keys_ = apply_rotary_pos_emb(keys, cosk, sink)
keys.copy_(keys_)
else:
sinq, cosq = null_tensor, null_tensor
sink, cosk = null_tensor, null_tensor
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
# Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
if queries.is_cuda:
matmul1_results = torch.empty((queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype,
device=queries.device)
matmul1_results = torch.baddbmm(matmul1_results, queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results, beta=0.0, alpha=scale_t[0])
else:
matmul1_results = torch.matmul(queries.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2))
matmul1_results.mul_(scale_t[0])
if mask is not None:
batches, seql_q, seql_k = matmul1_results.size()
bsz = int(batches / heads)
matmul1_results = matmul1_results.view(bsz, heads, seql_q, seql_k)
# after unsqueezing the mask should have size [bsz x 1 x 1 x seql_k]
matmul1_results = matmul1_results.masked_fill_(mask, float('-inf'))
matmul1_results = matmul1_results.view(bsz * heads, seql_q, seql_k)
if matmul1_results.type() == 'torch.cuda.HalfTensor':
softmax_results = F.softmax(matmul1_results, dim=-1, dtype=torch.float32).type_as(matmul1_results)
else:
softmax_results = F.softmax(matmul1_results, dim=-1)
nan_mask = torch.isnan(softmax_results)
if nan_mask.any():
softmax_results.masked_fill_(nan_mask, 0)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1. - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
# Matmul2 Batched GEMMs
        # The output tensor specification is needed here to specify the non-standard output.
        # Because PyTorch cannot perform autograd through an op with an explicitly specified
        # output tensor, a hand-written backward pass is required.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
if queries.is_cuda:
matmul2_results = torch.empty((dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype, device=dropout_results.device)
torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results.transpose(1, 0))
else:
matmul2_results = torch.matmul(dropout_results, values.transpose(0, 1)).transpose(0, 1)
# view from [len_q, bsz*heads, head_dim] to [len_q, bsz, embed]
matmul2_results = matmul2_results.contiguous().view(inputs_q.size(0), inputs_q.size(1), inputs_q.size(2))
# Output Linear GEMM
# Input1: (activations) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
outputs = torch.mm(matmul2_results.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2)),
output_weights.transpose(0, 1))
outputs = outputs.view(inputs_q.size(0), inputs_q.size(1), output_weights.size(0))
if not ctx.recompute:
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_q_results,
input_lin_kv_results,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
dropout_mask,
dropout_prob_t,
sinq, cosq, sink, cosk)
else:
ctx.save_for_backward(heads_t,
scale_t,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
dropout_mask,
dropout_prob_t,
mask, sinq, cosq, sink, cosk)
del input_lin_q_results, queries
del input_lin_kv_results, keys, values
del matmul1_results, matmul2_results
del softmax_results, dropout_results
dropout_results = null_tensor
if return_coverage:
return (outputs, dropout_results)
else:
return (outputs,)
@staticmethod
@custom_bwd
def backward(ctx, *output_grads):
incremental = ctx.incremental
len_q = ctx.len_q
len_key = ctx.len_k
if ctx.return_coverage:
output_grads, coverage_grads = output_grads
else:
output_grads = output_grads[0]
if ctx.recompute:
heads_t, scale_t, \
inputs_q, inputs_kv, \
input_weights_q, input_weights_kv, output_weights, \
dropout_mask, dropout_prob_t, pad_mask, \
sinq, cosq, sink, cosk, \
= ctx.saved_tensors
else:
heads_t, scale_t, matmul2_results, dropout_results, softmax_results, \
input_lin_q_results, input_lin_kv_results, \
inputs_q, inputs_kv, \
input_weights_q, input_weights_kv, output_weights, \
dropout_mask, dropout_prob_t, \
sinq, cosq, sink, cosk, \
= ctx.saved_tensors
pad_mask = None
head_dim = inputs_q.size(2) // heads_t[0]
bsz = inputs_q.size(1)
if ctx.fused_all:
assert encdec_multihead_attn_cuda is not None and len_key <= 2048
if not ctx.recompute:
input_q_grads, \
input_kv_grads, \
input_weight_q_grads, \
input_weight_kv_grads, \
output_weight_grads \
= encdec_multihead_attn_cuda.backward(heads_t[0], output_grads, matmul2_results,
dropout_results,
softmax_results, input_lin_q_results,
input_lin_kv_results,
inputs_q, inputs_kv, input_weights_q,
input_weights_kv,
output_weights, dropout_mask,
dropout_prob_t[0])
else:
input_q_grads, \
input_kv_grads, \
input_weight_q_grads, \
input_weight_kv_grads, \
output_weight_grads \
= encdec_multihead_attn_cuda.backward_recompute(heads_t[0], output_grads,
inputs_q, inputs_kv,
input_weights_q,
input_weights_kv,
output_weights, dropout_mask,
pad_mask,
dropout_prob_t[0])
return None, None, None, \
input_q_grads, input_kv_grads, \
input_weight_q_grads, input_weight_kv_grads, output_weight_grads, \
None, None, None, None, None, None, None, None, None
if ctx.recompute:
assert ctx.incremental is not True
heads = heads_t[0]
# Recomputing the tensors in the forward pass here
input_lin_q_results = torch.mm(inputs_q.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2)),
input_weights_q.transpose(0, 1))
input_lin_q_results = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1), input_weights_q.size(0))
queries = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1) * heads, head_dim)
input_lin_kv_results = torch.mm(inputs_kv.view(inputs_kv.size(0) * inputs_kv.size(1), inputs_kv.size(2)),
input_weights_kv.transpose(0, 1))
input_lin_kv_results = input_lin_kv_results.view(inputs_kv.size(0), inputs_kv.size(1),
input_weights_kv.size(0))
input_lin_kv_results = input_lin_kv_results.view(inputs_kv.size(0), inputs_kv.size(1) * heads, 2, head_dim)
keys = input_lin_kv_results[:, :, 0, :]
values = input_lin_kv_results[:, :, 1, :]
matmul1_results = torch.empty((queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype,
device=queries.device)
matmul1_results.baddbmm_(queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
beta=0.0, alpha=scale_t[0])
if pad_mask is not None:
batches, seql_q, seql_k = matmul1_results.size()
bsz = int(batches / heads)
matmul1_results = matmul1_results.view(bsz, heads, seql_q, seql_k)
# after unsqueezing the mask should have size [bsz x 1 x 1 x seql_k]
matmul1_results = matmul1_results.masked_fill_(pad_mask, float('-inf'))
matmul1_results = matmul1_results.view(bsz * heads, seql_q, seql_k)
if matmul1_results.type() == 'torch.cuda.HalfTensor':
softmax_results = F.softmax(matmul1_results, dim=-1, dtype=torch.float32).type_as(matmul1_results)
else:
softmax_results = F.softmax(matmul1_results, dim=-1)
if dropout_prob_t[0] > 0:
pinv = 1.0 / (1.0 - dropout_prob_t[0])
dropout_results = softmax_results * dropout_mask * pinv
else:
dropout_results = softmax_results
matmul2_results = torch.empty((dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype, device=dropout_results.device)
torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results.transpose(1, 0))
matmul2_results = matmul2_results.contiguous().view(inputs_q.size(0), inputs_q.size(1), inputs_q.size(2))
# Slice out k,v from one big Input Linear output (should only impact meta data, no copies!)
# Batch sizes and heads are combined to make the batch of the Batched GEMM
# input_lin_kv_results: [seql_k, bsz, heads(16), 2, head_dim(64)]
# input_lin_kv_results: [seql_k, batches=bsz*heads, 2, head_dim]
queries = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1) * heads_t[0], head_dim)
input_lin_kv_results = input_lin_kv_results.view(inputs_kv.size(0), inputs_kv.size(1) * heads_t[0], 2, head_dim)
keys = input_lin_kv_results[:, :, 0, :]
values = input_lin_kv_results[:, :, 1, :]
# Slice out k,v from one big set of gradients entering the input linear's bprop
# (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared before hand to properly slice out query, key, and value grads.
input_lin_kv_results_grads = torch.empty_like(input_lin_kv_results)
queries_grads = torch.empty_like(queries)
keys_grads = input_lin_kv_results_grads[:, :, 0, :]
values_grads = input_lin_kv_results_grads[:, :, 1, :]
# Output Linear GEMM - DGRAD
# Input1: (data grads) [seql_q, bsz, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
# Output Linear GEMM - WGRAD
# Input1: (data grads) [seql_q*seqs, embed_dim=heads*head_dim] transpose(0,1)
# Input2: (activations) [seql_q*seqs, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = ( embed_dim x embed_dim )
output_weight_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)))
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1) * heads_t[0],
head_dim).transpose(0, 1)
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
# Matmul2 - DGRAD2
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
        # Mask and Scaling for Dropout (not a publicly documented op)
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
        # Softmax Grad (not a publicly documented op)
try:
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results.dtype)
except TypeError:
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
torch.baddbmm(queries_grads.transpose(0, 1), softmax_grads, keys.transpose(0, 1),
out=queries_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
torch.baddbmm(keys_grads.transpose(0, 1), softmax_grads.transpose(1, 2), queries.transpose(0, 1),
out=keys_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
# TODO:
if ctx.rotary_pos_enc:
queries_grads = queries_grads * cosq + rotate_backward(sinq * queries_grads)
keys_grads_ = keys_grads * cosk + rotate_backward(sink * keys_grads)
keys_grads.copy_(keys_grads_)
# Input Q Linear GEMM - DGRAD
# input1: (data grads) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim (1024), embed_dim (1024)]
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
queries_grads = queries_grads.view(inputs_q.size(0) * inputs_q.size(1), heads_t[0] * head_dim)
input_q_grads = torch.mm(queries_grads, input_weights_q)
input_q_grads = input_q_grads.view(inputs_q.size(0), inputs_q.size(1), inputs_q.size(2))
# Input KV Linear GEMM - DGRAD
# input1: (data grads) [seql_k, seqs, 2*embed_dim(2048)]
# input2: (weights) [embed_dim*2 (2048), embed_dim (1024)]
# output: [seql_k, seqs, embed_dim]
# GEMM: ( (seql_k*seqs) x 2*embed_dim ) x ( 2*embed_dim x embed_dim ) = (seql_k*seqs x embed_dim)
# the elements of values and query grads are already stored in (shared) query_grads and values_grads
input_lin_kv_results_grads = input_lin_kv_results_grads.view(inputs_kv.size(0) * inputs_kv.size(1),
heads_t[0] * 2 * head_dim)
input_kv_grads = torch.mm(input_lin_kv_results_grads, input_weights_kv)
input_kv_grads = input_kv_grads.view(inputs_kv.size(0), inputs_kv.size(1), inputs_kv.size(2))
# Input Q Linear GEMM - WGRAD
# input1: (data grads) [seql_q*seqs, embed_dim(1024)]
# input2: (activations) [seql_q*seqs, embed_dim(1024)]
# output: [embed_dim, embed_dim]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = (embed_dim x embed_dim)
input_weight_q_grads = torch.mm(queries_grads.transpose(0, 1),
inputs_q.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2)))
# Input KV Linear GEMM - WGRAD
# input1: (data grads) [seql_k*seqs, 2*embed_dim(2048)]
# input2: (activations) [seql_k*seqs, embed_dim(1024)]
# output: [2*embed_dim, embed_dim]
# GEMM: ( 2*embed_dim x seql_k*seqs ) x ( seql_k*seqs x embed_dim ) = (2*embed_dim x embed_dim)
input_weight_kv_grads = torch.mm(input_lin_kv_results_grads.transpose(0, 1),
inputs_kv.view(inputs_kv.size(0) * inputs_kv.size(1), inputs_kv.size(2)))
return None, None, None \
, input_q_grads, input_kv_grads \
, input_weight_q_grads, input_weight_kv_grads, output_weight_grads \
, None, None, None, None, None, None, None, None, None
# def encdec_attn_func(time_masking, is_training,
# num_heads, query, key,
# in_proj_weight_q, in_proj_weight_kv,
# out_proj_weight, attn_mask, dropout,
# incremental, incremental_cache,
# use_rotary_enc, pos_emb_q, pos_emb_k,
# double_precision, return_coverage):
# return EncdecAttnFunc.apply(time_masking, is_training,
# num_heads, query, key,
# in_proj_weight_q, in_proj_weight_kv,
# out_proj_weight, attn_mask, dropout,
# incremental, incremental_cache,
# use_rotary_enc, pos_emb_q, pos_emb_k,
# double_precision, return_coverage)
#
# return output, coverage
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
def encdec_attn_func(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return EncdecAttnFunc.apply(*args)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='reversible transformer')
parser.add_argument('-model_size', type=int, default=32,
help='Size of embedding / transformer hidden')
parser.add_argument('-gpu', default=0, type=int,
help="Seed for deterministic runs.")
test_function = encdec_attn_func
opt = parser.parse_args()
torch.cuda.set_device(opt.gpu)
opt.layers = 2
opt.variational_dropout = False
opt.dropout = 0.0
opt.attn_dropout = 0.0
opt.n_heads = 4
opt.inner_size = 16
opt.head_dim = opt.model_size // opt.n_heads
class Parameters(torch.nn.Module):
        def __init__(self, model_size=16, heads=1):
            super().__init__()
            self.model_size = model_size
self.heads = heads
self.head_dim = model_size // heads
# self.function = RelativeShiftFunction.apply
self.in_proj_weight_q = torch.Tensor(model_size, model_size)
self.in_proj_weight_kv = torch.Tensor(2 * model_size, model_size)
self.out_proj_weight = torch.Tensor(model_size, model_size)
self.in_proj_bias_q = torch.Tensor(model_size)
self.in_proj_bias_kv = torch.Tensor(2 * model_size)
self.out_proj_bias = torch.Tensor(model_size)
self.reset_parameters()
def reset_parameters(self):
std_ = 0.02
torch.nn.init.normal_(self.in_proj_weight_q, 0.0, std_)
torch.nn.init.normal_(self.in_proj_weight_kv, 0.0, std_)
torch.nn.init.normal_(self.out_proj_weight, 0.0, std_)
torch.nn.init.constant_(self.in_proj_bias_q, 0.)
torch.nn.init.constant_(self.in_proj_bias_kv, 0.)
torch.nn.init.constant_(self.out_proj_bias, 0.)
class TestAttention(torch.nn.Module):
def __init__(self, test_function, model_size=16, heads=1):
super().__init__()
self.model_size = model_size
self.heads = heads
self.head_dim = model_size // heads
self.function = test_function
def forward(self, in_proj_weight_q, input, context, in_proj_weight_kv, out_proj_weight, mask,
recompute=False, use_rotary_enc=False, pos_emb_q=None, pos_emb_k=None):
is_training = True
dropout = 0.0
double_precision = True
return_coverage = False
# .apply(time_masking, is_training,
# num_heads, query, key,
# in_proj_weight_q, in_proj_weight_kv,
# out_proj_weight, attn_mask, dropout,
# incremental, incremental_cache)
return self.function(recompute, is_training, self.heads, input, context,
in_proj_weight_q, in_proj_weight_kv, out_proj_weight,
mask, dropout,
False, None, # For the incremental stuff
use_rotary_enc, pos_emb_q, pos_emb_k,
double_precision, return_coverage) # double precision set to true
bsz = 4
len_q = 5
len_r = 15
input_states = torch.randn(*(len_q, bsz, opt.model_size)).double().cuda()
input_states.requires_grad = True
net = TestAttention(test_function, model_size=opt.model_size, heads=opt.n_heads)
parameters = Parameters(opt.model_size, opt.n_heads)
in_proj_weight_q = parameters.in_proj_weight_q.double().cuda()
in_proj_weight_kv = parameters.in_proj_weight_kv.double().cuda()
out_proj_weight = parameters.out_proj_weight.double().cuda()
in_proj_bias_q = parameters.in_proj_bias_q.double().cuda()
in_proj_bias_kv = parameters.in_proj_bias_kv.double().cuda()
out_proj_bias = parameters.out_proj_bias.double().cuda()
in_proj_weight_q.requires_grad = True
out_proj_weight.requires_grad = True
in_proj_weight_kv.requires_grad = True
in_proj_bias_q.requires_grad = True
in_proj_bias_kv.requires_grad = True
out_proj_bias.requires_grad = True
mask = input_states.new(*(bsz, len_r)).bernoulli_(p=0.25).bool()
# mask = None
print("gradchecking start.")
#
context = torch.randn(*(len_r, bsz, opt.model_size)).double().cuda()
context.requires_grad = True
#
recompute = False
try:
torch.autograd.gradcheck(net, (in_proj_weight_q, input_states, context, in_proj_weight_kv,
out_proj_weight, mask, recompute), atol=1e-04, rtol=0.001)
except RuntimeError as e:
print(e)
print("gradchecking completed.")
# print("gradchecking w/ recomputation start.")
#
# # context = torch.randn(*(len_r, bsz, opt.model_size)).double().cuda()
# # context.requires_grad = True
#
# recompute = True
# torch.autograd.gradcheck(net, (input_states, context, in_proj_weight_q, in_proj_weight_kv,
# out_proj_weight, mask, recompute), atol=1e-05, rtol=0.001)
#
# print("gradchecking completed.")
class SinusoidalEmbeddings(torch.nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, x=None, length=0, timestep=-1):
"""
:param timestep:
:param length:
:param x: [time x bsz x hidden]
:return:
"""
            # this module only uses the length of x along the time dimension (x.shape[0])
if x is not None:
assert length == 0 and timestep == -1
n = x.shape[0] # time dimension
elif length > 0:
assert timestep == -1
n = length
elif timestep >= 0:
n = timestep + 1
t = torch.arange(n, device=self.inv_freq.device).type_as(self.inv_freq)
sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
return emb
encoder = SinusoidalEmbeddings(opt.head_dim)
encoder = encoder.double().cuda()
pos_emb_q = encoder(length=len_q)
pos_emb_k = encoder(length=len_r)
    pos_emb_q.requires_grad = False
    pos_emb_k.requires_grad = False
recompute = False
print("gradchecking w/ rotary encoding start.")
torch.autograd.gradcheck(net, (in_proj_weight_q, input_states, context, in_proj_weight_kv,
out_proj_weight, mask, recompute, True, pos_emb_q, pos_emb_k), atol=1e-04,
rtol=0.001)
| 36,308
| 46.964333
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/fast_mha.py
|
###############################################################################
# Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
###############################################################################
import torch
import torch.nn.functional as F
try:
sm = torch.cuda.get_device_capability()
if sm[0] == 8 and sm[1] == 0:
import fmhalib
elif sm[0] == 8 and sm[1] == 6: # sm86
import fmhalib_sm86 as fmhalib
else:
fmhalib = None
except (ModuleNotFoundError, ImportError) as e:
fmhalib = None
from .linear import linear_blaslt
class FMHAFun(torch.autograd.Function):
"""
BERT Style Multihead Self Attention (Encoder only)
Can be used for wav2vec 2.0
"""
@staticmethod
def forward(ctx, qkv, cu_seqlens, p_dropout, max_s, is_training):
batch_size = cu_seqlens.numel() - 1
if batch_size < 4:
context, S_dmask = fmhalib.fwd_nl(qkv, cu_seqlens, p_dropout, max_s, is_training, None)
else:
context, S_dmask = fmhalib.fwd(qkv, cu_seqlens, p_dropout, max_s, is_training, None)
ctx.save_for_backward(qkv, S_dmask)
ctx.cu_seqlens = cu_seqlens
ctx.p_dropout = p_dropout
ctx.max_s = max_s
return context, S_dmask
@staticmethod
def backward(ctx, dout, dsoftmax):
qkv, S_dmask = ctx.saved_tensors
batch_size = ctx.cu_seqlens.numel() - 1
        dout = dout.contiguous()  # dout can be a non-contiguous view, which would corrupt the gradient computation
        if batch_size < 4:
            dqkv, dp, _ = fmhalib.bwd_nl(dout, qkv, S_dmask, ctx.cu_seqlens, ctx.p_dropout, ctx.max_s)
else:
dqkv, dp = fmhalib.bwd(dout, qkv, S_dmask, ctx.cu_seqlens, ctx.p_dropout, ctx.max_s)
return dqkv, None, None, None, None, None, None
class FastSelfAttnFunc(torch.autograd.Function):
"""
BERT Style Multihead Self Attention (Encoder only)
Can be used for wav2vec 2.0
"""
@staticmethod
def forward(ctx, input, cu_seqlens, p_dropout, max_s, is_training, num_heads, head_dim, recompute,
in_proj_weight, in_proj_bias, out_proj_weight, out_proj_bias):
batch_size = cu_seqlens.numel() - 1
total_bsz = input.size(0)
if batch_size < 4:
output, qkv, context, S_dmask = fmhalib.full_fwd_nl(input, in_proj_weight, in_proj_bias,
out_proj_weight, out_proj_bias,
cu_seqlens, p_dropout, max_s, is_training,
head_dim, num_heads, None)
else:
output, qkv, context, S_dmask = fmhalib.full_fwd(input, in_proj_weight, in_proj_bias,
out_proj_weight, out_proj_bias,
cu_seqlens, p_dropout, max_s, is_training,
head_dim, num_heads, None)
ctx.save_for_backward(context, qkv, input, S_dmask,
in_proj_weight, out_proj_weight, in_proj_bias, out_proj_bias)
ctx.cu_seqlens = cu_seqlens
ctx.p_dropout = p_dropout
ctx.max_s = max_s
ctx.num_heads = num_heads
ctx.head_dim = head_dim
ctx.recompute = recompute
return output, S_dmask
@staticmethod
def backward(ctx, dout, dsoftmax):
batch_size = ctx.cu_seqlens.numel() - 1
head_dim = ctx.head_dim
num_heads = ctx.num_heads
total_bsz = dout.size(0)
context, qkv, input, S_dmask, in_proj_weight, out_proj_weight, in_proj_bias, out_proj_bias = ctx.saved_tensors
if batch_size < 4:
d_input, in_proj_weight_grad, in_proj_bias_grad, out_proj_weight_grad, out_proj_bias_grad = \
fmhalib.full_bwd_nl(dout, qkv, context, S_dmask, input, in_proj_weight, in_proj_bias,
out_proj_weight, out_proj_bias, ctx.cu_seqlens, ctx.p_dropout,
ctx.head_dim, ctx.num_heads, ctx.max_s)
else:
d_input, in_proj_weight_grad, in_proj_bias_grad, out_proj_weight_grad, out_proj_bias_grad =\
fmhalib.full_bwd(dout, qkv, context, S_dmask, input, in_proj_weight, in_proj_bias,
out_proj_weight, out_proj_bias, ctx.cu_seqlens, ctx.p_dropout,
ctx.head_dim, ctx.num_heads, ctx.max_s)
del ctx.cu_seqlens
del ctx.p_dropout
del ctx.max_s
del ctx.head_dim
del ctx.num_heads
del ctx.recompute
del context, S_dmask, qkv
        # one gradient (or None) per forward input
        return d_input, None, None, None, None, None, None, None, \
            in_proj_weight_grad, in_proj_bias_grad, out_proj_weight_grad, out_proj_bias_grad
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
if fmhalib is not None:
def fast_bert_mha(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return FMHAFun.apply(*args)
def fast_self_attn_func(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return FastSelfAttnFunc.apply(*args)
else:
fast_bert_mha = None
fast_self_attn_func = None
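# Usage sketch (illustrative, not part of the original file): fast_bert_mha consumes
# sequences packed along the token dimension plus int32 cumulative sequence lengths.
# The [total_tokens, 3, heads, head_dim] qkv layout assumed below is common for fmhalib
# builds but may need adjusting; fp16 CUDA tensors and the fused extension are required.
def _fast_bert_mha_usage_example():
    if fast_bert_mha is None:
        return None
    heads, head_dim = 8, 64
    seq_lens = torch.tensor([7, 13, 5], dtype=torch.int32, device='cuda')
    cu_seqlens = torch.zeros(seq_lens.numel() + 1, dtype=torch.int32, device='cuda')
    cu_seqlens[1:] = torch.cumsum(seq_lens, dim=0)
    total, max_s = int(cu_seqlens[-1]), int(seq_lens.max())
    qkv = torch.randn(total, 3, heads, head_dim, dtype=torch.float16, device='cuda')
    context, _ = fast_bert_mha(qkv, cu_seqlens, 0.1, max_s, True)  # p_dropout=0.1, training mode
    return context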
| 7,225
| 40.768786
| 118
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/flash_mha.py
|
###############################################################################
# Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
###############################################################################
import torch
import torch.nn.functional as F
try:
import flash_attn_cuda
except (ModuleNotFoundError, ImportError) as e:
flash_attn_cuda = None
def _get_block_size(device, head_dim, is_dropout):
assert head_dim % 8 == 0 and head_dim <= 128
return 256 if head_dim <= 64 else 128
def _flash_attn_forward(q, k, v, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, softmax_scale, causal, return_softmax, num_splits=0,
generator=None):
"""
num_splits: how much to parallelize over the seqlen_q dimension. num_splits=0 means
it will be set by an internal heuristic. We're exposing num_splits mostly for benchmarking.
Don't change it unless you know what you're doing.
"""
softmax_lse, *rest = flash_attn_cuda.fwd(
q, k, v, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p,
softmax_scale, False, causal, return_softmax, num_splits, generator
)
# if out.isnan().any() or softmax_lse.isnan().any():
# breakpoint()
S_dmask = rest[0] if return_softmax else None
return out, softmax_lse, S_dmask
def _flash_attn_backward(dout, q, k, v, out, softmax_lse, dq, dk, dv, cu_seqlens_q, cu_seqlens_k,
max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale, causal, num_splits=0,
generator=None):
"""
num_splits: whether to parallelize over the seqlen_k dimension (num_splits > 1) or
not (num_splits = 1). num_splits=0 means it will be set by an internal heuristic.
Any value above 1 will call the same kernel (i.e. num_splits=2 would call the same kernel
as num_splits=3), so effectively the choices are 0, 1, and 2.
This hyperparameter can be tuned for performance, but default value (heuristic) should work fine.
"""
dout = dout.contiguous() # CUDA code assumes that dout is contiguous
_, _, _, softmax_d = flash_attn_cuda.bwd(
dout, q, k, v, out, softmax_lse, dq, dk, dv, cu_seqlens_q, cu_seqlens_k,
max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale, False, causal, num_splits, generator)
# if dk.isnan().any() or dk.isnan().any() or dv.isnan().any() or softmax_d.isnan().any():
# breakpoint()
return dq, dk, dv, softmax_d
# def _flash_attn_forward(q, k, v, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p,
# softmax_scale, causal, return_softmax):
# out, softmax_lse, *rest = flash_attn_cuda.fwd(
# q, k, v, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale,
# False, causal, return_softmax, None
# )
# # if out.isnan().any() or softmax_lse.isnan().any():
# # breakpoint()
# S_dmask = rest[0] if return_softmax else None
# return out, softmax_lse, S_dmask
# def _flash_attn_backward(dout, q, k, v, out, softmax_lse, dq, dk, dv, cu_seqlens_q, cu_seqlens_k,
# max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale, causal):
# softmax_d = flash_attn_cuda.bwd(
# dout, q, k, v, out, softmax_lse, dq, dk, dv, cu_seqlens_q, cu_seqlens_k,
# max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale, False, causal, None)
# # if dk.isnan().any() or dk.isnan().any() or dv.isnan().any() or softmax_d.isnan().any():
# # breakpoint()
# return dq, dk, dv, softmax_d
class FlashAttnQKVPackedFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cu_seqlens, max_seqlen, dropout_p, softmax_scale, causal, return_softmax):
# Save rng_state because the backward pass will regenerate the dropout mask
rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
if softmax_scale is None:
softmax_scale = qkv.shape[-1] ** (-0.5)
out, softmax_lse, S_dmask = _flash_attn_forward(
qkv[:, 0], qkv[:, 1], qkv[:, 2], torch.empty_like(qkv[:, 0]),
cu_seqlens, cu_seqlens, max_seqlen, max_seqlen,
dropout_p, softmax_scale, causal=causal, return_softmax=return_softmax
)
ctx.save_for_backward(qkv, out, softmax_lse, cu_seqlens, rng_state)
ctx.dropout_p = dropout_p
ctx.max_seqlen = max_seqlen
ctx.softmax_scale = softmax_scale
ctx.causal = causal
return out if not return_softmax else (out, softmax_lse, S_dmask)
@staticmethod
def backward(ctx, dout, *args):
qkv, out, softmax_lse, cu_seqlens, rng_state = ctx.saved_tensors
if rng_state is not None:
cur_rng_state = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(rng_state)
dqkv = torch.empty_like(qkv)
_flash_attn_backward(
dout, qkv[:, 0], qkv[:, 1], qkv[:, 2], out, softmax_lse,
dqkv[:, 0], dqkv[:, 1], dqkv[:, 2], cu_seqlens, cu_seqlens,
ctx.max_seqlen, ctx.max_seqlen, ctx.dropout_p, ctx.softmax_scale, ctx.causal
)
if rng_state is not None:
torch.cuda.set_rng_state(cur_rng_state)
return dqkv, None, None, None, None, None, None
class FlashAttnKVPackedFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, q, kv, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p,
softmax_scale, causal, return_softmax):
# Save rng_state because the backward pass will regenerate the dropout mask
rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
if softmax_scale is None:
softmax_scale = q.shape[-1] ** (-0.5)
out, softmax_lse, S_dmask = _flash_attn_forward(
q, kv[:, 0], kv[:, 1], torch.empty_like(q),
cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, softmax_scale, causal=causal, return_softmax=return_softmax
)
ctx.save_for_backward(q, kv, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state)
ctx.dropout_p = dropout_p
ctx.max_seqlen_q = max_seqlen_q
ctx.max_seqlen_k = max_seqlen_k
ctx.softmax_scale = softmax_scale
ctx.causal = causal
return out if not return_softmax else (out, softmax_lse, S_dmask)
@staticmethod
def backward(ctx, dout, *args):
q, kv, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
if rng_state is not None:
cur_rng_state = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(rng_state)
dq = torch.empty_like(q)
dkv = torch.empty_like(kv)
_flash_attn_backward(
dout, q, kv[:, 0], kv[:, 1], out, softmax_lse,
dq, dkv[:, 0], dkv[:, 1], cu_seqlens_q, cu_seqlens_k,
ctx.max_seqlen_q, ctx.max_seqlen_k, ctx.dropout_p, ctx.softmax_scale, ctx.causal
)
if rng_state is not None:
torch.cuda.set_rng_state(cur_rng_state)
return dq, dkv, None, None, None, None, None, None, None, None
class FlashMHAFun(torch.autograd.Function):
"""
BERT Style Multihead Self Attention (Encoder only)
Can be used for wav2vec 2.0
"""
@staticmethod
def forward(ctx, qkv, cu_seqlens, dropout_p, max_s, softmax_scale, causal):
# def forward(ctx, qkv, cu_seqlens, p_dropout, max_s, is_training):
# Save rng_state because the backward pass will regenerate the dropout mask
rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
# by default scale is 1/sqrt(head_dim)
if softmax_scale is None:
softmax_scale = qkv.shape[-1] ** (-0.5)
context, softmax_lse, S_dmask = _flash_attn_forward(
qkv, cu_seqlens, dropout_p, max_s, softmax_scale, causal=causal, return_softmax=False
)
ctx.save_for_backward(qkv, context, S_dmask, softmax_lse, cu_seqlens, rng_state)
ctx.dropout_p = dropout_p
ctx.max_s = max_s
ctx.softmax_scale = softmax_scale
ctx.causal = causal
return context
@staticmethod
def backward(ctx, dout):
qkv, context, S_dmask, softmax_lse, cu_seqlens, rng_state = ctx.saved_tensors
# restore rng state to recompute dropout
if rng_state is not None:
cur_rng_state = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(rng_state)
# S_dmask is None, temporarily use another tensor just to get it running
dqkv = _flash_attn_backward(
dout, qkv, context, context, softmax_lse, cu_seqlens, ctx.dropout_p,
ctx.max_s, ctx.softmax_scale, ctx.causal
)
if rng_state is not None:
torch.cuda.set_rng_state(cur_rng_state)
return dqkv, None, None, None, None, None, None
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
if flash_attn_cuda is not None:
def flash_bert_mha(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return FlashAttnQKVPackedFunc.apply(*args)
def flash_encdec_mha(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return FlashAttnKVPackedFunc.apply(*args)
else:
flash_bert_mha = None
flash_encdec_mha = None
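# Usage sketch (illustrative, not part of the original file): flash_encdec_mha implements
# cross-attention with a packed kv tensor. The [total_q, heads, head_dim] query layout and
# [total_k, 2, heads, head_dim] kv layout assumed below follow the usual flash-attn varlen
# convention and may need adjusting; fp16 CUDA tensors and the flash_attn_cuda extension
# are required.
def _flash_encdec_mha_usage_example():
    if flash_encdec_mha is None:
        return None
    heads, head_dim = 8, 64
    q_lens = torch.tensor([4, 6], dtype=torch.int32, device='cuda')
    k_lens = torch.tensor([10, 7], dtype=torch.int32, device='cuda')
    cu_q = torch.zeros(q_lens.numel() + 1, dtype=torch.int32, device='cuda')
    cu_q[1:] = torch.cumsum(q_lens, dim=0)
    cu_k = torch.zeros(k_lens.numel() + 1, dtype=torch.int32, device='cuda')
    cu_k[1:] = torch.cumsum(k_lens, dim=0)
    q = torch.randn(int(cu_q[-1]), heads, head_dim, dtype=torch.float16, device='cuda')
    kv = torch.randn(int(cu_k[-1]), 2, heads, head_dim, dtype=torch.float16, device='cuda')
    # dropout_p=0.0, softmax_scale=None (defaults to head_dim ** -0.5), causal=False, return_softmax=False
    return flash_encdec_mha(q, kv, cu_q, cu_k, int(q_lens.max()), int(k_lens.max()),
                            0.0, None, False, False)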
| 11,249
| 44.731707
| 101
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/rotary_encodings.py
|
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
class SinusoidalEmbeddings(torch.nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, x=None, length=0, timestep=-1):
"""
:param timestep:
:param length:
:param x: [time x bsz x hidden]
:return:
"""
        # this module only uses the length of x along the time dimension (x.shape[0])
if x is not None:
assert length == 0 and timestep == -1
n = x.shape[0] # time dimension
elif length > 0:
assert timestep == -1
n = length
elif timestep >= 0:
n = timestep + 1
t = torch.arange(n, device=self.inv_freq.device).type_as(self.inv_freq)
sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
return emb
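# Usage sketch (illustrative, not part of the original file): the module returns a
# [length, dim] table whose first half along the last dimension is sin and second half
# is cos; callers split or re-pair these to build the (cos, sin) operands of a rotary rotation.
def _sinusoidal_embeddings_usage_example():
    dim, length = 64, 10
    encoder = SinusoidalEmbeddings(dim)
    emb = encoder(length=length)                      # [length, dim]
    sin, cos = emb[:, :dim // 2], emb[:, dim // 2:]   # each [length, dim // 2]
    return sin, cos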
| 1,081
| 30.823529
| 80
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/relative_self_attention.py
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .relative_self_attention_func import relative_self_attn_func
from .relative_self_attention_func import RelativeShift
import onmt
class RelativeSelfMultiheadAttn(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0., learnable_pos=False, max_pos=0):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = True
self.learnable_pos = learnable_pos
self.autograd = False
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
if self.learnable_pos:
# If using learnable position embeddings, then assign embeddings for 2N + 1 max positions
# (embeddings are shared across heads)
assert max_pos >= 1
self.pos_emb = nn.Embedding(2 * max_pos + 1, self.head_dim)
self.pos_proj_weight, self.pos_proj_bias = None, None
else:
            # Using sin/cos position encodings which are linearly projected to head_dim (separately per head)
self.pos_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.pos_proj_bias = Parameter(torch.Tensor(embed_dim))
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj_bias = Parameter(torch.Tensor(embed_dim))
self.r_w_bias = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
self.r_r_bias = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
self.reset_parameters()
self.attn_func = relative_self_attn_func
def convert_autograd(self):
if self.autograd:
return
self.autograd = True
with torch.no_grad():
self.in_linear = torch.nn.Linear(self.embed_dim, 3 * self.embed_dim)
self.out_linear = torch.nn.Linear(self.embed_dim, self.embed_dim)
if not self.learnable_pos:
self.pos_linear = torch.nn.Linear(self.embed_dim, self.embed_dim)
self.pos_linear.weight.copy_(self.pos_proj_weight)
self.pos_linear.bias.copy_(self.pos_proj_bias)
del self.pos_proj_weight
del self.pos_proj_bias
self.in_linear.weight.copy_(self.in_proj_weight)
self.in_linear.bias.copy_(self.in_proj_bias)
self.out_linear.weight.copy_(self.out_proj_weight)
self.out_linear.bias.copy_(self.out_proj_bias)
del self.in_proj_weight
del self.out_proj_weight
del self.in_proj_bias
del self.out_proj_bias
def reset_parameters(self, init='normal'):
if init == 'normal': # xavier normal
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.in_proj_weight, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
if self.pos_proj_weight is not None:
nn.init.normal_(self.pos_proj_weight, 0.0, std_)
else:
std_ = math.sqrt(6.0 / (self.embed_dim + self.embed_dim))
nn.init.uniform_(self.in_proj_weight, -std_, std_)
nn.init.uniform_(self.out_proj_weight, -std_, std_)
if self.pos_proj_weight is not None:
nn.init.uniform_(self.pos_proj_weight, -std_, std_)
nn.init.constant_(self.in_proj_bias, 0.)
nn.init.constant_(self.out_proj_bias, 0.)
if self.pos_proj_bias is not None:
nn.init.constant_(self.pos_proj_bias, 0.)
nn.init.normal_(self.r_w_bias, 0.0, 0.02)
nn.init.normal_(self.r_r_bias, 0.0, 0.02)
def forward(self, input, pos, key_padding_mask=None, attn_mask=None, mems=None,
incremental=False, incremental_cache=None):
"""
:param recompute:
:param input: [T x B x H]
:param pos: [T x 1 x H] or [T x T x H]
:param key_padding_mask: [1 x T x B]
:param attn_mask: [T x T]
:param mems:
:param incremental:
:param incremental_cache:
:return:
"""
if key_padding_mask is not None:
            assert (attn_mask is None), "ERROR: attn_mask and key_padding_mask should not both be defined!"
mask = key_padding_mask
if len(mask.shape) == 3:
# [1 x T x B] -> [B x T]
mask = mask.squeeze(0).transpose(0, 1)
elif attn_mask is not None:
mask = attn_mask
if len(mask.shape) == 3:
mask = mask.squeeze(-1)
else:
mask = None
is_training = self.training
if self.learnable_pos:
# [len_q x len_k] -> [len_q x len_k x head_dim]
pos = self.pos_emb(pos)
if self.autograd:
# assert not self.training, "Auto-grad mode only used in Evaluation (for Quantization)."
bsz = input.size(1)
heads = self.num_heads
head_dim = self.head_dim
len_q = input.size(0)
len_k = len_q
input_lin_results = self.in_linear(input)
scale_t = torch.tensor([head_dim ** -0.5])
use_time_mask = attn_mask is not None
if mask is not None:
mask = mask.to(torch.bool)
# Self Attention Time Mask
if use_time_mask:
assert (len(mask.size()) == 2), "Timing mask is not 2D!"
mask = mask.unsqueeze(0).unsqueeze(0)
# Key Padding Mask
else:
mask = mask.unsqueeze(1).unsqueeze(2)
if not self.learnable_pos:
pos_lin_results = self.pos_linear(pos)
r_head_k = pos_lin_results.view(pos.size(0), bsz * self.num_heads, self.head_dim)
input_lin_results = input_lin_results.view(input.size(0), input.size(1) * self.num_heads, 3, self.head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
if incremental:
# We have to change the heads x head_dim first and then concat to the T dim
# bsz is changed during translation due to beam search
# during translation we want to keep the actual T dim in MM as 1 constantly
keys = keys.reshape(len_q, bsz, heads * head_dim)
values = values.reshape(len_q, bsz, heads * head_dim)
if 'k' in incremental_cache and 'v' in incremental_cache:
keys = torch.cat([incremental_cache['k'], keys], dim=0) # time first
incremental_cache['k'] = keys
values = torch.cat([incremental_cache['v'], values], dim=0) # time first
incremental_cache['v'] = values
else:
incremental_cache['k'] = keys
incremental_cache['v'] = values
keys = keys.view(-1, bsz * heads, head_dim)
values = values.view(-1, bsz * heads, head_dim)
# re-update len_k to be the newly updated length of the keys
len_k = keys.size(0)
rw_head_q = queries.view(len_q, bsz, heads, head_dim) + self.r_w_bias
rw_head_q = rw_head_q.view(len_q, bsz * heads, head_dim)
matmul_ac = torch.bmm(rw_head_q.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2)).mul_(scale_t[0])
rr_head_q = queries.view(len_q, bsz, heads, head_dim) + self.r_r_bias
rr_head_q = rr_head_q.view(len_q, bsz * heads, head_dim)
if not self.learnable_pos:
matmul_bd = torch.matmul(rr_head_q.transpose(0, 1), r_head_k.transpose(0, 1).transpose(1, 2)) \
.mul_(scale_t[0])
matmul_bd = RelativeShift.forward(matmul_bd, True, False)
matmul_bd = matmul_bd[:, :, :len_k]
attn_score = matmul_ac + matmul_bd
else:
matmul_ac.transpose(0, 1).baddbmm_(rr_head_q, pos.transpose(1, 2), beta=1.0, alpha=scale_t[0])
attn_score = matmul_ac
if mask is not None:
attn_score.view(bsz, heads, len_q, len_k).masked_fill_(mask, float('-inf'))
softmax_results = F.softmax(attn_score, dim=-1)
dropout_results = F.dropout(softmax_results, self.dropout, training=self.training)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1)).transpose(0, 1)
matmul2_results = matmul2_results.contiguous().view(len_q, bsz, self.embed_dim)
outputs = self.out_linear(matmul2_results)
return outputs, softmax_results
else:
recompute = onmt.constants.recompute
outputs, coverage = self.attn_func(input, pos, attn_mask is not None, is_training, self.num_heads,
self.in_proj_weight, self.out_proj_weight, self.pos_proj_weight,
self.in_proj_bias, self.out_proj_bias, self.pos_proj_bias,
self.r_w_bias, self.r_r_bias,
mask, self.dropout,
incremental, incremental_cache, False,
self.learnable_pos, True, recompute)
            # trailing flags: double precision (False), learnable position encoding, return coverage (True), recompute
return outputs, coverage
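# --- illustrative sketch (not part of the original module) -------------------------------------
# The autograd path above scores attention as the sum of a content term (queries shifted by
# r_w_bias against the keys) and a position term (queries shifted by r_r_bias against the
# relative position embeddings). The helper below reproduces that decomposition on toy tensors
# for the learnable-position branch; all shapes and names are assumptions made for demonstration.
def _relative_score_sketch():
    bsz, heads, head_dim, len_q = 2, 4, 8, 5
    len_k = len_q
    queries = torch.randn(len_q, bsz * heads, head_dim)
    keys = torch.randn(len_k, bsz * heads, head_dim)
    pos = torch.randn(len_q, len_k, head_dim)  # learnable relative position embeddings
    r_w_bias = torch.zeros(heads, head_dim)
    r_r_bias = torch.zeros(heads, head_dim)
    scale = head_dim ** -0.5
    rw_head_q = (queries.view(len_q, bsz, heads, head_dim) + r_w_bias).view(len_q, bsz * heads, head_dim)
    rr_head_q = (queries.view(len_q, bsz, heads, head_dim) + r_r_bias).view(len_q, bsz * heads, head_dim)
    # content-based term: (q + r_w_bias) . k
    attn_score = torch.bmm(rw_head_q.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2)).mul_(scale)
    # position-based term: (q + r_r_bias) . pos, accumulated in place as in the forward above
    attn_score.transpose(0, 1).baddbmm_(rr_head_q, pos.transpose(1, 2), beta=1.0, alpha=scale)
    return attn_score  # [bsz*heads, len_q, len_k]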
| 10,003
| 42.307359
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/self_attention_func.py
|
"""
Self-attention with multi-head attention.
Code is taken from apex self-attention implementation
https://github.com/NVIDIA/apex/tree/master/apex/contrib/csrc/multihead_attn
"""
import torch
import torch.nn.functional as F
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .compat import custom_fwd, custom_bwd
try:
import self_multihead_attn_cuda
except (ModuleNotFoundError, ImportError) as e:
self_multihead_attn_cuda = None
try:
import self_multihead_attn_blaslt
except (ModuleNotFoundError, ImportError) as e:
self_multihead_attn_blaslt = None
def rotate_half(x):
# this function works the same with 3D or 2D tensors
x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in torch < 1.8.0
def apply_rotary_pos_emb(q, k, cos, sin):
# q: seq_len x (bszxhead) x headsize
# k: seq_len x (bszxhead) x headsize
# cos: seq_len x 1 x head_size
    # sin: seq_len x 1 x head_size
# or
# q: (total_bsz) x head x head_size
# k: (total_bsz) x head x head_size
# sin: (total_bsz) x 1 x head_size
# cos: (total_bsz) x 1 x head_size
return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
def rotate_backward(dx):
dx2, dx1 = dx[..., :dx.shape[-1] // 2], dx[..., dx.shape[-1] // 2:]
return torch.cat((dx1, -dx2), dim=dx1.ndim - 1)
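# --- illustrative sketch (not part of the original file) ---------------------------------------
# Minimal usage of the rotary helpers above. The sin/cos table below follows the standard rotary
# recipe (inverse-frequency bands over positions); its construction is an assumption made for the
# example and is not taken from this module.
def _rotary_pos_emb_example():
    seq_len, batch_heads, head_dim = 6, 2, 8
    q = torch.randn(seq_len, batch_heads, head_dim)
    k = torch.randn(seq_len, batch_heads, head_dim)
    inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
    positions = torch.arange(seq_len).float()
    freqs = torch.einsum('i,j->ij', positions, inv_freq)        # [seq_len, head_dim / 2]
    emb = torch.cat((freqs, freqs), dim=-1)                     # [seq_len, head_dim]
    cos, sin = emb.cos().unsqueeze(1), emb.sin().unsqueeze(1)   # [seq_len, 1, head_dim]
    return apply_rotary_pos_emb(q, k, cos, sin)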
class SelfAttnFunc(torch.autograd.Function):
@staticmethod
@custom_fwd()
def forward(ctx, use_time_mask, is_training, heads, inputs,
input_weights, output_weights,
input_biases, output_biases,
mask, dropout_prob,
rotary_pos_enc, pos_emb,
incremental, incremental_cache,
low_precision, return_coverage, recompute):
inputs = inputs.contiguous()
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
head_dim = inputs.size(2) // heads
scale_t = torch.tensor([head_dim ** -0.5])
ctx.rotary_pos_enc = rotary_pos_enc
ctx.return_coverage = return_coverage
ctx.low_precision = low_precision
ctx.use_time_mask = use_time_mask
ctx.recompute = recompute
input_weights = input_weights.contiguous()
output_weights = output_weights.contiguous()
bsz, len_q = inputs.size(1), inputs.size(0)
# print(low_precision, incremental, inputs.type())
if low_precision and self_multihead_attn_blaslt is not None and not incremental and len_q <= 2048 \
and inputs.type() == 'torch.cuda.HalfTensor' \
and not rotary_pos_enc:
ctx.fused = True
if mask is not None:
if use_time_mask:
mask = mask.bool()
else: # [b x len_k] -> [b x 1 x 1 x len_k]
mask = mask.unsqueeze(1).unsqueeze(2).bool()
else:
if use_time_mask:
mask = inputs.new(len_q, len_q).zero_().bool()
else:
mask = inputs.new(bsz, 1, 1, len_q).zero_().bool() # works
cuda_module = self_multihead_attn_blaslt
input_lin_results, \
attn_scores, \
dropout_results, \
dropout_mask, \
matmul2_results, \
outputs = cuda_module.forward(use_time_mask, is_training, heads,
inputs.contiguous(), input_weights, output_weights,
input_biases, output_biases,
mask, dropout_prob)
if recompute:
matmul2_results, dropout_results, attn_scores, input_lin_results = None, None, None, None
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results,
dropout_results,
attn_scores,
input_lin_results,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
mask,
dropout_mask,
dropout_prob_t,
mask)
            if return_coverage:
                return outputs, dropout_results
            else:
                return (outputs,)
ctx.fused = False
# Input Linear GEMM
# input1: (activations) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)] (transpose [0,1])
# output: [seql_q, seqs, embed_dim*3]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim*3 ) = (seql_q*seqs x embed_dim*3)
input_lin_results = torch.addmm(input_biases,
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
input_weights.transpose(0, 1),
beta=1., alpha=1.)
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1), input_weights.size(0))
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads, 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
if incremental:
keys = keys.contiguous().view(len_q, bsz, heads * head_dim)
values = values.contiguous().view(len_q, bsz, heads * head_dim)
if 'k' in incremental_cache and 'v' in incremental_cache:
keys = torch.cat([incremental_cache['k'], keys], dim=0) # time first
incremental_cache['k'] = keys
values = torch.cat([incremental_cache['v'], values], dim=0) # time first
incremental_cache['v'] = values
else:
incremental_cache['k'] = keys
incremental_cache['v'] = values
keys = keys.view(-1, bsz * heads, head_dim)
values = values.view(-1, bsz * heads, head_dim)
len_k = keys.size(0)
# apply rotary position encodings
if rotary_pos_enc:
            assert pos_emb is not None
cos, sin = pos_emb
queries_, keys_ = apply_rotary_pos_emb(queries, keys, cos, sin)
queries.copy_(queries_)
keys.copy_(keys_)
else:
sin, cos = null_tensor, null_tensor
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
        # Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
if queries.is_cuda:
matmul1_results = torch.empty((queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype,
device=queries.device)
matmul1_results = torch.baddbmm(matmul1_results, queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results, beta=0.0, alpha=scale_t[0])
else:
matmul1_results = torch.matmul(queries.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2))
matmul1_results.mul_(scale_t[0])
if mask is not None:
# Self Attention Time Mask
if use_time_mask:
assert (len(mask.size()) == 2), "Timing mask is not 2D!"
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask, float('-inf'))
# Key Padding Mask
else:
batches, seql_q, seql_k = matmul1_results.size()
seqs = int(batches / heads)
matmul1_results = matmul1_results.view(seqs, heads, seql_q, seql_k)
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float('-inf'))
matmul1_results = matmul1_results.view(seqs * heads, seql_q, seql_k)
# Softmax and Dropout attention
softmax_results = F.softmax(matmul1_results, dim=-1)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1. - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
nan_mask = torch.isnan(dropout_results)
if nan_mask.any():
dropout_results.masked_fill_(nan_mask, 0)
# Matmul2 Batched GEMMs
# The output tensor specification is needed here to specify the non-standard output.
# Given that pytorch cannot currently perform autograd with an output tensor specified,
# this requires a backward pass specified.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
if queries.is_cuda:
matmul2_results = torch.empty((dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype, device=queries.device).transpose(1, 0)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
else:
matmul2_results = torch.matmul(dropout_results, values.transpose(0, 1))
matmul2_results = matmul2_results.transpose(0, 1).contiguous().view(inputs.size(0), inputs.size(1),
inputs.size(2))
# Output Linear GEMM
# Input1: (activations) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
outputs = torch.addmm(output_biases,
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
output_weights.transpose(0, 1),
beta=1., alpha=1.)
outputs = outputs.view(inputs.size(0), inputs.size(1), output_weights.size(0))
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
mask,
dropout_mask,
dropout_prob_t,
sin, cos)
if return_coverage:
return (outputs, dropout_results)
else:
return (outputs,)
@staticmethod
@custom_bwd
def backward(ctx, *output_grads):
if ctx.return_coverage:
output_grads, coverage_grads = output_grads
else:
output_grads = output_grads[0]
if ctx.fused:
heads_t, \
scale_t, \
matmul2_results, \
dropout_results, \
attn_scores, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
input_biases, \
output_biases, \
mask, \
dropout_mask, \
dropout_prob_t, pad_mask = ctx.saved_tensors
if input_weights.requires_grad:
cuda_module = self_multihead_attn_blaslt
if ctx.recompute:
input_grads, \
input_weight_grads, \
output_weight_grads, \
input_bias_grads, \
output_bias_grads = \
cuda_module.backward_recompute(ctx.use_time_mask, heads_t[0],
output_grads.contiguous(), inputs, input_weights,
output_weights, input_biases, output_biases,
mask, dropout_mask, dropout_prob_t[0])
else:
input_grads, \
input_weight_grads, \
output_weight_grads, \
input_bias_grads, \
output_bias_grads = \
cuda_module.backward(ctx.use_time_mask, heads_t[0],
output_grads.contiguous(), matmul2_results,
dropout_results, attn_scores,
input_lin_results, inputs, input_weights,
output_weights, dropout_mask, dropout_prob_t[0])
else:
input_grads = self_multihead_attn_cuda.backward_input_only(ctx.use_time_mask, heads_t[0],
output_grads.contiguous(), matmul2_results,
dropout_results, attn_scores,
input_lin_results, inputs, input_weights,
output_weights, dropout_mask,
dropout_prob_t[0])
input_weight_grads = None
input_bias_grads = None
output_weight_grads = None
output_bias_grads = None
return None, None, None, \
input_grads, \
input_weight_grads, output_weight_grads, \
input_bias_grads, output_bias_grads, \
None, None, None, None, None, None, None, None, None
heads_t, \
scale_t, \
matmul2_results, \
dropout_results, \
softmax_results, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
input_biases, \
output_biases, \
mask, \
dropout_mask, \
dropout_prob_t, \
sin, cos = ctx.saved_tensors
head_dim = inputs.size(2) // heads_t.item()
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads_t[0], 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
len_key = keys.size(0)
# Slice out q,k,v from one big set of gradients entering the input linear's bprop
# (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared before hand to properly slice out query, key, and value grads.
input_lin_results_grads = torch.empty_like(input_lin_results)
queries_grads = input_lin_results_grads[:, :, 0, :]
keys_grads = input_lin_results_grads[:, :, 1, :]
values_grads = input_lin_results_grads[:, :, 2, :]
# Output Linear GEMM - DGRAD
# Input1: (data grads) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
output_grads = output_grads.contiguous()
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
# Output Linear GEMM - WGRAD
# Input1: (data grads) [seql_q*seqs, embed_dim=heads*head_dim] transpose(0,1)
# Input2: (activations) [seql_q*seqs, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = ( embed_dim x embed_dim )
if output_weights.requires_grad:
output_weight_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)))
output_bias_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0)
else:
output_weight_grads = None
output_bias_grads = None
output_lin_grads = output_lin_grads.view(inputs.size(0), inputs.size(1) * heads_t[0], head_dim).transpose(0, 1)
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
        # Matmul2 - DGRAD2
        # Input1: (activations) dropout results [seqs*heads, seql_q, seql_k] transpose(1,2)
        # Input2: (data grads)  [seqs*heads, seql_q, head_dim]
        # Output: (values grads) [seqs*heads, seql_k, head_dim] transpose(0,1)
        # GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
        # Mask and Scaling for Dropout (not a publicly documented op)
        dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
        # Softmax Grad (not a publicly documented op)
try:
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results.dtype)
except TypeError:
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
torch.baddbmm(queries_grads.transpose(0, 1), softmax_grads, keys.transpose(0, 1),
out=queries_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
torch.baddbmm(keys_grads.transpose(0, 1), softmax_grads.transpose(1, 2), queries.transpose(0, 1),
out=keys_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
if ctx.rotary_pos_enc:
queries_grads_ = queries_grads * cos + rotate_backward(sin * queries_grads)
keys_grads_ = keys_grads * cos + rotate_backward(sin * keys_grads)
queries_grads.copy_(queries_grads_)
keys_grads.copy_(keys_grads_)
# Input Linear GEMM - DGRAD
# input1: (data grads) [seql_q, seqs, 3*embed_dim(3072)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)]
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x 3*embed_dim ) x ( 3*embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
input_lin_results_grads = input_lin_results_grads.view(inputs.size(0) * inputs.size(1),
heads_t[0] * 3 * head_dim)
input_grads = torch.mm(input_lin_results_grads, input_weights)
input_grads = input_grads.view(inputs.size(0), inputs.size(1), inputs.size(2))
# Input Linear GEMM - WGRAD
# input1: (data grads) [seql_q*seqs, 3*embed_dim(3072)]
# input2: (activations) [seql_q*seqs, embed_dim(1024)]
# output: [3*embed_dim, embed_dim]
# GEMM: ( 3*embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = (3*embed_dim x embed_dim)
if input_weights.requires_grad:
input_weight_grads = torch.mm(input_lin_results_grads.transpose(0, 1),
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)))
input_bias_grads = torch.sum(input_lin_results_grads, 0)
else:
input_weight_grads = None
input_bias_grads = None
return None, None, None, \
input_grads, \
input_weight_grads, output_weight_grads, \
input_bias_grads, output_bias_grads, \
None, None, None, None, None, None, None, None, None
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
def self_attn_func(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return SelfAttnFunc.apply(*args)
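# --- illustrative sketch (not part of the original file) ---------------------------------------
# Toy call of self_attn_func with the argument order of SelfAttnFunc.forward above. Sizes and
# weight initialisation are assumptions for the example; is_training=False and low_precision=False
# keep it on the pure-PyTorch fallback path so it also runs without the fused CUDA kernels.
def _self_attn_func_example():
    heads, embed_dim, len_q, bsz = 4, 32, 7, 3
    inputs = torch.randn(len_q, bsz, embed_dim)
    input_weights = torch.randn(3 * embed_dim, embed_dim) * 0.02
    output_weights = torch.randn(embed_dim, embed_dim) * 0.02
    input_biases = torch.zeros(3 * embed_dim)
    output_biases = torch.zeros(embed_dim)
    outputs, coverage = self_attn_func(False, False, heads, inputs,      # use_time_mask, is_training
                                       input_weights, output_weights,
                                       input_biases, output_biases,
                                       None, 0.0,                        # mask, dropout
                                       False, None,                      # rotary_pos_enc, pos_emb
                                       False, None,                      # incremental, incremental_cache
                                       False, True, False)               # low_precision, return_coverage, recompute
    return outputs, coverage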
class SelfAttnCompactFunc(torch.autograd.Function):
@staticmethod
@custom_fwd()
def forward(ctx, use_time_mask, is_training, heads, input_lin_results,
mask, dropout_prob,
rotary_pos_enc, pos_emb,
incremental, incremental_cache,
low_precision, return_coverage, recompute):
input_lin_results = input_lin_results.contiguous()
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
embed_dim = input_lin_results.size(2) // 3
head_dim = embed_dim // heads
scale_t = torch.tensor([head_dim ** -0.5])
ctx.rotary_pos_enc = rotary_pos_enc
ctx.return_coverage = return_coverage
ctx.low_precision = low_precision
ctx.use_time_mask = use_time_mask
ctx.recompute = recompute
bsz, len_q = input_lin_results.size(1), input_lin_results.size(0)
# print(low_precision, incremental, inputs.type())
if low_precision and self_multihead_attn_blaslt is not None and not incremental and len_q <= 2048 \
and input_lin_results.type() == 'torch.cuda.HalfTensor' \
and not rotary_pos_enc:
ctx.fused = True
if mask is not None:
if use_time_mask:
mask = mask.bool()
else: # [b x len_k] -> [b x 1 x 1 x len_k]
mask = mask.unsqueeze(1).unsqueeze(2).bool()
else:
if use_time_mask:
mask = input_lin_results.new(len_q, len_q).zero_().bool()
else:
mask = input_lin_results.new(bsz, 1, 1, len_q).zero_().bool() # works
cuda_module = self_multihead_attn_blaslt
attn_scores, \
dropout_results, \
dropout_mask, \
matmul2_results = cuda_module.forward_compact(use_time_mask, is_training, heads,
input_lin_results,
mask, dropout_prob)
ctx.save_for_backward(heads_t,
scale_t,
dropout_results,
attn_scores,
input_lin_results,
mask,
dropout_mask,
dropout_prob_t,
mask)
if return_coverage:
return (matmul2_results, dropout_results)
else:
return (matmul2_results,)
ctx.fused = False
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(input_lin_results.size(0), input_lin_results.size(1) * heads, 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
if incremental:
keys = keys.contiguous().view(len_q, bsz, heads * head_dim)
values = values.contiguous().view(len_q, bsz, heads * head_dim)
if 'k' in incremental_cache and 'v' in incremental_cache:
keys = torch.cat([incremental_cache['k'], keys], dim=0) # time first
incremental_cache['k'] = keys
values = torch.cat([incremental_cache['v'], values], dim=0) # time first
incremental_cache['v'] = values
else:
incremental_cache['k'] = keys
incremental_cache['v'] = values
keys = keys.view(-1, bsz * heads, head_dim)
values = values.view(-1, bsz * heads, head_dim)
len_k = keys.size(0)
# apply rotary position encodings
if rotary_pos_enc:
            assert pos_emb is not None
cos, sin = pos_emb
queries_, keys_ = apply_rotary_pos_emb(queries, keys, cos, sin)
queries.copy_(queries_)
keys.copy_(keys_)
else:
sin, cos = null_tensor, null_tensor
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
        # Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
if queries.is_cuda:
matmul1_results = torch.empty((queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype,
device=queries.device)
matmul1_results = torch.baddbmm(matmul1_results, queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results, beta=0.0, alpha=scale_t[0])
else:
matmul1_results = torch.matmul(queries.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2))
matmul1_results.mul_(scale_t[0])
if mask is not None:
# Self Attention Time Mask
if use_time_mask:
assert (len(mask.size()) == 2), "Timing mask is not 2D!"
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask, float('-inf'))
# Key Padding Mask
else:
batches, seql_q, seql_k = matmul1_results.size()
seqs = int(batches / heads)
matmul1_results = matmul1_results.view(seqs, heads, seql_q, seql_k)
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float('-inf'))
matmul1_results = matmul1_results.view(seqs * heads, seql_q, seql_k)
# Softmax and Dropout attention
softmax_results = F.softmax(matmul1_results, dim=-1)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1. - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
nan_mask = torch.isnan(dropout_results)
if nan_mask.any():
dropout_results.masked_fill_(nan_mask, 0)
# Matmul2 Batched GEMMs
# The output tensor specification is needed here to specify the non-standard output.
# Given that pytorch cannot currently perform autograd with an output tensor specified,
# this requires a backward pass specified.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
if queries.is_cuda:
matmul2_results = torch.empty(( dropout_results.size(0), dropout_results.size(1), values.size(2)),
dtype=dropout_results.dtype, device=queries.device)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
else:
matmul2_results = torch.matmul(dropout_results, values.transpose(0, 1))
        # [seqs*heads, seql_q, head_dim] -> [seql_q, seqs*heads, head_dim]
matmul2_results = matmul2_results.transpose(0, 1).contiguous()
ctx.save_for_backward(heads_t,
scale_t,
dropout_results,
softmax_results,
input_lin_results,
mask,
dropout_mask,
dropout_prob_t,
sin, cos)
if return_coverage:
return (matmul2_results, dropout_results)
else:
return (matmul2_results,)
@staticmethod
@custom_bwd
def backward(ctx, *output_grads):
if ctx.return_coverage:
output_lin_grads, coverage_grads = output_grads
else:
output_lin_grads = output_grads[0]
if ctx.fused:
heads_t, \
scale_t, \
dropout_results, \
attn_scores, \
input_lin_results, \
mask, \
dropout_mask, \
dropout_prob_t, pad_mask = ctx.saved_tensors
cuda_module = self_multihead_attn_blaslt
if ctx.recompute:
raise NotImplementedError
else:
input_lin_results_grads, = cuda_module.backward_compact(ctx.use_time_mask, heads_t[0],
output_lin_grads.contiguous(),
dropout_results, attn_scores,
input_lin_results, dropout_mask, dropout_prob_t[0])
return None, None, None, \
input_lin_results_grads, \
None, None, \
None, None, \
None, None, \
None, None, None
heads_t, \
scale_t, \
dropout_results, \
softmax_results, \
input_lin_results, \
mask, \
dropout_mask, \
dropout_prob_t, \
sin, cos = ctx.saved_tensors
        # input_lin_results was saved in forward already viewed as [seql_q, batches=seqs*heads, 3, head_dim],
        # so the head dimension can be read directly from the last axis.
        head_dim = input_lin_results.size(3)
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
        # Sequences and heads are combined to make the batch of the Batched GEMM
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
len_key = keys.size(0)
len_q = input_lin_results.size(0)
batches = input_lin_results.size(1)
# Slice out q,k,v from one big set of gradients entering the input linear's bprop
# (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared before hand to properly slice out query, key, and value grads.
input_lin_results_grads = torch.empty_like(input_lin_results)
queries_grads = input_lin_results_grads[:, :, 0, :]
keys_grads = input_lin_results_grads[:, :, 1, :]
values_grads = input_lin_results_grads[:, :, 2, :]
        # There is no output projection in the compact variant: the incoming gradients are already
        # w.r.t. the attention context [seql_q, seqs*heads, head_dim]; reshape them for the batched GEMMs.
        output_lin_grads = output_lin_grads.contiguous()
        output_lin_grads = output_lin_grads.view(len_q, batches, head_dim).transpose(0, 1)
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
        # Matmul2 - DGRAD2
        # Input1: (activations) dropout results [seqs*heads, seql_q, seql_k] transpose(1,2)
        # Input2: (data grads)  [seqs*heads, seql_q, head_dim]
        # Output: (values grads) [seqs*heads, seql_k, head_dim] transpose(0,1)
        # GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
        # Mask and Scaling for Dropout (not a publicly documented op)
        dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
        # Softmax Grad (not a publicly documented op)
try:
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results.dtype)
except TypeError:
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
torch.baddbmm(queries_grads.transpose(0, 1), softmax_grads, keys.transpose(0, 1),
out=queries_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
torch.baddbmm(keys_grads.transpose(0, 1), softmax_grads.transpose(1, 2), queries.transpose(0, 1),
out=keys_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
if ctx.rotary_pos_enc:
queries_grads_ = queries_grads * cos + rotate_backward(sin * queries_grads)
keys_grads_ = keys_grads * cos + rotate_backward(sin * keys_grads)
queries_grads.copy_(queries_grads_)
keys_grads.copy_(keys_grads_)
        # reshape the gradients back to the shape of the projected 3D input [seql_q, seqs, 3*embed_dim]
        input_lin_results_grads = input_lin_results_grads.view(len_q, batches // heads_t.item(),
                                                               heads_t.item() * 3 * head_dim)
        return None, None, None, \
               input_lin_results_grads, \
               None, None, \
               None, None, \
               None, None, \
               None, None, None
def self_attn_compact_func(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return SelfAttnCompactFunc.apply(*args)
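# --- illustrative sketch (not part of the original file) ---------------------------------------
# The "compact" interface above differs from self_attn_func in that it expects the already
# projected qkv tensor [T x B x 3*embed_dim] and returns the attention context (before the output
# projection) with shape [T x B*heads x head_dim]. Toy sizes only; uses the pure-PyTorch fallback.
def _self_attn_compact_example():
    heads, embed_dim, len_q, bsz = 4, 32, 6, 2
    qkv = torch.randn(len_q, bsz, 3 * embed_dim)
    context, coverage = self_attn_compact_func(False, False, heads, qkv,  # use_time_mask, is_training
                                               None, 0.0,                 # mask, dropout
                                               False, None,               # rotary_pos_enc, pos_emb
                                               False, None,               # incremental, incremental_cache
                                               False, True, False)        # low_precision, return_coverage, recompute
    return context, coverage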
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='reversible transformer')
parser.add_argument('-model_size', type=int, default=32,
help='Size of embedding / transformer hidden')
parser.add_argument('-gpu', default=0, type=int,
help="Seed for deterministic runs.")
test_function = self_attn_func
opt = parser.parse_args()
torch.cuda.set_device(opt.gpu)
opt.layers = 2
opt.variational_dropout = False
opt.dropout = 0.0
opt.attn_dropout = 0.0
opt.n_heads = 4
opt.inner_size = 16
opt.head_dim = opt.model_size // opt.n_heads
class Parameters(torch.nn.Module):
        def __init__(self, model_size=16, heads=1):
            super().__init__()
            self.model_size = model_size
self.heads = heads
self.head_dim = model_size // heads
self.in_proj_weight = torch.Tensor(3 * model_size, model_size)
self.out_proj_weight = torch.Tensor(model_size, model_size)
self.in_proj_bias = torch.Tensor(3 * model_size)
self.out_proj_bias = torch.Tensor(model_size)
self.reset_parameters()
def reset_parameters(self):
std_ = 0.02
torch.nn.init.normal_(self.in_proj_weight, 0.0, std_)
torch.nn.init.normal_(self.out_proj_weight, 0.0, std_)
torch.nn.init.constant_(self.in_proj_bias, 0.)
torch.nn.init.constant_(self.out_proj_bias, 0.)
class TestAttention(torch.nn.Module):
def __init__(self, test_function, model_size=16, heads=1):
super().__init__()
self.model_size = model_size
self.heads = heads
self.head_dim = model_size // heads
self.function = test_function
def forward(self, input_weights, output_weights, input, input_biases, output_biases, mask,
use_time_mask=False):
is_training = True
dropout = 0.0
double_precision = True
return_coverage = False
            # argument order of SelfAttnFunc.forward:
            # use_time_mask, is_training, heads, inputs,
            # input_weights, output_weights,
            # input_biases, output_biases,
            # mask, dropout_prob,
            # rotary_pos_enc, pos_emb,
            # incremental, incremental_cache,
            # low_precision, return_coverage, recompute
            return self.function(use_time_mask, is_training, self.heads, input,
                                 input_weights, output_weights,
                                 input_biases, output_biases,
                                 mask, dropout,
                                 False, None,                    # rotary_pos_enc, pos_emb
                                 False, None,                    # incremental, incremental_cache
                                 False, return_coverage, False)  # low_precision, return_coverage, recompute
bsz = 4
len_q = 15
len_r = len_q
input_states = torch.randn(*(len_q, bsz, opt.model_size)).double().cuda()
input_states.requires_grad = True
net = TestAttention(test_function, model_size=opt.model_size, heads=opt.n_heads)
parameters = Parameters(opt.model_size, opt.n_heads)
in_proj_weight = parameters.in_proj_weight.double().cuda()
out_proj_weight = parameters.out_proj_weight.double().cuda()
in_proj_bias = parameters.in_proj_bias.double().cuda()
out_proj_bias = parameters.out_proj_bias.double().cuda()
in_proj_weight.requires_grad = True
out_proj_weight.requires_grad = True
in_proj_bias.requires_grad = True
out_proj_bias.requires_grad = True
mask = input_states.new(*(bsz, len_r)).bernoulli_(p=0.25).bool()
print("gradchecking start.")
use_time_mask = False
torch.autograd.gradcheck(net, (in_proj_weight, out_proj_weight, input_states,
in_proj_bias, out_proj_bias,
mask, use_time_mask), atol=1e-04, rtol=0.001)
mask = input_states.new(*(len_q, len_r)).bernoulli_(p=0.25).bool()
print("gradchecking with time mask start.")
use_time_mask = True
torch.autograd.gradcheck(net, (in_proj_weight, out_proj_weight, input_states,
in_proj_bias, out_proj_bias,
mask, use_time_mask), atol=1e-04, rtol=0.001)
print("gradchecking completed.")
| 43,104
| 44.421496
| 130
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/encdec_attention.py
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .encdec_attention_func import encdec_attn_func
import onmt
class EncdecMultiheadAttn(nn.Module):
"""Multi-headed encoder-decoder attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, num_heads, embed_dim, attn_drop=0.):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = attn_drop
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = False
self.scaling = self.head_dim ** -0.5 # this value is hardcoded in the "fast" implementation
self.in_proj_weight_q = Parameter(torch.Tensor(embed_dim, embed_dim))
self.in_proj_weight_kv = Parameter(torch.Tensor(2 * embed_dim, embed_dim))
self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.register_parameter('in_proj_bias_q', None)
self.register_parameter('in_proj_bias_kv', None)
self.in_proj_bias_q = None
self.in_proj_bias_kv = None
self.out_proj_bias = None
self.attn_func = encdec_attn_func
self.reset_parameters()
self.autograd = False
def convert_autograd(self):
if self.autograd:
return
with torch.no_grad():
self.autograd = True
self.linear_q = torch.nn.Linear(self.embed_dim, self.embed_dim, bias=False)
self.linear_kv = torch.nn.Linear(self.embed_dim, 2 * self.embed_dim, bias=False)
self.out_linear = torch.nn.Linear(self.embed_dim, self.embed_dim, bias=False)
self.linear_q.weight.copy_(self.in_proj_weight_q)
self.linear_kv.weight.copy_(self.in_proj_weight_kv)
self.out_linear.weight.copy_(self.out_proj_weight)
del self.in_proj_weight_q
del self.in_proj_weight_kv
del self.out_proj_weight
def reset_parameters(self, init='normal'):
if init == 'normal': # xavier normal
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.in_proj_weight_q, 0.0, std_)
nn.init.normal_(self.in_proj_weight_kv, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
else: # xavier uniform
std_ = math.sqrt(6.0 / (self.embed_dim + self.embed_dim))
nn.init.uniform_(self.in_proj_weight_q, -std_, std_)
nn.init.uniform_(self.in_proj_weight_kv, -std_, std_)
nn.init.uniform_(self.out_proj_weight, -std_, std_)
def forward(self, query, key, value,
attn_mask=None, incremental=False, incremental_cache=None,
rotary_pos_enc=False, pos_emb_q=None, pos_emb_k=None,
**kwargs):
assert value is key, "ERROR: Keys and values must be the same."
is_training = self.training
if self.autograd:
# assert not self.training
mask = attn_mask
if mask is not None:
# Self Attention Pad Mask
mask = mask.to(torch.bool)
if len(mask.shape) == 3:
mask = mask.unsqueeze(1) # for the head dimension
else:
mask = mask.unsqueeze(1).unsqueeze(2) # for the head and query dimension
len_q = query.size(0)
len_k = key.size(0)
bsz = query.size(1)
heads = self.num_heads
head_dim = self.head_dim
scale_t = torch.tensor([head_dim ** -0.5])
input_lin_q_results = self.linear_q(query)
queries = input_lin_q_results.view(len_q, bsz * heads, head_dim)
if incremental and ('c_k' in incremental_cache and 'c_v' in incremental_cache):
keys = incremental_cache['c_k']
values = incremental_cache['c_v']
keys = keys.view(len_k, bsz * heads, head_dim)
values = values.view(len_k, bsz * heads, head_dim)
else:
input_lin_kv_results = self.linear_kv(key)
input_lin_kv_results = input_lin_kv_results.view(len_k, bsz * heads, 2, head_dim)
keys = input_lin_kv_results[:, :, 0, :]
values = input_lin_kv_results[:, :, 1, :]
if incremental:
keys = keys.contiguous().view(len_k, bsz, heads * head_dim)
values = values.contiguous().view(len_k, bsz, heads * head_dim)
incremental_cache['c_k'] = keys
incremental_cache['c_v'] = values
keys = keys.view(len_k, bsz * heads, head_dim)
values = values.view(len_k, bsz * heads, head_dim)
matmul1_results = torch.matmul(queries.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2))
matmul1_results.mul_(scale_t[0])
if mask is not None:
matmul1_results = matmul1_results.view(bsz, heads, len_q, len_k)
# after unsqueezing the mask should have size [bsz x 1 x 1 x seql_k]
matmul1_results = matmul1_results.masked_fill_(mask, float('-inf'))
matmul1_results = matmul1_results.view(bsz * heads, len_q, len_k)
softmax_results = F.softmax(matmul1_results, dim=-1, dtype=torch.float32).type_as(matmul1_results)
dropout_results = F.dropout(softmax_results, self.dropout, training=self.training)
matmul2_results = torch.matmul(dropout_results, values.transpose(0, 1)).transpose(0, 1)
matmul2_results = matmul2_results.contiguous().view(len_q, bsz, self.embed_dim)
outputs = self.out_linear(matmul2_results)
return outputs, softmax_results
else:
recompute = onmt.constants.recompute
outputs, coverage = self.attn_func(recompute, is_training,
self.num_heads, query, key,
self.in_proj_weight_q, self.in_proj_weight_kv,
self.out_proj_weight, attn_mask, self.dropout,
incremental, incremental_cache,
rotary_pos_enc, pos_emb_q, pos_emb_k,
False, True) # double precision False and return coverage True
return outputs, coverage
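# --- illustrative sketch (not part of the original file) ---------------------------------------
# Toy usage of EncdecMultiheadAttn in the plain autograd mode (no fused kernels); shapes follow
# the [T x B x H] convention used above. All sizes are assumptions made for the example.
def _encdec_attn_example():
    heads, embed_dim, len_q, len_k, bsz = 4, 32, 5, 9, 2
    attn = EncdecMultiheadAttn(heads, embed_dim, attn_drop=0.0)
    attn.convert_autograd()
    query = torch.randn(len_q, bsz, embed_dim)
    context = torch.randn(len_k, bsz, embed_dim)
    out, coverage = attn(query, context, context, attn_mask=None)
    return out, coverage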
| 6,634
| 41.261146
| 111
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/extensions/setup.py
|
import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
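# Illustrative example (assumption about a typical `nvcc -V` output): for a line containing
# "release 11.3, V11.3.109" the helper above returns bare_metal_major == "11" and
# bare_metal_minor == "3".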
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
# print(bare_metal_minor, bare_metal_major)
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
print("Cuda extensions are being compiled with a version of Cuda that does " +
"not match the version used to compile Pytorch binaries. " +
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) +
"In some cases, a minor-version mismatch will not cause later errors: " +
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk).")
return int(bare_metal_minor), int(bare_metal_major)
# Check, if ATen/CUDAGenerator.h is found, otherwise use the new
# ATen/CUDAGeneratorImpl.h, due to breaking change in https://github.com/pytorch/pytorch/pull/36026
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
cmdclass = {}
ext_modules = []
cmdclass['build_ext'] = BuildExtension.with_options(use_ninja=False)
cc_flag = []
print(cpp_extension.CUDA_HOME)
_, bare_metal_major, _ = get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
cc_flag.append('-gencode')
cc_flag.append('arch=compute_75,code=sm_75')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_80,code=sm_80')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_86,code=sm_86')
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
# subprocess.run(["git", "submodule", "update", "--init", "cutlass"])
# subprocess.run(["git", "clone", "https://github.com/NVIDIA/cutlass.git", "multihead_attn/cutlass"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "ed2ed4d667ce95e1371bd62db32b6a114e774336"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "fe3438a3c1ccbdd03dc1aca3bb68099a9e2a58bd"])
bare_metal_minor, bare_metal_major = check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)
print("GENERATOR FLAG:", generator_flag)
# ext_modules.append(
# CUDAExtension(name='encdec_multihead_attn_cuda',
# sources=['multihead_attn/encdec_multihead_attn.cpp',
# 'multihead_attn/encdec_multihead_attn_cuda.cu'],
# include_dirs=[os.path.join(this_dir, 'multihead_attn/cutlass')],
# extra_compile_args={'cxx': ['-O3', ] + version_dependent_macros + generator_flag,
# 'nvcc': ['-O3',
# '-I./cutlass/',
# '-U__CUDA_NO_HALF_OPERATORS__',
# '-U__CUDA_NO_HALF_CONVERSIONS__',
# '--expt-relaxed-constexpr',
# '--expt-extended-lambda',
# '--use_fast_math'] + version_dependent_macros +
# generator_flag + cc_flag}))
#
ext_modules.append(
CUDAExtension(name='self_multihead_attn_cuda',
sources=['multihead_attn/self_multihead_attn.cpp',
'multihead_attn/self_multihead_attn_cuda.cu'],
extra_compile_args={'cxx': ['-O3', ] + version_dependent_macros + generator_flag,
'nvcc': ['-O3',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'--expt-relaxed-constexpr',
'--expt-extended-lambda',
'--use_fast_math'] + version_dependent_macros +
generator_flag}))
#
ext_modules.append(
CUDAExtension(name='encdec_multihead_attn_bias_cuda',
sources=['multihead_attn/encdec_multihead_attn_bias.cpp',
'multihead_attn/encdec_multihead_attn_bias_cuda.cu'],
extra_compile_args={'cxx': ['-O3', ] + version_dependent_macros + generator_flag,
'nvcc': ['-O3',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'--expt-relaxed-constexpr',
'--expt-extended-lambda',
'--use_fast_math'] + version_dependent_macros +
generator_flag}))
#
ext_modules.append(
CUDAExtension(name='fused_dropout_add_cuda',
sources=['dropout_add/fused_dropout_add.cpp',
'dropout_add/fused_dropout_add_cuda_kernel.cu'],
extra_compile_args={'cxx': ['-O3', ],
'nvcc': ['-O3',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'--expt-relaxed-constexpr',
'--expt-extended-lambda',
'--use_fast_math'] + cc_flag + generator_flag}))
# ext_modules.append(
# CUDAExtension(name='mask_softmax_dropout_cuda',
# sources=['multihead_attn/masked_softmax_dropout.cpp',
# 'multihead_attn/masked_softmax_dropout_cuda.cu'],
# include_dirs=[os.path.join(this_dir, 'multihead_attn/cutlass')],
# extra_compile_args={'cxx': ['-O3', ],
# 'nvcc': ['-O3',
# '-I./cutlass/include',
# '-U__CUDA_NO_HALF_OPERATORS__',
# '-U__CUDA_NO_HALF_CONVERSIONS__',
# '--expt-relaxed-constexpr',
# '--expt-extended-lambda',
# '--use_fast_math'] + cc_flag}))
# ext_modules.append(
# CUDAExtension(name='rel_self_attn_cuda',
# sources=['relative_self_attn.cpp',
# 'relative_self_attn_cuda.cu'],
# extra_compile_args={'cxx': ['-O3',],
# 'nvcc':['-O3',
# '-I./cutlass/',
# '-U__CUDA_NO_HALF_OPERATORS__',
# '-U__CUDA_NO_HALF_CONVERSIONS__',
# '--expt-relaxed-constexpr',
# '--expt-extended-lambda',
# '--use_fast_math'] + cc_flag}))
ext_modules.append(
CUDAExtension(name='fast_layer_norm_cuda',
sources=['layer_norm/ln_api.cpp',
'layer_norm/ln_fwd_cuda_kernel.cu',
'layer_norm/ln_bwd_semi_cuda_kernel.cu'],
include_dirs=[os.path.join(this_dir, 'include')],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros + generator_flag,
'nvcc': ['-O3',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'-U__CUDA_NO_BFLOAT16_OPERATORS__',
'-U__CUDA_NO_BFLOAT16_CONVERSIONS__',
'-U__CUDA_NO_BFLOAT162_OPERATORS__',
'-U__CUDA_NO_BFLOAT162_CONVERSIONS__',
'--expt-relaxed-constexpr',
'--expt-extended-lambda',
'--use_fast_math'] + cc_flag + version_dependent_macros + generator_flag}))
#
#
# ext_modules.append(
# CUDAExtension(name='fused_optim',
# sources=['fused_optim/frontend.cpp',
# 'fused_optim/multi_tensor_scale_kernel.cu',
# 'fused_optim/multi_tensor_axpby_kernel.cu',
# 'fused_optim/multi_tensor_l2norm_kernel.cu',
# 'fused_optim/multi_tensor_l2norm_scale_kernel.cu',
# 'fused_optim/multi_tensor_adam.cu'],
# include_dirs=[os.path.join(this_dir, 'include')],
# extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
# 'nvcc': ['-lineinfo',
# '-O3',
# '--resource-usage',
# '--use_fast_math'] + version_dependent_macros}))
#
# MLP functions
ext_modules.append(
CUDAExtension(name='fused_mlp_relu',
sources=['mlp/mlp_relu.cpp',
'mlp/mlp_relu_cuda.cu'],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc': ['-O3'] + version_dependent_macros + generator_flag}))
# ext_modules.append(
# CUDAExtension(name='fused_mlp_silu',
# sources=['mlp/mlp_silu.cpp',
# 'mlp/mlp_silu_cuda.cu'],
# extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
# 'nvcc': ['-O3'] + version_dependent_macros}))
#
# Approximated GELU function
# ext_modules.append(
# CUDAExtension(name='fused_mlp_agelu',
# sources=['mlp/mlp_agelu.cpp',
# 'mlp/mlp_agelu_cuda.cu'],
# extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
# 'nvcc': ['-O3'] + cc_flag + version_dependent_macros}))
ext_modules.append(
CUDAExtension(name='fused_mlp_gelu',
sources=['mlp/mlp_gelu.cpp',
'mlp/mlp_gelu_cuda.cu'],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc': ['-O3'] + version_dependent_macros + generator_flag}))
ext_modules.append(
CUDAExtension(name='xentropy_cuda',
sources=['xentropy/interface.cpp',
'xentropy/xentropy_kernel.cu'],
include_dirs=[os.path.join(this_dir, 'include')],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc': ['-O3'] + version_dependent_macros}))
if bare_metal_minor >= 5 and bare_metal_major >= 11:
ext_modules.append(
CUDAExtension(name='mlp_gelu_blaslt',
sources=['mlp_blaslt/mlp_gelu.cpp',
'mlp_blaslt/mlp_gelu_cuda.cu'],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc': ['-O3'] + version_dependent_macros +
generator_flag + cc_flag}))
# TODO: self-attn and enc-attn blaslt
ext_modules.append(
CUDAExtension(name='self_multihead_attn_blaslt',
sources=['multihead_attn_blaslt/self_multihead_attn.cpp',
'multihead_attn_blaslt/self_multihead_attn_cuda.cu'],
extra_compile_args={'cxx': ['-O3', ] + version_dependent_macros + generator_flag,
'nvcc': ['-O3',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'--expt-relaxed-constexpr',
'--expt-extended-lambda',
'--use_fast_math'] + version_dependent_macros +
generator_flag}))
ext_modules.append(
CUDAExtension(name='encdec_multihead_attn_bias_blaslt',
sources=['multihead_attn_blaslt/encdec_multihead_attn_bias.cpp',
'multihead_attn_blaslt/encdec_multihead_attn_bias_cuda.cu'],
extra_compile_args={'cxx': ['-O3', ] + version_dependent_macros + generator_flag,
'nvcc': ['-O3',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'--expt-relaxed-constexpr',
'--expt-extended-lambda',
'--use_fast_math'] + version_dependent_macros +
generator_flag}))
ext_modules.append(
CUDAExtension(name='relative_self_attn_blaslt',
sources=['multihead_attn_blaslt/relative_self_attn.cpp',
'multihead_attn_blaslt/relative_self_attn_cuda.cu'],
extra_compile_args={'cxx': ['-O3', ] + version_dependent_macros + generator_flag,
'nvcc': ['-O3',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'--expt-relaxed-constexpr',
'--expt-extended-lambda',
'--use_fast_math'] + version_dependent_macros +
generator_flag}))
ext_modules.append(
CUDAExtension(name='self_multihead_attn_bias_blaslt',
sources=['multihead_attn_blaslt/self_multihead_attn_bias.cpp',
'multihead_attn_blaslt/self_multihead_attn_bias_cuda.cu'],
extra_compile_args={'cxx': ['-O3', ] + version_dependent_macros + generator_flag,
'nvcc': ['-O3',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'--expt-relaxed-constexpr',
'--expt-extended-lambda',
'--use_fast_math'] + version_dependent_macros +
generator_flag}))
setup(
name='nmtgminor_cuda',
version='0.1',
description='CUDA/C++ Pytorch extension for multi-head attention ported from NVIDIA apex',
ext_modules=ext_modules,
cmdclass=cmdclass,
)
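# Hedged usage note (not part of the original file): the extensions above are typically built from
# this directory with a standard setuptools invocation, e.g.
#     python setup.py install
# provided a CUDA toolkit matching the PyTorch build (checked above) and a C++ compiler are available.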
| 17,708
| 51.862687
| 122
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/extensions/setup_base.py
|
import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
# print(bare_metal_minor, bare_metal_major)
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
print("Cuda extensions are being compiled with a version of Cuda that does " +
"not match the version used to compile Pytorch binaries. " +
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) +
"In some cases, a minor-version mismatch will not cause later errors: " +
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk).")
return int(bare_metal_minor), int(bare_metal_major)
# Check, if ATen/CUDAGenerator.h is found, otherwise use the new
# ATen/CUDAGeneratorImpl.h, due to breaking change in https://github.com/pytorch/pytorch/pull/36026
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
cmdclass = {}
ext_modules = []
cmdclass['build_ext'] = BuildExtension.with_options(use_ninja=False)
cc_flag = []
print(cpp_extension.CUDA_HOME)
_, bare_metal_major, _ = get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
cc_flag.append('-gencode')
cc_flag.append('arch=compute_75,code=sm_75')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_80,code=sm_80')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_86,code=sm_86')
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
# subprocess.run(["git", "submodule", "update", "--init", "cutlass"])
# subprocess.run(["git", "clone", "https://github.com/NVIDIA/cutlass.git", "multihead_attn/cutlass"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "ed2ed4d667ce95e1371bd62db32b6a114e774336"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "fe3438a3c1ccbdd03dc1aca3bb68099a9e2a58bd"])
bare_metal_minor, bare_metal_major = check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)
print("GENERATOR FLAG:", generator_flag)
# ext_modules.append(
# CUDAExtension(name='encdec_multihead_attn_cuda',
# sources=['multihead_attn/encdec_multihead_attn.cpp',
# 'multihead_attn/encdec_multihead_attn_cuda.cu'],
# include_dirs=[os.path.join(this_dir, 'multihead_attn/cutlass')],
# extra_compile_args={'cxx': ['-O3', ] + version_dependent_macros + generator_flag,
# 'nvcc': ['-O3',
# '-I./cutlass/',
# '-U__CUDA_NO_HALF_OPERATORS__',
# '-U__CUDA_NO_HALF_CONVERSIONS__',
# '--expt-relaxed-constexpr',
# '--expt-extended-lambda',
# '--use_fast_math'] + version_dependent_macros +
# generator_flag + cc_flag}))
#
# ext_modules.append(
# CUDAExtension(name='self_multihead_attn_cuda',
# sources=['multihead_attn/self_multihead_attn.cpp',
# 'multihead_attn/self_multihead_attn_cuda.cu'],
# extra_compile_args={'cxx': ['-O3', ] + version_dependent_macros + generator_flag,
# 'nvcc': ['-O3',
# '-U__CUDA_NO_HALF_OPERATORS__',
# '-U__CUDA_NO_HALF_CONVERSIONS__',
# '--expt-relaxed-constexpr',
# '--expt-extended-lambda',
# '--use_fast_math'] + version_dependent_macros +
# generator_flag}))
# #
# ext_modules.append(
# CUDAExtension(name='encdec_multihead_attn_bias_cuda',
# sources=['multihead_attn/encdec_multihead_attn_bias.cpp',
# 'multihead_attn/encdec_multihead_attn_bias_cuda.cu'],
# extra_compile_args={'cxx': ['-O3', ] + version_dependent_macros + generator_flag,
# 'nvcc': ['-O3',
# '-U__CUDA_NO_HALF_OPERATORS__',
# '-U__CUDA_NO_HALF_CONVERSIONS__',
# '--expt-relaxed-constexpr',
# '--expt-extended-lambda',
# '--use_fast_math'] + version_dependent_macros +
# generator_flag}))
# #
# ext_modules.append(
# CUDAExtension(name='fused_dropout_add_cuda',
# sources=['dropout_add/fused_dropout_add.cpp',
# 'dropout_add/fused_dropout_add_cuda_kernel.cu'],
# extra_compile_args={'cxx': ['-O3', ],
# 'nvcc': ['-O3',
# '-U__CUDA_NO_HALF_OPERATORS__',
# '-U__CUDA_NO_HALF_CONVERSIONS__',
# '--expt-relaxed-constexpr',
# '--expt-extended-lambda',
# '--use_fast_math'] + cc_flag + generator_flag}))
# ext_modules.append(
# CUDAExtension(name='mask_softmax_dropout_cuda',
# sources=['multihead_attn/masked_softmax_dropout.cpp',
# 'multihead_attn/masked_softmax_dropout_cuda.cu'],
# include_dirs=[os.path.join(this_dir, 'multihead_attn/cutlass')],
# extra_compile_args={'cxx': ['-O3', ],
# 'nvcc': ['-O3',
# '-I./cutlass/include',
# '-U__CUDA_NO_HALF_OPERATORS__',
# '-U__CUDA_NO_HALF_CONVERSIONS__',
# '--expt-relaxed-constexpr',
# '--expt-extended-lambda',
# '--use_fast_math'] + cc_flag}))
# ext_modules.append(
# CUDAExtension(name='rel_self_attn_cuda',
# sources=['relative_self_attn.cpp',
# 'relative_self_attn_cuda.cu'],
# extra_compile_args={'cxx': ['-O3',],
# 'nvcc':['-O3',
# '-I./cutlass/',
# '-U__CUDA_NO_HALF_OPERATORS__',
# '-U__CUDA_NO_HALF_CONVERSIONS__',
# '--expt-relaxed-constexpr',
# '--expt-extended-lambda',
# '--use_fast_math'] + cc_flag}))
ext_modules.append(
CUDAExtension(name='fast_layer_norm_cuda',
sources=['layer_norm/ln_api.cpp',
'layer_norm/ln_fwd_cuda_kernel.cu',
'layer_norm/ln_bwd_semi_cuda_kernel.cu'],
include_dirs=[os.path.join(this_dir, 'include')],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros + generator_flag,
'nvcc': ['-O3',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'-U__CUDA_NO_BFLOAT16_OPERATORS__',
'-U__CUDA_NO_BFLOAT16_CONVERSIONS__',
'-U__CUDA_NO_BFLOAT162_OPERATORS__',
'-U__CUDA_NO_BFLOAT162_CONVERSIONS__',
'--expt-relaxed-constexpr',
'--expt-extended-lambda',
'--use_fast_math'] + cc_flag + version_dependent_macros + generator_flag}))
#
#
# ext_modules.append(
# CUDAExtension(name='fused_optim',
# sources=['fused_optim/frontend.cpp',
# 'fused_optim/multi_tensor_scale_kernel.cu',
# 'fused_optim/multi_tensor_axpby_kernel.cu',
# 'fused_optim/multi_tensor_l2norm_kernel.cu',
# 'fused_optim/multi_tensor_l2norm_scale_kernel.cu',
# 'fused_optim/multi_tensor_adam.cu',
# 'fused_optim/multi_tensor_lamb_stage_1.cu',
# 'fused_optim/multi_tensor_lamb_stage_2.cu',
# 'fused_optim/multi_tensor_lamb.cu'],
# include_dirs=[os.path.join(this_dir, 'include')],
# extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
# 'nvcc': ['-lineinfo',
# '-O3',
# '--resource-usage',
# '--use_fast_math'] + version_dependent_macros}))
#
# MLP functions
ext_modules.append(
CUDAExtension(name='fused_mlp_relu',
sources=['mlp/mlp_relu.cpp',
'mlp/mlp_relu_cuda.cu'],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc': ['-O3'] + version_dependent_macros + generator_flag}))
# ext_modules.append(
# CUDAExtension(name='fused_mlp_silu',
# sources=['mlp/mlp_silu.cpp',
# 'mlp/mlp_silu_cuda.cu'],
# extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
# 'nvcc': ['-O3'] + version_dependent_macros}))
#
# Approximated GELU function
# ext_modules.append(
# CUDAExtension(name='fused_mlp_agelu',
# sources=['mlp/mlp_agelu.cpp',
# 'mlp/mlp_agelu_cuda.cu'],
# extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
# 'nvcc': ['-O3'] + cc_flag + version_dependent_macros}))
ext_modules.append(
CUDAExtension(name='fused_mlp_gelu',
sources=['mlp/mlp_gelu.cpp',
'mlp/mlp_gelu_cuda.cu'],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc': ['-O3'] + version_dependent_macros + generator_flag}))
ext_modules.append(
CUDAExtension(name='xentropy_cuda',
sources=['xentropy/interface.cpp',
'xentropy/xentropy_kernel.cu'],
include_dirs=[os.path.join(this_dir, 'include')],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc': ['-O3'] + version_dependent_macros}))
# check build if sm80
# ext_modules.append(
# CUDAExtension(name='fmhalib',
# sources=[
# 'fmha/fmha_api.cpp',
# 'fmha/src/fmha_noloop_reduce.cu',
# 'fmha/src/fmha_fprop_fp16_128_64_kernel.sm80.cu',
# 'fmha/src/fmha_fprop_fp16_256_64_kernel.sm80.cu',
# 'fmha/src/fmha_fprop_fp16_384_64_kernel.sm80.cu',
# 'fmha/src/fmha_fprop_fp16_512_64_kernel.sm80.cu',
# 'fmha/src/fmha_dgrad_fp16_128_64_kernel.sm80.cu',
# 'fmha/src/fmha_dgrad_fp16_256_64_kernel.sm80.cu',
# 'fmha/src/fmha_dgrad_fp16_384_64_kernel.sm80.cu',
# 'fmha/src/fmha_dgrad_fp16_512_64_kernel.sm80.cu',
# ],
# extra_compile_args={'cxx': ['-O3',
# ] + version_dependent_macros + generator_flag,
# 'nvcc': ['-O3',
# '-gencode', 'arch=compute_80,code=sm_80',
# '-U__CUDA_NO_HALF_OPERATORS__',
# '-U__CUDA_NO_HALF_CONVERSIONS__',
# '--expt-relaxed-constexpr',
# '--expt-extended-lambda',
# '--use_fast_math'] + version_dependent_macros + generator_flag},
# include_dirs=[os.path.join(this_dir, "fmha/src")]))
setup(
name='nmtgminor_cuda',
version='0.1',
    description='CUDA/C++ Pytorch extensions (fused layer norm, MLP and cross-entropy kernels) ported from NVIDIA apex',
ext_modules=ext_modules,
cmdclass=cmdclass,
)
| 15,451
| 50.165563
| 122
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/extensions/flashattn/setup.py
|
import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
from pathlib import Path
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
# print(bare_metal_minor, bare_metal_major)
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
print("Cuda extensions are being compiled with a version of Cuda that does " +
"not match the version used to compile Pytorch binaries. " +
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) +
"In some cases, a minor-version mismatch will not cause later errors: " +
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk).")
return int(bare_metal_minor), int(bare_metal_major)
# Check if ATen/CUDAGenerator.h is found; otherwise use the new
# ATen/CUDAGeneratorImpl.h, due to breaking change in https://github.com/pytorch/pytorch/pull/36026
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
cmdclass = {}
ext_modules = []
cmdclass['build_ext'] = BuildExtension.with_options(use_ninja=False)
cc_flag = []
_, bare_metal_major, _ = get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
cc_flag.append('-gencode')
cc_flag.append('arch=compute_75,code=sm_75')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_80,code=sm_80')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_86,code=sm_86')
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
# subprocess.run(["git", "submodule", "update", "--init", "cutlass"])
# subprocess.run(["git", "clone", "https://github.com/NVIDIA/cutlass.git", "multihead_attn/cutlass"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "ed2ed4d667ce95e1371bd62db32b6a114e774336"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "fe3438a3c1ccbdd03dc1aca3bb68099a9e2a58bd"])
bare_metal_minor, bare_metal_major = check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)
print("GENERATOR FLAG:", generator_flag)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
    if (int(bare_metal_major), int(bare_metal_minor)) >= (11, 2):
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
ext_modules.append(
CUDAExtension(
name="flash_attn_cuda",
sources=[
"fmha_api.cpp",
"src/fmha_fwd_hdim32.cu",
"src/fmha_fwd_hdim64.cu",
"src/fmha_fwd_hdim128.cu",
"src/fmha_bwd_hdim32.cu",
"src/fmha_bwd_hdim64.cu",
"src/fmha_bwd_hdim128.cu",
"src/fmha_block_fprop_fp16_kernel.sm80.cu",
"src/fmha_block_dgrad_fp16_kernel_loop.sm80.cu",
],
extra_compile_args={
"cxx": ["-O3", "-std=c++17"] + generator_flag,
"nvcc": append_nvcc_threads(
[
"-O3",
"-std=c++17",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_HALF2_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
"--ptxas-options=-v",
"-lineinfo"
]
+ generator_flag
+ cc_flag
),
},
include_dirs=[
Path(this_dir),
Path(this_dir) / 'src',
Path(this_dir) / 'cutlass' / 'include',
],
)
)
setup(
name='flashmha',
    version='0.1',
description='CUDA/C++ Pytorch extension for multi-head attention ported from NVIDIA apex',
ext_modules=ext_modules,
cmdclass=cmdclass,
)
| 6,031
| 34.904762
| 101
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/extensions/mlp_blaslt/test_fused_dense.py
|
from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat import custom_fwd, custom_bwd
try:
import fused_mlp_relu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_relu = None
try:
import fused_mlp_agelu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_agelu = None
try:
    import fused_mlp_gelu
except (ModuleNotFoundError, ImportError) as e:
    fused_mlp_gelu = None
try:
    import fused_mlp_silu
except (ModuleNotFoundError, ImportError) as e:
    fused_mlp_silu = None
try:
import mlp_gelu_blaslt
except (ModuleNotFoundError, ImportError) as e:
mlp_gelu_blaslt = None
torch.backends.cuda.matmul.allow_tf32 = False
#
# class MlpReluFunction(torch.autograd.Function):
# @staticmethod
# @custom_fwd(cast_inputs=torch.float16)
# def forward(ctx, activation, *args):
# output = fused_mlp.forward(args)
# ctx.save_for_backward(*args)
# ctx.outputs = output
# return output[0]
#
# @staticmethod
# @custom_bwd
# def backward(ctx, grad_o):
# grads = fused_mlp.backward(grad_o, ctx.outputs, ctx.saved_tensors)
# del ctx.outputs
# return (None, *grads)
#
#
class MlpReluFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
output = fused_mlp_relu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = output
dropout_mask = output[-1]
ctx.p = p
return output[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_relu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
class MlpSiluFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
outputs = fused_mlp_silu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = outputs
dropout_mask = outputs[-1]
ctx.p = p
return outputs[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_silu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
class MlpGeLUFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
outputs = fused_mlp_gelu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = outputs
dropout_mask = outputs[-1]
ctx.p = p
ctx.weight_requires_grad = args[1].requires_grad
return outputs[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
if ctx.weight_requires_grad:
grads = fused_mlp_gelu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
else:
grads = fused_mlp_gelu.backward_input_only(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
for i in range(len(ctx.saved_tensors) - 1):
grads.append(None)
del ctx.outputs
del ctx.p
return (None, *grads)
class MlpGeLUFunctionBLASLT(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
outputs = mlp_gelu_blaslt.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = outputs
dropout_mask = outputs[-1]
ctx.p = p
return outputs[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = mlp_gelu_blaslt.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
if fused_mlp_gelu:
mlp_gelu_function = MlpGeLUFunction.apply
else:
mlp_gelu_function = None
if mlp_gelu_blaslt:
mlp_gelu_function_blaslt = MlpGeLUFunctionBLASLT.apply
else:
mlp_gelu_function_blaslt = None
if __name__ == '__main__':
class MLP(torch.nn.Module):
"""Launch MLP in C++
Args:
mlp_sizes (list of int): MLP sizes. Example: [1024,1024,1024] will create 2 MLP layers with shape 1024x1024
            activation (str): 'relu', 'sigmoid' or 'gelu'. Default 'gelu'
            dropout (float): dropout probability applied after each hidden activation. Default 0.5
"""
def __init__(self, mlp_sizes, activation='gelu', dropout=0.5):
super(MLP, self).__init__()
self.num_layers = len(mlp_sizes) - 1
self.mlp_sizes = copy(mlp_sizes)
self.dropout = dropout
if activation == 'relu':
self.activation = 1
elif activation == 'sigmoid':
self.activation = 2
elif activation == 'gelu':
self.activation = 3
else:
                raise TypeError("activation must be relu, sigmoid or gelu.")
self.weights = []
self.biases = []
for i in range(self.num_layers):
w = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1], mlp_sizes[i]))
self.weights.append(w)
name = 'weight_{}'.format(i)
setattr(self, name, w)
b = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1]))
self.biases.append(b)
name = 'bias_{}'.format(i)
setattr(self, name, b)
self.reset_parameters()
def reset_parameters(self):
for weight in self.weights:
dimsum = weight.size(0) + weight.size(1)
std = math.sqrt(2. / float(dimsum))
nn.init.normal_(weight, 0., std)
for bias in self.biases:
# std = math.sqrt(1. / float(bias.size(0)))
# nn.init.normal_(bias, 0., 0.0)
# nn.init.constant_(bias, 0)
bias.data.zero_()
def forward(self, input, mask=None, ref=False, fastest=False):
if fastest and not ref:
return mlp_gelu_function_blaslt(self.dropout, input, *self.weights, *self.biases)
if ref:
                return self.forward_ref(input, mask)
return mlp_gelu_function(self.dropout, input, *self.weights, *self.biases)
def forward_ref(self, input, mask):
i = 0
output = input
for l in range(self.num_layers):
output = F.linear(output, self.weights[l], self.biases[l])
dropout_mask = mask[i:i + output.numel()]
pinv = 1 / (1 - self.dropout)
if l < self.num_layers - 1:
# print(mask.size())
# output = fast_silu(output) * dropout_mask.view(output.size(0), -1) * pinv
# output = GELUFunction.apply(output) * dropout_mask.view(output.size(0), -1) * pinv
if self.dropout > 0:
output = F.gelu(output) * dropout_mask.view_as(output) * pinv
else:
output = F.gelu(output)
i += output.numel()
return output
def extra_repr(self):
s = F"MLP sizes: {self.mlp_sizes}, activation={self.activation}, dropout={self.dropout}"
return s
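    # Illustrative usage sketch (mirrors the tests below; assumes the fused_mlp_gelu
    # extension has been built):
    #   mlp = MLP([1024, 4096, 1024], dropout=0.5).cuda().half()
    #   x = torch.randn(32, 64, 1024, device="cuda", dtype=torch.half, requires_grad=True)
    #   out, dropout_mask = mlp(x)                 # fused C++ path
    #   ref = mlp(x, mask=dropout_mask, ref=True)  # PyTorch reference reusing the same mask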
seq_len = 32
batch_size = 64
mlp_sizes = [1024, 4096, 1024]
# mlp_sizes = [1024, 250056]
num_iters = 32
class TestMLP(unittest.TestCase):
def test_creation(self):
MLP(mlp_sizes)
def test_forward_float(self):
for dropout in [0.0, 0.5]:
mlp = MLP(mlp_sizes, dropout=dropout).cuda()
ref_mlp = deepcopy(mlp)
test_input = torch.empty(seq_len, batch_size, mlp_sizes[0],
device="cuda").uniform_(-0.01, 0.01).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
mlp_out, dropout_mask = mlp(test_input)
ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
grad = torch.randn_like(mlp_out)
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-5, rtol=1e-5)
# def test_numeric(self):
# print("Test numeric 3D ....")
# for dropout in [0.0, 0.2, 0.5, 0.7]:
# mlp = MLP(mlp_sizes, activation='relu', dropout=dropout).cuda()
#
# print(mlp)
# ref_mlp = deepcopy(mlp)
#
# for _ in range(1):
# bsz = random.randint(8, batch_size // 8) * 8
# test_input = torch.empty(seq_len, bsz, mlp_sizes[0], device="cuda").uniform_(-1.,
# 1.).requires_grad_()
# ref_input = test_input.clone().detach().requires_grad_()
# mlp_out, dropout_mask = mlp(test_input)
# ref_out = ref_mlp.forward(ref_input, dropout_mask, ref=True)
#
# print(dropout_mask.sum() / dropout_mask.numel(), dropout_mask.numel())
# np.testing.assert_allclose(
# mlp_out.detach().cpu().numpy(),
# ref_out.detach().cpu().numpy(),
# atol=1e-5, rtol=1e-4)
#
# # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
# mlp_out.mean().mul(10.).backward()
# ref_out.mean().mul(10.).backward()
# np.testing.assert_allclose(
# test_input.grad.detach().cpu().numpy(),
# ref_input.grad.detach().cpu().numpy(),
# atol=1e-7, rtol=1e-5)
# np.testing.assert_allclose(
# mlp.biases[0].grad.detach().cpu().numpy(),
# ref_mlp.biases[0].grad.detach().cpu().numpy(),
# atol=1e-7, rtol=1e-5)
# def test_with_bias_half(self):
# for dropout in [0.0, 0.5]:
# mlp = MLP(mlp_sizes, dropout=dropout).cuda()
# mlp.half()
#
# ref_mlp = deepcopy(mlp)
#
# test_input = torch.empty(seq_len, batch_size, mlp_sizes[0],
# device="cuda", dtype=torch.half).uniform_(-0.01, 0.01).requires_grad_()
#
# ref_input = test_input.clone().detach().requires_grad_()
# mlp_out, dropout_mask = mlp(test_input)
# ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
# np.testing.assert_allclose(
# mlp_out.detach().cpu().numpy(),
# ref_out.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
#
# # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
# grad = torch.randn_like(mlp_out).half()
# mlp_out.mul_(1).backward(grad)
# ref_out.mul_(1).backward(grad)
# np.testing.assert_allclose(
# test_input.grad.detach().cpu().numpy(),
# ref_input.grad.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
#
# for l in range(mlp.num_layers):
# np.testing.assert_allclose(
# mlp.weights[l].grad.detach().cpu().numpy(),
# ref_mlp.weights[l].grad.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
# np.testing.assert_allclose(
# mlp.biases[l].grad.detach().cpu().numpy(),
# ref_mlp.biases[l].grad.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
#
# def test_with_bias_float(self):
# for dropout in [0.0, 0.5]:
# print("Testing with dropout ... %.2f" % dropout)
# mlp = MLP(mlp_sizes, dropout=dropout).cuda()
#
# ref_mlp = deepcopy(mlp)
#
# test_input = torch.empty(seq_len, batch_size, mlp_sizes[0],
# device="cuda").uniform_(-0.01, 0.01).requires_grad_()
#
# ref_input = test_input.clone().detach().requires_grad_()
#
# mlp_out, dropout_mask = mlp(test_input)
# ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
#
# grad = torch.randn_like(mlp_out)
# np.testing.assert_allclose(
# mlp_out.detach().cpu().numpy(),
# ref_out.detach().cpu().numpy(),
# atol=1e-5, rtol=1e-5)
#
# # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
# mlp_out.mul_(10).backward(grad)
# ref_out.mul_(10).backward(grad)
# np.testing.assert_allclose(
# test_input.grad.detach().cpu().numpy(),
# ref_input.grad.detach().cpu().numpy(),
# atol=1e-5, rtol=1e-4)
#
# for l in range(mlp.num_layers):
# np.testing.assert_allclose(
# mlp.weights[l].grad.detach().cpu().numpy(),
# ref_mlp.weights[l].grad.detach().cpu().numpy(),
# atol=1e-5, rtol=1e-5)
# np.testing.assert_allclose(
# mlp.biases[l].grad.detach().cpu().numpy(),
# ref_mlp.biases[l].grad.detach().cpu().numpy(),
# atol=1e-5, rtol=1e-5)
# def test_no_weight_grad(self):
#
# print("Test backward no weight grad ...")
# for dropout in [0.0, 0.35]:
# mlp = MLP(mlp_sizes, activation="gelu", dropout=dropout).cuda()
# print(mlp)
# for p in mlp.parameters():
# p.requires_grad = False
#
# ref_mlp = deepcopy(mlp)
#
# test_input = torch.empty(seq_len, batch_size, mlp_sizes[0], device="cuda", ).uniform_(-1.,
# 1.).requires_grad_()
# ref_input = test_input.clone().detach().requires_grad_()
# mlp_out, dropout_mask = mlp(test_input)
# ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
# np.testing.assert_allclose(
# mlp_out.detach().cpu().numpy(),
# ref_out.detach().cpu().numpy(),
# atol=1e-7, rtol=1e-5)
#
# # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
# mlp_out.mean().mul(10.).backward()
# ref_out.mean().mul(10.).backward()
# np.testing.assert_allclose(
# test_input.grad.detach().cpu().numpy(),
# ref_input.grad.detach().cpu().numpy(),
# atol=1e-5, rtol=1e-4)
# for l in range(mlp.num_layers):
# np.testing.assert_allclose(
# mlp.weights[l].grad.detach().cpu().numpy(),
# ref_mlp.weights[l].grad.detach().cpu().numpy(),
# atol=1e-7, rtol=1e-5)
# np.testing.assert_allclose(
# mlp.biases[l].grad.detach().cpu().numpy(),
# ref_mlp.biases[l].grad.detach().cpu().numpy(),
# atol=1e-7, rtol=1e-5)
# def test_no_grad(self):
# mlp = MLP(mlp_sizes).cuda()
# ref_mlp = deepcopy(mlp)
#
# test_input = torch.empty(seq_len, batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.)
# ref_input = test_input.clone().detach()
# mlp_out, dropout_mask = mlp(test_input)
#
# ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
# np.testing.assert_allclose(
# mlp_out.detach().cpu().numpy(),
# ref_out.detach().cpu().numpy(),
# atol=1e-7, rtol=1e-5)
def test_performance_half(self):
print("Testing performance ...")
for dropout in [0.0, 0.5]:
mlp = MLP(mlp_sizes, dropout=dropout).cuda().half()
print(mlp)
mlp_layers = []
for i in range(mlp.num_layers):
linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
mlp.weights[i].data.copy_(linear.weight)
mlp.biases[i].data.copy_(linear.bias)
mlp_layers.append(linear)
if i < mlp.num_layers - 1:
mlp_layers.append(torch.nn.GELU())
mlp_layers.append(nn.Dropout(dropout))
ref_mlp = nn.Sequential(*mlp_layers).cuda().half()
test_input = torch.empty(
seq_len, batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
ref_input = torch.empty(
seq_len, batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
# Warm up GPU
for _ in range(num_iters):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
mlp_out, _ = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
mlp_out, _ = mlp(test_input, fastest=False)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
                    mlp_out, _ = mlp(test_input, fastest=True)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"C++ MLP BLASLT time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
# torch.cuda.synchronize()
# start_time = time()
# for _ in range(num_iters):
# mlp_out, _ = mlp(test_input, fastest=True)
# test_loss = mlp_out.mean()
# mlp.zero_grad()
# test_loss.backward()
# torch.cuda.synchronize()
# stop_time = time()
# print(F"BLASLT MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# torch.cuda.profiler.stop()
unittest.main()
# test = TestMLP()
# test.test_creation()
# test.test_performance_half()
# test.test_with_bias()
| 20,463
| 37.466165
| 122
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/extensions/mlp_blaslt/setup.py
|
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print('\nWarning: Torch did not find available GPUs on this system.\n',
'If your intention is to cross-compile, this is not an error.\n'
'By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n'
'Volta (compute capability 7.0), Turing (compute capability 7.5),\n'
'and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n'
'If you wish to cross-compile for a single specific architecture,\n'
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n')
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
# print(os.environ["TORCH_CUDA_ARCH_LIST"])
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR < 4:
raise RuntimeError("Apex requires Pytorch 0.4 or newer.\n" +
"The latest stable release can be obtained from https://pytorch.org/")
cmdclass = {}
ext_modules = []
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
print("Cuda extensions are being compiled with a version of Cuda that does " +
"not match the version used to compile Pytorch binaries. " +
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) +
"In some cases, a minor-version mismatch will not cause later errors: " +
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk).")
# Set up macros for forward/backward compatibility hack around
# https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e
# and
# https://github.com/NVIDIA/apex/issues/456
# https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
if CUDA_HOME is None:
raise RuntimeError("--cuda_ext was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
else:
check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)
# ext_modules.append(
# CUDAExtension(name='fused_dense_cuda',
# sources=['fused_dense.cpp',
# 'fused_dense_cuda.cu'],
# extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
# 'nvcc':['-O3'] + version_dependent_macros}))
ext_modules.append(
CUDAExtension(name='mlp_gelu_blaslt',
sources=['mlp_gelu.cpp',
'mlp_gelu_cuda.cu'],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc': ['-O3'] + version_dependent_macros}))
setup(
    name='test-mlp-blaslt',
version='0.1',
description='PyTorch Extensions written by NVIDIA',
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExtension} if ext_modules else {},
)
| 5,888
| 45.738095
| 277
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/extensions/mlp_blaslt/test_linear.py
|
from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
from torch import Tensor
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat import custom_fwd, custom_bwd
try:
import linear_blaslt
except (ModuleNotFoundError, ImportError) as e:
linear_blaslt = None
torch.backends.cuda.matmul.allow_tf32 = False
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
class LinearFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight, bias):
output = linear_blaslt.forward(input, weight, bias)
ctx.save_for_backward(input, weight)
return output
@staticmethod
def backward(ctx, grad_output):
input, weight = ctx.saved_tensors
if weight.requires_grad:
d_input, d_weight, d_bias = linear_blaslt.backward(input, weight, grad_output)
else:
d_input = linear_blaslt.backward_input_only(input, weight, grad_output)
d_weight, d_bias = None, None
return d_input, d_weight, d_bias
if linear_blaslt:
def linear_function(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return LinearFunction.apply(*args)
else:
linear_function = torch.nn.functional.linear
class Linear(torch.nn.Linear):
def forward(self, input: Tensor) -> Tensor:
return linear_function(input, self.weight, self.bias)
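# Illustrative usage sketch (falls back to torch.nn.functional.linear when the
# linear_blaslt extension is not built):
#   layer = Linear(1024, 4096).cuda().half()
#   x = torch.randn(32, 64, 1024, device="cuda", dtype=torch.half, requires_grad=True)
#   y = layer(x)   # same semantics as torch.nn.Linear, routed through linear_blaslt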
if __name__ == '__main__':
seq_len = 32
batch_size = 64
# linear_sizes = [1024, 4096, 1024]
input_size = 1024
output_size = 250000
num_iters = 32
class TestLinear(unittest.TestCase):
def test_forward_float(self):
test_linear = Linear(input_size, output_size).cuda()
linear = test_linear
ref_linear = torch.nn.Linear(input_size, output_size).cuda()
ref_linear.weight.data.copy_(test_linear.weight.data)
ref_linear.bias.data.copy_(test_linear.bias.data)
test_input = torch.empty(seq_len, batch_size, input_size,
device="cuda").uniform_(-0.01, 0.01).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
linear_out = test_linear(test_input)
ref_out = ref_linear(ref_input)
grad = torch.randn_like(linear_out)
np.testing.assert_allclose(
linear_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-5, rtol=1e-5)
linear_out.mul_(1).backward(grad)
ref_out.mul_(1).backward(grad)
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-5, rtol=1e-5)
np.testing.assert_allclose(
linear.weight.grad.detach().cpu().numpy(),
ref_linear.weight.grad.detach().cpu().numpy(),
atol=1e-5, rtol=1e-5)
np.testing.assert_allclose(
linear.bias.grad.detach().cpu().numpy(),
ref_linear.bias.grad.detach().cpu().numpy(),
atol=1e-5, rtol=1e-5)
def test_forward_float_input_only(self):
test_linear = Linear(input_size, output_size).cuda()
linear = test_linear
ref_linear = torch.nn.Linear(input_size, output_size).cuda()
for p in test_linear.parameters():
p.requires_grad = False
for p in ref_linear.parameters():
p.requires_grad = False
ref_linear.weight.data.copy_(test_linear.weight.data)
ref_linear.bias.data.copy_(test_linear.bias.data)
test_input = torch.empty(seq_len, batch_size, input_size,
device="cuda").uniform_(-0.01, 0.01).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
linear_out = test_linear(test_input)
ref_out = ref_linear(ref_input)
grad = torch.randn_like(linear_out)
np.testing.assert_allclose(
linear_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-5, rtol=1e-5)
linear_out.mul_(1).backward(grad)
ref_out.mul_(1).backward(grad)
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-5, rtol=1e-5)
# np.testing.assert_allclose(
# linear.weight.grad.detach().cpu().numpy(),
# ref_linear.weight.grad.detach().cpu().numpy(),
# atol=1e-5, rtol=1e-5)
# np.testing.assert_allclose(
# linear.bias.grad.detach().cpu().numpy(),
# ref_linear.bias.grad.detach().cpu().numpy(),
# atol=1e-5, rtol=1e-5)
#
# def test_precision_half(self):
#
# test_linear = Linear(input_size, output_size).half().cuda()
# linear = test_linear
#
# ref_linear = torch.nn.Linear(input_size, output_size).half().cuda()
#
# ref_linear.weight.data.copy_(test_linear.weight.data)
# ref_linear.bias.data.copy_(test_linear.bias.data)
#
# test_input = torch.empty(seq_len, batch_size, input_size,
# device="cuda").uniform_(-0.01, 0.01).half().requires_grad_()
#
# ref_input = test_input.clone().detach().requires_grad_()
#
# linear_out = test_linear(test_input)
# ref_out = ref_linear(ref_input)
#
# grad = torch.randn_like(linear_out)
# np.testing.assert_allclose(
# linear_out.detach().cpu().numpy(),
# ref_out.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
#
# linear_out.mul_(1).backward(grad)
# ref_out.mul_(1).backward(grad)
# np.testing.assert_allclose(
# test_input.grad.detach().cpu().numpy(),
# ref_input.grad.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
#
# np.testing.assert_allclose(
# linear.weight.grad.detach().cpu().numpy(),
# ref_linear.weight.grad.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
# np.testing.assert_allclose(
# linear.bias.grad.detach().cpu().numpy(),
# ref_linear.bias.grad.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
# def test_numeric(self):
# print("Test numeric 3D ....")
# for dropout in [0.0, 0.2, 0.5, 0.7]:
# linear = linear(linear_sizes, activation='relu', dropout=dropout).cuda()
#
# print(linear)
# ref_linear = deepcopy(linear)
#
# for _ in range(1):
# bsz = random.randint(8, batch_size // 8) * 8
# test_input = torch.empty(seq_len, bsz, linear_sizes[0], device="cuda").uniform_(-1.,
# 1.).requires_grad_()
# ref_input = test_input.clone().detach().requires_grad_()
# linear_out, dropout_mask = linear(test_input)
# ref_out = ref_linear.forward(ref_input, dropout_mask, ref=True)
#
# print(dropout_mask.sum() / dropout_mask.numel(), dropout_mask.numel())
# np.testing.assert_allclose(
# linear_out.detach().cpu().numpy(),
# ref_out.detach().cpu().numpy(),
# atol=1e-5, rtol=1e-4)
#
# # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
# linear_out.mean().mul(10.).backward()
# ref_out.mean().mul(10.).backward()
# np.testing.assert_allclose(
# test_input.grad.detach().cpu().numpy(),
# ref_input.grad.detach().cpu().numpy(),
# atol=1e-7, rtol=1e-5)
# np.testing.assert_allclose(
# linear.biases[0].grad.detach().cpu().numpy(),
# ref_linear.biases[0].grad.detach().cpu().numpy(),
# atol=1e-7, rtol=1e-5)
def test_performance_half(self):
print("Testing performance ...")
test_linear = Linear(input_size, output_size).half().cuda()
linear = test_linear
ref_linear = torch.nn.Linear(input_size, output_size).half().cuda()
test_input = torch.empty(
seq_len, batch_size, input_size, device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
ref_input = torch.empty(
seq_len, batch_size, input_size, device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
# Warm up GPU
for _ in range(num_iters):
ref_out = ref_linear(ref_input)
ref_loss = ref_out.mean()
ref_linear.zero_grad()
ref_loss.backward()
linear_out = linear(test_input)
test_loss = linear_out.mean()
linear.zero_grad()
test_loss.backward()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_out = ref_linear(ref_input)
ref_loss = ref_out.mean()
ref_linear.zero_grad()
ref_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch linear time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
linear_out = linear(test_input)
test_loss = linear_out.mean()
linear.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"C++ linear BLASLT time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
unittest.main()
| 11,042
| 36.181818
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/extensions/blru/blru.py
|
import sys
python = sys.argv[1]=="0"
import time
if not python:
from torch.utils.cpp_extension import load
blru = load(name="blru", sources=["blru.cpp","blru_kernel.cu"]) #, verbose=True)
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from torch.cuda.amp import autocast
import warnings
warnings.filterwarnings("ignore", category=UserWarning, message="ComplexHalf support is experimental*")
class BLRUFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, Lambda, Bu, lengths, direction):
max_len = lengths.max().item()
arange = torch.arange(int(math.log(max_len,2))+1, device=lengths.device) # log(L)
two_exp = 2**arange # log(L)
Lambda_exp = Lambda.unsqueeze(-1).pow(two_exp).to(Bu.dtype) # N x log(L)
output = blru.forward(Lambda_exp, Bu.contiguous(), lengths, direction)
variables = [Lambda_exp, output, lengths]
ctx.direction = direction
ctx.save_for_backward(*variables)
return output
@staticmethod
def backward(ctx, grad_output):
        # Why we conj in this function is explained here: https://pytorch.org/docs/stable/notes/autograd.html
        # Basically:
        # grad_output is \partial L/\partial s* (that's what PyTorch calculates)
        # => grad_output.conj() is \partial L/\partial s (because the domain of L is R)
        # => outputs is \partial L/\partial z (that's what the backward function calculates)
        # => d_Lambda.conj(), d_Bu.conj() is \partial L/\partial z* (again because the domain of L is R)
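        # In Wirtinger terms: for a real-valued loss L and complex z, dL/dz* = conj(dL/dz),
        # so a single conj() on the incoming gradient and one on the results converts
        # between the convention PyTorch uses and the one the kernel computes.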
Lambda_exp, output, lengths = ctx.saved_tensors
outputs = blru.backward(grad_output.conj().contiguous(), Lambda_exp, output, lengths, ctx.direction)
d_Lambda, d_Bu = outputs
return d_Lambda.conj(), d_Bu.conj(), None, None
def BLRUFunctionPython(Lambda, Bu, lengths, direction):
if direction == 2:
return torch.cat([BLRUFunctionPython(Lambda[:Bu.shape[1]//2],Bu[:,:Bu.shape[1]//2],lengths,0),
BLRUFunctionPython(Lambda[Bu.shape[1]//2:],Bu[:,Bu.shape[1]//2:],lengths,1)],1)
    # Bu has shape [B x N x L]  (batch x state dimension x time)
output = torch.empty_like(Bu)
for B in range(Bu.shape[0]):
for N in range(Bu.shape[1]):
if direction == 0:
for L in range(lengths[B]):
if L == 0:
v = Bu[B,N,L]
else:
v = Lambda[N] * v + Bu[B,N,L]
output[B,N,L] = v
elif direction == 1:
for L in range(lengths[B]-1,-1,-1):
if L == lengths[B]-1:
v = Bu[B,N,L]
else:
v = Lambda[N] * v + Bu[B,N,L]
output[B,N,L] = v
else:
raise NotImplementedError
return output
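# Note: both the CUDA kernel and the Python reference above compute the diagonal linear
# recurrence h[0] = Bu[0], h[t] = Lambda * h[t-1] + Bu[t] (direction 0; time-reversed for
# direction 1), independently per batch entry and per state dimension. direction == 2
# splits the N channels in half and runs one half forward and the other half backward.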
class BLRU(nn.Module):
def __init__(self, H, N, direction=0, r_min=0, r_max=1, max_phase=2*np.pi):
super().__init__()
"""Initialize parameters of the LRU layer."""
# N: state dimension, H: model dimension
# Initialization of Lambda is complex valued distributed uniformly on ring
# between r_min and r_max, with phase in [0, max_phase].
u1 = torch.rand((N,)) # N
self.nu_log = nn.Parameter(torch.log(-0.5 * torch.log(u1 * (r_max ** 2 - r_min ** 2) + r_min ** 2))) # N
u2 = torch.rand((N,)) # N
self.theta_log = nn.Parameter(torch.log(max_phase * u2)) # N
# Glorot initialized Input/Output projection matrices
B = torch.randn(H, N, 2) / ((2 * H) ** 0.5) # H x N x 2
        self.C = nn.Parameter(torch.randn(2, H, N) / (N ** 0.5)) # 2 x H x N
with torch.no_grad():
# Normalization factor
diag_lambda = torch.exp(-torch.exp(self.nu_log) + 1j * torch.exp(self.theta_log)) # N
gamma_log = torch.sqrt(1 - torch.abs(diag_lambda) ** 2).unsqueeze(-1) # N x 1
self.B = nn.Parameter(B * gamma_log) # H x N x 2
self.direction = direction
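    # Derivation sketch for the initialization above: with u1 ~ U(0, 1),
    #   nu_log = log(-0.5 * log(u1 * (r_max**2 - r_min**2) + r_min**2))
    #   |Lambda| = exp(-exp(nu_log)) = sqrt(u1 * (r_max**2 - r_min**2) + r_min**2)
    # so |Lambda|**2 is uniform on [r_min**2, r_max**2], i.e. the eigenvalues lie on a ring
    # between r_min and r_max, with phase exp(theta_log) = max_phase * u2 in [0, max_phase).
    # The factor sqrt(1 - |Lambda|**2) folded into B is intended to keep the hidden-state
    # variance O(1) at initialization.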
def forward(self, u, lengths, python=False):
"""Forward pass of the LRU layer. Output sequence y and input_sequence u are of shape (B, L, H)."""
Lambda = torch.exp(-torch.exp(self.nu_log) + 1j * torch.exp(self.theta_log)) # N
        Bu = torch.view_as_complex(torch.einsum("blh,hnz->bnlz", u, self.B)) # B x N x L
if not python:
x = BLRUFunction.apply(Lambda, Bu, lengths, self.direction).transpose(2, 1) # B x L x N
else:
x = BLRUFunctionPython(Lambda, Bu, lengths, self.direction).transpose(2, 1) # B x L x N
y = torch.matmul(x.real, self.C[0]) - torch.matmul(x.imag, self.C[1]) # B x L x H
return y
def calc(seq, lengths, python, label):
with autocast(enabled=True):
for i in range(24):
seq = layer(seq, lengths, python=python)
err = seq - label
mask = torch.arange(err.shape[1], device=err.device).unsqueeze(0) < lengths.unsqueeze(1)
loss = (err * err)[mask].sum()
loss.backward()
return seq, loss
if __name__ == "__main__":
device = "cuda"
torch.manual_seed(42)
B = 64
L = 1000
H = d_model = 1024
N = d_hidden = 1024
"""B = 3
L = 20
H = d_model = 4
N = d_hidden = 4"""
direction = 2
layer = BLRU(d_model, d_hidden, direction).to(device)
inp = torch.randn(B, L, d_model, device=device, requires_grad=True)
lengths = torch.randint(1, L+1, (B,), dtype=torch.int32, device=device)
label = torch.randn_like(inp)
if python:
print("PYTHON")
else:
print("CUDA")
n = 0
t = time.time()
while True:
seq, loss = calc(inp, lengths, python, label)
n += 1
print("n",n,time.time()-t)
t = time.time()
if n==1:
#print("output",seq)
print("d_nu_log",layer.nu_log.grad)
print("d_theta_log",layer.theta_log.grad)
print("d_B",layer.B.grad)
| 6,085
| 34.383721
| 113
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/extensions/multihead_attn/setup.py
|
import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
cmdclass = {}
ext_modules = []
cmdclass['build_ext'] = BuildExtension.with_options(use_ninja=False)
# Check if CUDA 11 is installed for compute capability 8.0
cc_flag = []
_, bare_metal_major, _ = get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
# if int(bare_metal_major) >= 11:
# cc_flag.append('-gencode')
# cc_flag.append('arch=compute_80,code=sm_80')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_75,code=sm_75')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_80,code=sm_80')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_86,code=sm_86')
# subprocess.run(["git", "submodule", "update", "--init", "cutlass"])
# subprocess.run(["git", "clone", "https://github.com/NVIDIA/cutlass.git", "cutlass"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "ed2ed4d667ce95e1371bd62db32b6a114e774336"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "fe3438a3c1ccbdd03dc1aca3bb68099a9e2a58bd"])
ext_modules.append(
CUDAExtension(name='encdec_multihead_attn_cuda',
sources=['encdec_multihead_attn.cpp',
'encdec_multihead_attn_cuda.cu'],
extra_compile_args={'cxx': ['-O3', ],
'nvcc': ['-O3',
'-I./cutlass/',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'--expt-relaxed-constexpr',
'--expt-extended-lambda',
'--use_fast_math'] + cc_flag}))
# ext_modules.append(
# CUDAExtension(name='fused_dropout_add_cuda',
# sources=['dropout_add.cpp',
# 'dropout_add_cuda.cu'],
# extra_compile_args={'cxx': ['-O3', ],
# 'nvcc': ['-O3',
# '-U__CUDA_NO_HALF_OPERATORS__',
# '-U__CUDA_NO_HALF_CONVERSIONS__',
# '--expt-relaxed-constexpr',
# '--expt-extended-lambda',
# '--use_fast_math'] + cc_flag}))
ext_modules.append(
CUDAExtension(name='mask_softmax_dropout_cuda',
sources=['masked_softmax_dropout.cpp',
'masked_softmax_dropout_cuda.cu'],
extra_compile_args={'cxx': ['-O3', ],
'nvcc': ['-O3',
'-I./cutlass/include',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'--expt-relaxed-constexpr',
'--expt-extended-lambda',
'--use_fast_math'] + cc_flag}))
# ext_modules.append(
# CUDAExtension(name='rel_self_attn_cuda',
# sources=['relative_self_attn.cpp',
# 'relative_self_attn_cuda.cu'],
# extra_compile_args={'cxx': ['-O3',],
# 'nvcc':['-O3',
# '-I./cutlass/',
# '-U__CUDA_NO_HALF_OPERATORS__',
# '-U__CUDA_NO_HALF_CONVERSIONS__',
# '--expt-relaxed-constexpr',
# '--expt-extended-lambda',
# '--use_fast_math'] + cc_flag}))
setup(
name='optimized_multihead_attention',
version='0.1',
packages=find_packages(exclude=('build',
'csrc',
'include',
'tests',
'dist',
'docs',
'tests',
'examples',
'apex.egg-info',)),
description='CUDA/C++ Pytorch extension for multi-head attention ported from NVIDIA apex',
ext_modules=ext_modules,
cmdclass=cmdclass,
)
| 5,186
| 42.588235
| 98
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/extensions/layer_norm/test_layer_norm.py
|
from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
import fast_layer_norm_cuda
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
class FastLayerNormFN(torch.autograd.Function):
@staticmethod
def forward(ctx, x, gamma, beta, epsilon):
x = x.contiguous()
gamma = gamma.contiguous()
beta = beta.contiguous()
hidden_size = gamma.numel()
xmat = x
ymat, mu, rsigma = fast_layer_norm_cuda.ln_fwd(xmat, gamma, beta, epsilon)
ctx.save_for_backward(x, gamma, mu, rsigma)
return ymat
@staticmethod
def backward(ctx, dy):
# assert dy.is_contiguous()
dy = dy.contiguous() # this happens!
x, gamma, mu, rsigma = ctx.saved_tensors
hidden_size = gamma.numel()
dx, dgamma, dbeta, _, _ = fast_layer_norm_cuda.ln_bwd(dy, x, mu, rsigma, gamma)
return dx, dgamma, dbeta, None
def _fast_layer_norm(x, weight, bias, epsilon):
args = _cast_if_autocast_enabled(x, weight, bias, epsilon)
with torch.cuda.amp.autocast(enabled=False):
return FastLayerNormFN.apply(*args)
class FastLayerNorm(torch.nn.Module):
def __init__(self, hidden_size, eps=1e-5):
super().__init__()
self.epsilon = eps
self.weight = torch.nn.Parameter(torch.Tensor(hidden_size))
self.bias = torch.nn.Parameter(torch.Tensor(hidden_size))
self.reset_parameters()
def reset_parameters(self):
from torch.nn import init
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, x):
return _fast_layer_norm(x, self.weight, self.bias, self.epsilon)
def extra_repr(self):
s = F"Fast Layer Norm w/ Hidden sizes: {self.weight.size(0)}"
return s
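# Illustrative usage sketch (mirrors the tests below; requires the fast_layer_norm_cuda
# extension and one of the supported hidden sizes):
#   ln = FastLayerNorm(1024).cuda().half()
#   x = torch.randn(512, 128, 1024, device="cuda", dtype=torch.half, requires_grad=True)
#   y = ln(x)   # drop-in for torch.nn.LayerNorm(1024) over the last dimension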
if __name__ == '__main__':
class TestLN(unittest.TestCase):
# def test_creation(self):
# MLP(mlp_sizes)
def test_numeric(self):
# print("Test numeric 3D ....")
# for dropout in [0.0, 0.2, 0.5, 0.7]:
bsz = 128
seq_len = 512
hidden_sizes = [
768,
1024,
# 1536,
# 2048,
# 2304,
# 3072,
# 3840,
# 4096,
# 5120,
# 6144,
# 8192,
# 10240,
# 12288,
# 12800,
# 15360,
# 16384,
# 18432,
# 20480,
# 24576,
# 25600,
# 30720,
# 32768,
# 40960,
# 49152,
# 65536,
]
for hidden in hidden_sizes:
ref_ln = nn.LayerNorm(hidden).cuda()
fast_ln = FastLayerNorm(hidden).cuda()
print(fast_ln, ref_ln)
test_input = torch.empty(seq_len, bsz, hidden, device="cuda").uniform_(-1., 1.).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
ref_out = ref_ln(ref_input)
test_out = fast_ln(test_input)
np.testing.assert_allclose(
ref_out.detach().cpu().numpy(),
test_out.detach().cpu().numpy(),
atol=1e-5, rtol=1e-4)
test_out.mean().mul(10.).backward()
ref_out.mean().mul(10.).backward()
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
np.testing.assert_allclose(
fast_ln.weight.grad.detach().cpu().numpy(),
ref_ln.weight.grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
np.testing.assert_allclose(
fast_ln.bias.grad.detach().cpu().numpy(),
ref_ln.bias.grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_performance_half(self):
num_iters = 64
bsz = 128
seq_len = 512
print("Testing performance ...")
hidden_sizes = [
768,
1024,
1536,
2048,
2304,
3072,
3840,
4096,
5120,
6144,
8192,
10240,
12288,
12800,
15360,
# 16384,
# 18432,
# 20480,
# 24576,
# 25600,
# 30720,
# 32768,
# 40960,
# 49152,
# 65536,
]
for hidden in hidden_sizes:
ref_ln = nn.LayerNorm(hidden).cuda().half()
fast_ln = FastLayerNorm(hidden).cuda().half()
print(fast_ln, ref_ln)
test_input = torch.empty(seq_len, bsz, hidden, device="cuda", dtype=torch.half).uniform_(-1., 1.).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
# Warm up GPU
for _ in range(num_iters):
ref_out = ref_ln(ref_input)
ref_loss = ref_out.mean()
ref_ln.zero_grad()
ref_loss.backward()
test_out = fast_ln(test_input)
test_loss = test_out.mean()
fast_ln.zero_grad()
test_loss.backward()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_out = ref_ln(ref_input)
ref_loss = ref_out.mean()
ref_ln.zero_grad()
ref_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch Layer Norm time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
test_out = fast_ln(test_input)
test_loss = test_out.mean()
fast_ln.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"C++ MLP 2D time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
unittest.main()
| 7,215
| 31.071111
| 130
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/extensions/multihead_attn_blaslt/setup.py
|
import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
# print(bare_metal_minor, bare_metal_major)
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
print("Cuda extensions are being compiled with a version of Cuda that does " +
"not match the version used to compile Pytorch binaries. " +
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) +
"In some cases, a minor-version mismatch will not cause later errors: " +
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk).")
return int(bare_metal_minor), int(bare_metal_major)
# Check if ATen/CUDAGenerator.h is found; otherwise use the new
# ATen/CUDAGeneratorImpl.h, due to breaking change in https://github.com/pytorch/pytorch/pull/36026
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
cmdclass = {}
ext_modules = []
cmdclass['build_ext'] = BuildExtension.with_options(use_ninja=False)
cc_flag = []
_, bare_metal_major, _ = get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
cc_flag.append('-gencode')
cc_flag.append('arch=compute_75,code=sm_75')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_80,code=sm_80')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_86,code=sm_86')
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
# subprocess.run(["git", "submodule", "update", "--init", "cutlass"])
# subprocess.run(["git", "clone", "https://github.com/NVIDIA/cutlass.git", "multihead_attn/cutlass"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "ed2ed4d667ce95e1371bd62db32b6a114e774336"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "fe3438a3c1ccbdd03dc1aca3bb68099a9e2a58bd"])
bare_metal_minor, bare_metal_major = check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)
print("GENERATOR FLAG:", generator_flag)
ext_modules.append(
CUDAExtension(name='self_multihead_attn_blaslt',
sources=['self_multihead_attn.cpp',
'self_multihead_attn_cuda.cu'],
# include_dirs=[os.path.join(this_dir, 'multihead_attn/cutlass')],
extra_compile_args={'cxx': ['-O3', ] + version_dependent_macros + generator_flag,
'nvcc': ['-O3',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'--expt-relaxed-constexpr',
'--expt-extended-lambda',
'--use_fast_math'] + version_dependent_macros +
generator_flag}))
setup(
name='test-attn-blaslt',
version='0.1',
description='CUDA/C++ Pytorch extension for multi-head attention ported from NVIDIA apex',
ext_modules=ext_modules,
cmdclass=cmdclass,
)
| 5,264
| 39.19084
| 101
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/extensions/mlp/mlp_gelu_dropoutadd.py
|
from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
import silu_cuda
try:
import apex.amp as amp
from apex.amp import half_function
except (ModuleNotFoundError, ImportError) as e:
amp = None
from ..optimized.compat import half_function
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat import custom_fwd, custom_bwd
try:
import fused_mlp_relu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_relu = None
try:
import fused_mlp_agelu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_agelu = None
try:
import fused_mlp_gelu_dropout_add
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_gelu_dropout_add = None
#
# class MlpReluFunction(torch.autograd.Function):
# @staticmethod
# @custom_fwd(cast_inputs=torch.float16)
# def forward(ctx, activation, *args):
# output = fused_mlp.forward(args)
# ctx.save_for_backward(*args)
# ctx.outputs = output
# return output[0]
#
# @staticmethod
# @custom_bwd
# def backward(ctx, grad_o):
# grads = fused_mlp.backward(grad_o, ctx.outputs, ctx.saved_tensors)
# del ctx.outputs
# return (None, *grads)
#
#
class MlpGeLUDropoutAddFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, r_p, *args):
outputs = fused_mlp_gelu_dropout_add.forward(p, r_p, args)
ctx.save_for_backward(*args)
ctx.outputs = outputs
dropout_mask = outputs[-2]
residual_mask = outputs[-1]
ctx.p = p
ctx.r_p = r_p
return outputs[0], dropout_mask, residual_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
r_p = ctx.r_p
grads = fused_mlp_gelu_dropout_add.backward(p, r_p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, None, *grads)
if fused_mlp_gelu_dropout_add:
mlp_gelu_dropout_add_function = half_function(MlpGeLUDropoutAddFunction.apply)
else:
mlp_gelu_dropout_add_function = None
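# Call convention used further below in MLP.forward (hidden dropout prob, residual dropout prob, input, weights, biases):
#   output, dropout_mask, residual_mask = mlp_gelu_dropout_add_function(p, res_p, input, *weights, *biases)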
class SwishFunction(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, inp):
ctx.save_for_backward(inp)
return silu_cuda.forward(inp)
@staticmethod
@custom_bwd
def backward(ctx, grad_out):
inp, = ctx.saved_tensors
if not ctx.needs_input_grad[0]: return (None,)
return silu_cuda.backward(inp, grad_out)
def fast_silu(input):
return SwishFunction.apply(input)
class FastSiLU(torch.nn.Module):
def forward(self, input):
return fast_silu(input)
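# Note: silu_cuda is assumed to implement SiLU/Swish, i.e. silu(x) = x * sigmoid(x);
# torch.nn.functional.silu can serve as a pure-PyTorch reference for checking its outputs.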
class AGELUFunction(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, x):
ctx.save_for_backward(x)
SQRT_M2_PI = 0.7978845608
COEFF = 0.044715
return 0.5 * x * (1.0 + torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(x, 3))))
@staticmethod
@custom_bwd
def backward(ctx, grad_out):
x, = ctx.saved_tensors
SQRT_M2_PI = 0.7978845608
COEFF = 0.044715
BACKCOEFF = 0.1070322243
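# BACKCOEFF = 3 * COEFF * SQRT_M2_PI, i.e. the x^2 coefficient obtained when
# differentiating the tanh argument SQRT_M2_PI * (x + COEFF * x^3).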
tanh_outf = torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(x, 3)))
retf = 0.5 * x * (1.0 - torch.pow(tanh_outf, 2)) * (SQRT_M2_PI + BACKCOEFF * torch.pow(x, 2)) + 0.5 * (
1.0 + tanh_outf)
return grad_out * retf
if __name__ == '__main__':
class MLP(torch.nn.Module):
"""Launch MLP in C++
Args:
mlp_sizes (list of int): MLP sizes. Example: [1024,1024,1024] will create 2 MLP layers with shape 1024x1024
bias (bool): Default True:
relu (bool): Default True
"""
def __init__(self, mlp_sizes, activation='gelu', dropout=0.0, res_dropout=0.0):
super(MLP, self).__init__()
self.num_layers = len(mlp_sizes) - 1
self.mlp_sizes = copy(mlp_sizes)
self.dropout = dropout
self.res_dropout = res_dropout
if activation == 'relu':
    self.activation = 1
elif activation == 'sigmoid':
    self.activation = 2
elif activation == 'gelu':
    self.activation = 3
else:
    raise TypeError("activation must be 'relu', 'sigmoid' or 'gelu'.")
self.weights = []
self.biases = []
for i in range(self.num_layers):
w = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1], mlp_sizes[i]))
self.weights.append(w)
name = 'weight_{}'.format(i)
setattr(self, name, w)
b = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1]))
self.biases.append(b)
name = 'bias_{}'.format(i)
setattr(self, name, b)
self.reset_parameters()
def reset_parameters(self):
for weight in self.weights:
dimsum = weight.size(0) + weight.size(1)
std = math.sqrt(2. / float(dimsum))
nn.init.normal_(weight, 0., std)
for bias in self.biases:
std = math.sqrt(1. / float(bias.size(0)))
nn.init.normal_(bias, 0., std)
def forward(self, input, mask=None, residual_mask=None, ref=False):
if ref:
return self.forward_ref(input, mask, residual_mask)
# return mlp_relu_function(self.dropout, input, *self.weights, *self.biases)
# return mlp_agelu_function(self.dropout, input, *self.weights, *self.biases)
return mlp_gelu_dropout_add_function(self.dropout, self.res_dropout, input, *self.weights, *self.biases)
def forward_ref(self, input, mask, res_mask):
i = 0
output = input
for l in range(self.num_layers):
output = F.linear(output, self.weights[l], self.biases[l])
dropout_mask = mask[i:i + output.numel()]
pinv = 1 / (1 - self.dropout)
if l < self.num_layers - 1:
if self.dropout > 0:
output = F.gelu(output) * dropout_mask.view(output.size(0), -1) * pinv
else:
output = F.gelu(output)
i += output.numel()
pinv = 1 / (1 - self.res_dropout)
output = output * res_mask.view(output.size(0), -1) * pinv + input if self.res_dropout > 0 else output + input
return output
def extra_repr(self):
# TODO add dropout probability
s = F"MLP sizes: {self.mlp_sizes}, activation={self.activation}"
return s
batch_size = 24568
mlp_sizes = [1024, 4096, 1024]
# mlp_sizes = [4, 7, 4]
num_iters = 20
class TestMLP(unittest.TestCase):
def test_creation(self):
MLP(mlp_sizes)
def test_numeric(self):
mlp = MLP(mlp_sizes, activation='gelu', dropout=0.0, res_dropout=0.0).cuda()
print(mlp)
ref_mlp = deepcopy(mlp)
for _ in range(8):
bsz = random.randint(2850, batch_size // 8) * 8
test_input = torch.empty(bsz, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
mlp_out, dropout_mask, residual_mask = mlp(test_input)
ref_out = ref_mlp.forward(ref_input, dropout_mask, residual_mask, ref=True)
print(dropout_mask.sum() / dropout_mask.numel())
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-5, rtol=1e-4)
# Use the mean as a scalar loss, scaled by 10 so the gradients are not vanishingly small
mlp_out.mean().mul(10.).backward()
ref_out.mean().mul(10.).backward()
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
np.testing.assert_allclose(
mlp.biases[0].grad.detach().cpu().numpy(),
ref_mlp.biases[0].grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_no_grad(self):
mlp = MLP(mlp_sizes).cuda()
ref_mlp = deepcopy(mlp)
test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.)
ref_input = test_input.clone().detach()
mlp_out, dropout_mask, residual_mask = mlp(test_input)
ref_out = ref_mlp(ref_input, dropout_mask, residual_mask, ref=True)
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_performance_half(self):
mlp = MLP(mlp_sizes).cuda().half()
mlp_layers = []
for i in range(mlp.num_layers):
linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
mlp.weights[i].data.copy_(linear.weight)
mlp.biases[i].data.copy_(linear.bias)
mlp_layers.append(linear)
if i < mlp.num_layers - 1:
# mlp_layers.append(nn.ReLU(inplace=True))
mlp_layers.append(torch.nn.GELU())
mlp_layers.append(nn.Dropout(0.0))
else:
mlp_layers.append(nn.Dropout(0.0))
ref_mlp = nn.Sequential(*mlp_layers).cuda().half()
test_input = torch.empty(
batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
ref_input = torch.empty(
batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
# Warm up GPU
for _ in range(100):
ref_out = ref_mlp(ref_input) + ref_input
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
mlp_out, _, _ = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_out = ref_mlp(ref_input) + ref_input
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
mlp_out, _, _ = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
unittest.main()
# test = TestMLP()
# test.test_creation()
# test.test_performance_half()
# test.test_with_bias()
| 11,654
| 32.491379
| 122
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/batch_ensemble/be_relative_attention.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
class BatchEnsembleMM(object):
@staticmethod
def forward(x, weight, bias, ensemble_r, ensemble_s):
"""
:param x: [T x B x H]
:param weight: [H_out x H]
:param bias: [H_out]
:param ensemble_r: [B x H]
:param ensemble_s: [B x H_out]
:return:
"""
bsz, len_x, hin = x.size(1), x.size(0), x.size(2)
hout = weight.size(0)
assert bsz == ensemble_s.size(0)
# assert ensemble * bsz_per_ensemble == bsz, "Mini-batch must divide evenly to the ensembles"
# element-wise [T x B x H] \times [B x H]
x_r = torch.mul(x, ensemble_r)
# GEMM No Bias. Otherwise use addmm
x_mm = torch.mm(x_r.view(-1, hin), weight.transpose(0, 1))
x_mm = x_mm.view(len_x, bsz, hout)
# element-wise [T x B x Hout] \times [B x Hout]
x_s = torch.mul(x_mm, ensemble_s)
# add bias
x_s = torch.add(x_s, bias)
# we need to store the intermediate results for the backward pass
return x_s, x_mm, x_r
# maybe we need some allocated memory as well
@staticmethod
def backward(grad_y, x, x_r, x_mm, weight, ensemble_r, ensemble_s, need_grad_x=True):
bsz, len_x, hin = x.size(1), x.size(0), x.size(2)
hout = x_mm.size(-1)
grad_bias = torch.sum(grad_y, (0, 1))  # reduce over time and batch to match the bias shape [H_out]
grad_s = grad_y
# backprop through the last element-wise multiplication
grad_ensemble_s = torch.mul(grad_s, x_mm)
grad_ensemble_s = torch.sum(grad_ensemble_s, dim=0)
# backprop through the MM
grad_mm = torch.mul(grad_s, ensemble_s)
grad_mm = grad_mm.view(-1, hout)
grad_r = torch.mm(grad_mm, weight).view(len_x, bsz, hin)
# GEMM: [hout x bsz] \times [bsz x hin]
grad_weight = torch.mm(grad_mm.transpose(0, 1), x_r.view(-1, hin))
# back prop through the first element-wise multiplication
# element-wise [len_x, bsz, hin] \cdot [bsz, hin]
if need_grad_x :
grad_x = torch.mul(grad_r, ensemble_r)
else:
grad_x = None
# grad ensemble r
grad_ensemble_r = torch.mul(grad_r, x)
grad_ensemble_r = torch.sum(grad_ensemble_r, dim=0)
return grad_x, grad_weight, grad_bias, grad_ensemble_r, grad_ensemble_s
mm = BatchEnsembleMM
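# Minimal reference sketch (not part of the original module): per batch row b, BatchEnsembleMM.forward computes
#   y[t, b, :] = ((x[t, b, :] * ensemble_r[b, :]) @ weight.T) * ensemble_s[b, :] + bias,
# i.e. one shared GEMM wrapped by cheap per-member elementwise scalings.
def _batch_ensemble_linear_reference(x, weight, bias, ensemble_r, ensemble_s):
    return torch.mul(torch.mul(x, ensemble_r).matmul(weight.transpose(0, 1)), ensemble_s) + bias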
class RelativeShiftFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, batch_first, emb_last):
assert len(x.shape) == 3, "Input must have 3 dimensions B x len_q x len_r!"
ctx.batch_first = batch_first
ctx.emb_last = emb_last
return RelativeShift.forward(x, batch_first, emb_last)
@staticmethod
def backward(ctx, grad_x):
batch_first = ctx.batch_first
emb_last = ctx.emb_last
return RelativeShift.backward(grad_x, batch_first, emb_last), None, None  # no grads for the two bool flags
class RelativeShift(object):
@staticmethod
def forward(x, batch_first, emb_last):
assert len(x.shape) == 3, "Input must have 3 dimensions B x len_q x len_r or len_q x len_r x demb!"
assert (batch_first or emb_last) and not(batch_first and emb_last), \
"Batch first and Embedding last must be mutually exclusive"
if batch_first:
bsz = x.size(0)
zero_pad = torch.zeros((bsz, x.size(1), 1),
device=x.device, dtype=x.dtype)
# padded into [T x T+1 x (B x H)]
x_padded = torch.cat([zero_pad, x], dim=2)
# view into [T+1 x T x (BxH)]
x_view = x_padded.view(bsz, x.size(2) + 1, x.size(1))
# remove the first column
x = x_view[:, 1:].view_as(x)
else:
raise NotImplementedError
return x
@staticmethod
def backward(grad_x, batch_first, emb_last):
if batch_first:
bsz = grad_x.size(0)
len_q, len_r = grad_x.size(1), grad_x.size(2)
grad_x_view = grad_x.view(bsz, len_r, len_q)
zero_pad = torch.zeros((bsz, 1, len_q), device=grad_x.device, dtype=grad_x.dtype)
# grad_x should have size B x len_q x len_r
# x_view should have size B x len_q+1 x len_r
# put the zeros into the missing gradients
grad_x_view = torch.cat([zero_pad, grad_x_view], dim=1)
# print(grad_x_view.size())
grad_x_padded = grad_x_view.view(bsz, len_q, len_r + 1)
# because the first index in the padded dim was from zero_pad
grad_output = grad_x_padded[:, :, 1:]
else:
raise NotImplementedError
return grad_output
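# Worked example of the shift above (a sketch, batch_first=True): for one batch row with len_q=2, len_r=3,
#   x = [[a, b, c],
#        [d, e, f]]
# padding a zero column, viewing as (len_r+1) x len_q and dropping the first row yields
#   [[b, c, 0],
#    [d, e, f]]
# so row i is shifted left by (len_q - 1 - i), aligning the relative positions with query position i.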
class RelativeSelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, inputs, pos, use_time_mask, is_training, heads, ensemble,
input_weights, output_weights, pos_weights,
input_biases, output_biases, pos_biases,
r_i, s_i, r_p, s_p,
r_w_bias, r_r_bias,
mask, dropout_prob,
incremental, incremental_cache,
double_precision):
"""
:param double_precision: ops at float64, only for debugging
:param ctx: context object to stash information for backward
:param inputs: input hidden states [len_q x batch_size x hidden]
:param pos: [len_k x 1 x hidden]
:param use_time_mask: bool, if we use the causal mask for decoder
:param is_training: training state, for dropout
:param heads: number of heads
:param input_weights: QKV projection weight [3*hidden x hidden]
:param output_weights: output projection weight [hidden x hidden]
:param input_biases: QKV projection bias [3*hidden]
:param output_biases: output projection bias [hidden]
:param pos_biases: positional projection bias [hidden]
:param pos_weights: positional projection weight [hidden x hidden]
:param r_w_bias: content bias u [heads x head_dim]
:param r_r_bias: positional bias v [heads x head_dim]
:param mask: None or [B x T] or [T x T]
:param dropout_prob:
:param incremental:
:param incremental_cache:
:return:
"""
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
head_dim = inputs.size(2) // heads
scale_t = torch.tensor([head_dim ** -0.5])
bsz, len_q = inputs.size(1), inputs.size(0)
len_r = pos.size(0) # r can be longer than query, i.e for bidirectional attention we need 2k+1 positions
len_k = len_q # because of self-attention
if not is_training:
bsz = bsz // ensemble
if pos.size(1) == 1:
pos = pos.repeat(1, bsz, 1) # to T x B x H
# # Input Linear GEMM
# # input1: (activations) [len_q, bsz, hidden]
# # input2: (weights) [hidden*3 (3072), hidden (1024)] (transpose [0,1])
# # output: [len_q, bsz, hidden*3]
# # GEMM: ( (len_q*bsz) x embed_dim ) x ( embed_dim x embed_dim*3 ) = (len_q*bsz x embed_dim*3)
qkv, qkv_mm, qkv_r = mm.forward(inputs, input_weights, input_biases, r_i, s_i)
if not is_training:
qkv = qkv.view(len_q, ensemble, bsz, qkv.size(-1))
qkv = torch.mean(qkv, dim=1)
rpos, rpos_mm, rpos_r = mm.forward(pos, pos_weights, pos_biases, r_p, s_p)
if not is_training:
rpos = rpos.view(len_r, ensemble, bsz, rpos.size(-1))
rpos = torch.mean(rpos, dim=1)
# Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [len_q, bsz, heads(16), 3, head_dim(64)]
# input_lin_results: [len_q, batches=bsz*heads, 3, head_dim]
qkv = qkv.view(len_q, bsz * heads, 3, head_dim)
queries = qkv[:, :, 0, :]
keys = qkv[:, :, 1, :]
values = qkv[:, :, 2, :]
r_head_k = rpos.view(pos.size(0), bsz * heads, head_dim) # T x BxH x D
if incremental:
# We have to change the heads x head_dim first and then concat to the T dim
# bsz is changed during translation due to beam search
# during translation we want to keep the actual T dim in MM as 1 constantly
keys = keys.contiguous().view(len_q, bsz, heads * head_dim)
values = values.contiguous().view(len_q, bsz, heads * head_dim)
if 'k' in incremental_cache and 'v' in incremental_cache:
keys = torch.cat([incremental_cache['k'], keys], dim=0) # time first
incremental_cache['k'] = keys
values = torch.cat([incremental_cache['v'], values], dim=0) # time first
incremental_cache['v'] = values
else:
incremental_cache['k'] = keys
incremental_cache['v'] = values
keys = keys.view(-1, bsz * heads, head_dim)
values = values.view(-1, bsz * heads, head_dim)
# Relative Attention from here:
# r_w_bias size: head * head_dim
rw_head_q = queries.view(len_q, bsz, heads, head_dim) + r_w_bias #
rw_head_q = rw_head_q.view(len_q, bsz * heads, head_dim)
# matmul1 batched GEMMs
# queries+bias: [len_q, bsz*heads, head_dim] transpose(0, 1)
# keys: [len_k, bsz*heads, head_dim] transpose(0, 1)
if queries.is_cuda:
matmul_ac = torch.empty((bsz * heads, queries.size(0), keys.size(0)), dtype=queries.dtype,
device=rw_head_q.device)
matmul_ac = torch.baddbmm(matmul_ac, rw_head_q.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2),
out=matmul_ac, beta=0.0, alpha=scale_t[0])
else:
matmul_ac = torch.bmm(rw_head_q.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2)).mul_(scale_t[0])
rr_head_q = queries.view(len_q, bsz, heads, head_dim) + r_r_bias #
rr_head_q = rr_head_q.view(len_q, bsz * heads, head_dim)
# matmul2 batched GEMMs
# queries+bias: [len_q, bsz*heads, head_dim] transpose(0, 1)
# rel_positions: [len_r, bsz*heads, head_dim] transpose(0, 1)
if queries.is_cuda:
matmul_bd = torch.empty((bsz * heads, queries.size(0), len_r), dtype=queries.dtype,
device=rw_head_q.device)
matmul_bd = torch.baddbmm(matmul_bd, rr_head_q.transpose(0, 1), r_head_k.transpose(0, 1).transpose(1, 2),
out=matmul_bd, beta=0.0, alpha=scale_t[0])
else:
matmul_bd = torch.matmul(rr_head_q.transpose(0, 1), r_head_k.transpose(0, 1).transpose(1, 2)) \
.mul_(scale_t[0])
# shift so that the relative positions are aligned
# the first element will have 0 -1 ... -n relative positions compared to other elements
# the last element will have n-1 n-2 ... 0
matmul_bd = RelativeShift.forward(matmul_bd, True, False)
# if len_r is longer than len_k, then we need to take the first len_k positions only
matmul_bd = matmul_bd[:, :, :len_k]
attn_score = matmul_ac + matmul_bd # both AC and BD are scaled with scale_t before in baddbmm
# attn_score should have size [bsz*heads, len_q, len_k] for now
if mask is not None:
# Self Attention Time Mask
if use_time_mask:
assert (len(mask.size()) == 2), "Timing mask is not 2D!"
# assert (mask.size(0) == mask.size(1)), "Sequence length should match!"
mask = mask.to(torch.bool)
attn_score = attn_score.masked_fill_(mask, float('-inf'))
# Key Padding Mask
else:
batches, len_q, seql_k = attn_score.size()
bsz = int(batches / heads)
attn_score = attn_score.view(bsz, heads, len_q, seql_k)
mask = mask.to(torch.bool)
attn_score = attn_score.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float('-inf'))
attn_score = attn_score.view(bsz * heads, len_q, seql_k)
dtype_ = torch.float64 if double_precision else torch.float32
softmax_results = F.softmax(attn_score, dim=-1, dtype=dtype_).type_as(attn_score)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1. - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
# Matmul2 Batched GEMMs
# The output tensor specification is needed here to specify the non-standard output.
# Given that pytorch cannot currently perform autograd with an output tensor specified,
# this requires a backward pass specified.
# Input1: from_softmax [bsz*heads, len_q, seql_k]
# Input2: (values) [seql_v, bsz*heads, head_dim] transpose(0,1)
# Output: [len_q, bsz*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( len_q x seql_k ) x ( seql_k x head_dim ) = (len_q x head_dim)
matmul2_results = torch.empty((dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype, device=queries.device).transpose(1, 0)
torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
matmul2_results = matmul2_results.transpose(0, 1).contiguous().view(len_q, bsz, inputs.size(2))
# Output Linear GEMM
# Input1: (activations) [len_q, bsz, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ len_q, bsz, embed_dim ]
# GEMM: ( len_q*bsz x embed_dim ) x ( embed_dim x embed_dim ) = ( len_q*bsz x embed_dim )
# outputs = torch.addmm(output_biases,
# matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
# output_weights.transpose(0, 1),
# beta=1., alpha=1.)
#
# outputs = outputs.view(inputs.size(0), inputs.size(1), output_weights.size(0))
o_input = matmul2_results
# o, o_mm, o_r = mm.forward(o_input, output_weights, output_biases, r_o, s_o)
# outputs = o
outputs = torch.addmm(output_biases,
matmul2_results.view(len_q * bsz, inputs.size(2)),
output_weights.transpose(0, 1),
beta=1., alpha=1.)
outputs = outputs.view(len_q, bsz, output_weights.size(0))
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results, #
dropout_results,
softmax_results,
qkv, qkv_mm, qkv_r,
rpos_r, rpos_mm,
rw_head_q, rr_head_q,
inputs, pos, r_head_k,
input_weights, pos_weights, output_weights,
r_i, s_i, r_p, s_p,
r_w_bias, r_r_bias,
dropout_mask,
dropout_prob_t)
# with torch.no_grad():
# coverage = softmax_results.new(*softmax_results.size()).copy_(softmax_results)
coverage = softmax_results
return outputs.detach(), coverage
# return outputs.detach()
@staticmethod
def backward(ctx, output_grads, softmax_grads):
# def backward(ctx, output_grads):
"""
:param ctx:
:param output_grads: gradients w.r.t the outputs
:param softmax_grads: unnecessary unless the attention weights are used somewhere else
:return:
"""
heads_t, \
scale_t, \
matmul2_results, \
dropout_results, \
softmax_results, \
qkv, qkv_mm, qkv_r, \
rpos_r, rpos_mm, \
rw_head_q, rr_head_q, \
inputs, pos, r_head_k, \
input_weights, pos_weights, output_weights, \
r_i, s_i, r_p, s_p, \
r_w_bias, r_r_bias, \
dropout_mask, \
dropout_prob_t = ctx.saved_tensors
head_dim = inputs.size(2) // heads_t[0]
len_q, bsz = inputs.size(0), inputs.size(1)
len_r = pos.size(0)
# Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# input_lin_results: [len_q, bsz, heads(16), 3, head_dim(64)]
# input_lin_results: [len_q, batches=bsz*heads, 3, head_dim]
qkv = qkv.view(inputs.size(0), inputs.size(1) * heads_t[0], 3, head_dim)
queries = qkv[:, :, 0, :]
keys = qkv[:, :, 1, :]
values = qkv[:, :, 2, :]
# The tensor is declared before hand to properly slice out query, key, and value grads.
qkv_grads = torch.empty_like(qkv)
queries_grads = qkv_grads[:, :, 0, :]
keys_grads = qkv_grads[:, :, 1, :]
values_grads = qkv_grads[:, :, 2, :]
# Output Linear Projection
o_input = matmul2_results
# output_lin_grads, output_weights_grads, output_biases_grads, r_o_grads, s_o_grads \
# = mm.backward(output_grads, o_input, o_r, o_mm, output_weights, r_o, s_o)
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
output_weights_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)))
output_lin_grads = output_lin_grads.view(inputs.size(0), inputs.size(1) * heads_t[0], head_dim).transpose(0, 1)
output_biases_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0)
# Matmul2 - DGRAD1
# Input1: (data grads) [len_q, bsz*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, bsz*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [bsz*heads, len_q, seql_k]
# GEMM: Per batch: ( len_q x head_dim ) x ( head_dim x seql_k ) = ( len_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
# Matmul2 - DGRAD2
# Input1: (data grads) [len_q, bsz*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, bsz*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [bsz*heads, len_q, seql_k]
# GEMM: Per batch: ( len_q x head_dim ) x ( head_dim x seql_k ) = ( len_q x seql_k )
torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
# print("Reached here")
# Mask and Scaling for Dropout (not a publically documented op)
if dropout_prob_t[0] > 0.0:
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
else:
dropout_grads = matmul2_dgrad1
# Softmax Grad (not a publically documented op)
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
attn_score_grads = softmax_grads
# the grads are evenly distributed to AC and BD
matmul_ac_grads = attn_score_grads
# Matmul1 - DGRAD1
# Input1: (data grads) [bsz*heads, len_q, seql_k]
# Input2: (activations) [seql_k, bsz*heads, head_dim] transpose(0,1)
# Output: [bsz*heads, len_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( len_q x seql_k ) x ( seql_k x head_dim ) = ( len_q x head_dim )
torch.baddbmm(queries_grads.transpose(0, 1), matmul_ac_grads, keys.transpose(0, 1),
out=queries_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
queries_grads_ac = queries_grads
r_w_bias_grads = torch.sum(queries_grads_ac.view(len_q, bsz, heads_t[0], -1), dim=[0, 1]) # heads * head_dim
matmul_bd_grads = attn_score_grads
if len_r > len_q: # if we cut off the BDs from before, then put the zero gradients behind
grad_cut = matmul_bd_grads.new_zeros((matmul_bd_grads.size(0), matmul_bd_grads.size(1), len_r - len_q))
matmul_bd_grads = torch.cat([matmul_bd_grads, grad_cut], dim=-1)
# backprop through the shifting
matmul_bd_grads = RelativeShift.backward(matmul_bd_grads, True, False)
# Matmul1 - DGRAD1
# Input1: (matmul_bd_grads) [bsz*heads, len_q, seql_k]
# Input2: (r_head_k) [len_q, bsz*heads, head_dim] transpose(0,1)
# Output: [bsz*heads, len_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( len_q x seql_k ) x ( seql_k x head_dim ) = ( len_q x head_dim )
queries_grads_bd = queries_grads.new_empty(*queries_grads.size())
torch.baddbmm(queries_grads_bd.transpose(0, 1), matmul_bd_grads, r_head_k.transpose(0, 1),
out=queries_grads_bd.transpose(0, 1), beta=0.0, alpha=scale_t[0])
# len_q x batch*heads x d_head
r_r_bias_grads = torch.sum(queries_grads_bd.view(len_q, bsz, heads_t[0], -1), dim=[0, 1])
# add the gradients from bd to queries
queries_grads.add_(queries_grads_bd)
# MatmulAC - DGRAD2
# Input1: (data grads) [bsz*heads, len_q, seql_k] transpose(1,2)
# Input2: (rw_head_q) [bsz*heads, head_dim, len_q] transpose(0,1)
# Output: [seql_k, bsz*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x len_q ) x ( len_q x head_dim ) = ( seql_k x head_dim )
torch.baddbmm(keys_grads.transpose(0, 1), matmul_ac_grads.transpose(1, 2),
rw_head_q.transpose(0, 1), out=keys_grads.transpose(0, 1),
beta=0.0, alpha=scale_t[0])
# MatmulBD - DGRAD2
# Input1: (data grads) [bsz*heads, len_q, len_r] transpose(1,2)
# Input2: (rr_head_q) [len_q, bsz*heads, head_dim] transpose(0,1)
# Output: r_head_k [len_r, bsz*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x len_q ) x ( len_q x head_dim ) = ( seql_k x head_dim )
r_head_k_grad = r_head_k.new_empty((len_r, bsz * heads_t[0], head_dim))
# rr_head_q = queries.view(len_q, bsz, heads_t[0], head_dim) + r_r_bias #
# rr_head_q = rr_head_q.view(len_q, bsz * heads_t[0], head_dim)
torch.baddbmm(r_head_k_grad.transpose(0, 1), matmul_bd_grads.transpose(1, 2).contiguous(),
rr_head_q.transpose(0, 1), out=r_head_k_grad.transpose(0, 1), beta=0.0, alpha=scale_t[0])
# r_head_k_grad = torch.matmul(matmul_bd_grads.transpose(1, 2), rr_head_q.transpose(0, 1))
r_head_k_grad = r_head_k_grad.view(len_r, bsz, heads_t[0] * head_dim)
# Input Linear GEMM - DGRAD
# input1: (data grads) [len_q, bsz, 3*embed_dim(3072)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)]
# output: [len_q, bsz, embed_dim]
# GEMM: ( (len_q*bsz) x 3*embed_dim ) x ( 3*embed_dim x embed_dim ) = (len_q*bsz x embed_dim)
qkv_grads = qkv_grads.view(inputs.size(0), inputs.size(1), heads_t[0] * 3 * head_dim)
input_grads, input_weights_grads, input_biases_grads, r_i_grads, s_i_grads = \
mm.backward(qkv_grads, inputs, qkv_r, qkv_mm, input_weights, r_i, s_i)
_, pos_weights_grads, pos_biases_grads, r_p_grads, s_p_grads = \
mm.backward(r_head_k_grad, pos, rpos_r, rpos_mm, pos_weights, r_p, s_p, need_grad_x=False)
return input_grads, None, None, None, None, None, \
input_weights_grads, output_weights_grads, pos_weights_grads, \
input_biases_grads, output_biases_grads, pos_biases_grads, \
r_i_grads, s_i_grads, r_p_grads, s_p_grads, \
r_w_bias_grads, r_r_bias_grads, \
None, None, None, None, None
relative_self_attn_func = RelativeSelfAttnFunc.apply
class BERelativeSelfMultiheadAttn(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0., ensemble=1):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = True
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.pos_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj_bias = Parameter(torch.Tensor(embed_dim))
self.pos_proj_bias = Parameter(torch.Tensor(embed_dim))
self.r_i = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
self.s_i = torch.nn.Parameter(torch.Tensor(ensemble, 3 * embed_dim))
# self.r_o = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
# self.s_o = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
self.r_p = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
self.s_p = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
self.r_w_bias = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
self.r_r_bias = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
self.reset_parameters()
self.attn_func = RelativeSelfAttnFunc.apply
def reset_parameters(self, init='normal'):
# nn.init.xavier_uniform_(self.in_proj_weight, gain=math.sqrt(2))
# nn.init.xavier_uniform_(self.out_proj_weight)
if init == 'normal': # xavier normal
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.in_proj_weight, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
nn.init.normal_(self.pos_proj_weight, 0.0, std_)
else:
std_ = math.sqrt(6.0 / (self.embed_dim + self.embed_dim))
nn.init.uniform_(self.in_proj_weight, -std_, std_)
nn.init.uniform_(self.out_proj_weight, -std_, std_)
nn.init.uniform_(self.pos_proj_weight, -std_, std_)
nn.init.constant_(self.in_proj_bias, 0.)
nn.init.constant_(self.out_proj_bias, 0.)
nn.init.constant_(self.pos_proj_bias, 0.)
nn.init.normal_(self.r_w_bias, 0.0, 0.02)
nn.init.normal_(self.r_r_bias, 0.0, 0.02)
with torch.no_grad():
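# bernoulli_(0.5).mul_(-2).add_(1) draws random +/-1 signs, the usual sign
# initialisation for the rank-1 BatchEnsemble factors r and s.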
self.r_i.bernoulli_(0.5).mul_(-2).add_(1)
self.s_i.bernoulli_(0.5).mul_(-2).add_(1)
# self.r_o.bernoulli_(0.5).mul_(-2).add_(1)
# self.s_o.bernoulli_(0.5).mul_(-2).add_(1)
self.r_p.bernoulli_(0.5).mul_(-2).add_(1)
self.s_p.bernoulli_(0.5).mul_(-2).add_(1)
def forward(self, input, pos, key_padding_mask=None, attn_mask=None, indices=None, mems=None,
incremental=False, incremental_cache=None, double_precision=False):
bsz = input.size(1)
ensemble = self.r_i.size(0)
if key_padding_mask is not None:
assert (attn_mask is None), "ERROR attn_mask and key_padding_mask should not be both defined!"
mask = key_padding_mask
if len(mask.shape) == 3:
mask = mask.squeeze(0).transpose(0, 1)
elif attn_mask is not None:
mask = attn_mask
if len(mask.shape) == 3:
mask = mask.squeeze(-1)
else:
mask = None
if self.training:
if indices is None:
with torch.no_grad():
indices = torch.arange(0, bsz, device=input.device, dtype=torch.long)
indices = torch.remainder(indices, ensemble)
r_i = torch.index_select(self.r_i, 0, indices)
s_i = torch.index_select(self.s_i, 0, indices)
# r_o = torch.index_select(self.r_o, 0, indices)
# s_o = torch.index_select(self.s_o, 0, indices)
r_p = torch.index_select(self.r_p, 0, indices)
s_p = torch.index_select(self.s_p, 0, indices)
else:
input = input.repeat(1, ensemble, 1)
pos = pos.repeat(1, ensemble, 1)
# if key_padding_mask is not None:
# mask = mask.repeat(ensemble, 1)
r_i = self.r_i.repeat(bsz, 1).view(bsz, ensemble, self.r_i.size(-1)). \
transpose(0, 1).contiguous().view(-1, self.r_i.size(-1))
s_i = self.s_i.repeat(bsz, 1).view(bsz, ensemble, self.s_i.size(-1)). \
transpose(0, 1).contiguous().view(-1, self.s_i.size(-1))
r_p = self.r_p.repeat(bsz, 1).view(bsz, ensemble, self.r_p.size(-1)). \
transpose(0, 1).contiguous().view(-1, self.r_p.size(-1))
s_p = self.s_p.repeat(bsz, 1).view(bsz, ensemble, self.s_p.size(-1)). \
transpose(0, 1).contiguous().view(-1, self.s_p.size(-1))
# r_o = self.r_o.repeat(bsz, 1).view(bsz, ensemble, self.r_o.size(-1)). \
# transpose(0, 1).contiguous().view(-1, self.r_o.size(-1))
# s_o = self.s_o.repeat(bsz, 1).view(bsz, ensemble, self.s_o.size(-1)). \
# transpose(0, 1).contiguous().view(-1, self.r_o.size(-1))
is_training = self.training
outputs, coverage = self.attn_func(input, pos, attn_mask is not None, is_training, self.num_heads, ensemble,
self.in_proj_weight, self.out_proj_weight, self.pos_proj_weight,
self.in_proj_bias, self.out_proj_bias, self.pos_proj_bias,
r_i, s_i, r_p, s_p,
self.r_w_bias, self.r_r_bias,
mask, self.dropout,
incremental, incremental_cache, double_precision)
# last False is double precision
return outputs, coverage
if __name__ == "__main__":
bsz = 4
seq_len_q = 4
seq_len_kv = 7
embed_dim = 32
n_heads = 4
output_size = 32
ensemble = 7
class TestNetwork(nn.Module):
def __init__(self):
super(TestNetwork, self).__init__()
self.func = relative_self_attn_func
self.n_heads = n_heads
def forward(self, q, r, input_weights, output_weights, pos_weights,
            input_biases, output_biases, pos_biases,
            r_i, s_i, r_p, s_p,
            r_w_bias, r_r_bias):
use_time_mask = False
mask = None
is_training = True
incremental = False
incremental_cache = None
double_precision = True
dropout_prob = 0.0
heads = self.n_heads
ensemble = 1  # unused on the training path, but required by RelativeSelfAttnFunc.forward
output, coverage = self.func(q, r, use_time_mask, is_training, heads, ensemble,
                             input_weights, output_weights, pos_weights,
                             input_biases, output_biases, pos_biases,
                             r_i, s_i, r_p, s_p,
r_w_bias, r_r_bias,
mask, dropout_prob,
incremental, incremental_cache, double_precision)
return output
r_w_bias = nn.Parameter(torch.Tensor(n_heads, embed_dim//n_heads)).double().cuda()
r_r_bias = nn.Parameter(torch.Tensor(n_heads, embed_dim//n_heads)).double().cuda()
in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)).double().cuda()
pos_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)).double().cuda()
out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)).double().cuda()
in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)).double().cuda()
pos_proj_bias = Parameter(torch.Tensor(embed_dim)).double().cuda()
out_proj_bias = Parameter(torch.Tensor(embed_dim)).double().cuda()
r_i = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
s_i = torch.nn.Parameter(torch.Tensor(bsz, 3 * embed_dim)).double().cuda()
r_p = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
s_p = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
r_o = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
s_o = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
std_ = math.sqrt(2.0 / (embed_dim + embed_dim))
nn.init.normal_(in_proj_weight, 0.0, std_)
nn.init.normal_(pos_proj_weight, 0.0, std_)
nn.init.normal_(out_proj_weight, 0.0, std_)
nn.init.normal_(r_w_bias, 0.0, std_)
nn.init.normal_(r_r_bias, 0.0, std_)
torch.nn.init.constant_(in_proj_bias, 0.0)
torch.nn.init.constant_(out_proj_bias, 0.0)
torch.nn.init.constant_(pos_proj_bias, 0.0)
with torch.no_grad():
r_i.bernoulli_(0.5).mul_(-2).add_(1)
s_i.bernoulli_(0.5).mul_(-2).add_(1)
r_p.bernoulli_(0.5).mul_(-2).add_(1)
s_p.bernoulli_(0.5).mul_(-2).add_(1)
r_o.bernoulli_(0.5).mul_(-2).add_(1)
s_o.bernoulli_(0.5).mul_(-2).add_(1)
model = TestNetwork()
q = torch.randn((seq_len_q, bsz, embed_dim), requires_grad=True)
r = torch.randn((seq_len_kv, bsz, embed_dim), requires_grad=False)
model = model.double().cuda()
q = q.double().cuda()
r = r.double().cuda()
print("Gradchecking ...")
torch.autograd.gradcheck(model, (q, r, in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias,
                                 r_i, s_i, r_p, s_p,
r_w_bias, r_r_bias))
print("Gradcheck successful!!!")
| 34,591
| 45
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/batch_ensemble/be_encdec_attention.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
# from onmt.constants import double_precision
# from .batch_ensemble_linear import BatchEnsembleMM as mm
class BatchEnsembleMM(object):
@staticmethod
def forward(x, weight, bias, ensemble_r, ensemble_s):
"""
:param x: [T x B x H]
:param weight: [H_out x H]
:param bias: [H_out]
:param ensemble_r: [B x H]
:param ensemble_s: [B x H_out]
:return:
"""
bsz, len_x, hin = x.size(1), x.size(0), x.size(2)
hout = weight.size(0)
assert bsz == ensemble_s.size(0)
# assert ensemble * bsz_per_ensemble == bsz, "Mini-batch must divide evenly to the ensembles"
# element-wise [T x B x H] \times [B x H]
x_r = torch.mul(x, ensemble_r)
# GEMM No Bias. Otherwise use addmm
x_mm = torch.mm(x_r.view(-1, hin), weight.transpose(0, 1))
x_mm = x_mm.view(len_x, bsz, hout)
# element-wise [T x B x Hout] \times [B x Hout]
x_s = torch.mul(x_mm, ensemble_s)
# add bias
x_s = torch.add(x_s, bias)
# we need to store the intermediate results for the backward pass
return x_s, x_mm, x_r
# maybe we need some allocated memory as well
@staticmethod
def backward(grad_y, x, x_r, x_mm, weight, ensemble_r, ensemble_s):
bsz, len_x, hin = x.size(1), x.size(0), x.size(2)
hout = x_mm.size(-1)
grad_bias = torch.sum(grad_y, (0, 1))
grad_s = grad_y
# backprop through the last element-wise multiplication
grad_ensemble_s = torch.mul(grad_s, x_mm)
grad_ensemble_s = torch.sum(grad_ensemble_s, dim=0)
# backprop through the MM
grad_mm = torch.mul(grad_s, ensemble_s)
grad_mm = grad_mm.view(-1, hout)
grad_r = torch.mm(grad_mm, weight).view(len_x, bsz, hin)
# GEMM: [hout x bsz] \times [bsz x hin]
grad_weight = torch.mm(grad_mm.transpose(0, 1), x_r.view(-1, hin))
# back prop through the first element-wise multiplication
# element-wise [len_x, bsz, hin] \cdot [bsz, hin]
grad_x = torch.mul(grad_r, ensemble_r)
# grad ensemble r
grad_ensemble_r = torch.mul(grad_r, x)
grad_ensemble_r = torch.sum(grad_ensemble_r, dim=0)
return grad_x, grad_weight, grad_bias, grad_ensemble_r, grad_ensemble_s
mm = BatchEnsembleMM
class EncdecAttnFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, use_time_mask, is_training, heads, ensemble,
inputs_q, inputs_kv,
input_weights_q, input_weights_kv, output_weights,
input_biases_q, input_biases_kv, output_biases,
r_q, s_q, r_kv, s_kv,
mask, dropout_prob,
incremental, incremental_cache, double_precision):
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
head_dim = inputs_q.size(2) // heads
scale_t = torch.tensor([head_dim ** -0.5])
bsz, len_q, len_k = inputs_q.size(1), inputs_q.size(0), inputs_kv.size(0)
if not is_training:
bsz = bsz // ensemble
# TODO: add incremental cache
# Linear Projection Q
q, q_mm, q_r = mm.forward(inputs_q, input_weights_q, input_biases_q, r_q, s_q)
if not is_training:
q = q.view(len_q, ensemble, bsz, q.size(-1))
q = torch.mean(q, dim=1)
# input_lin_q_results = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1), input_weights_q.size(0))
# print(q.size())
queries = q.view(q.size(0), q.size(1) * heads, head_dim)
# Linear Projection KV
if incremental and ('c_k' in incremental_cache and 'c_v' in incremental_cache):
keys = incremental_cache['c_k']
values = incremental_cache['c_v']
keys = keys.view(len_k, bsz * heads, head_dim)
values = values.view(len_k, bsz * heads, head_dim)
kv = torch.stack([keys, values], dim=-2)
else:
kv, kv_mm, kv_r = mm.forward(inputs_kv, input_weights_kv, input_biases_kv, r_kv, s_kv)
if not is_training:
kv = kv.view(kv.size(0), ensemble, kv.size(1) // ensemble, kv.size(-1))
kv = torch.mean(kv, dim=1)
kv = kv.view(kv.size(0), kv.size(1) * heads, 2, head_dim)
keys = kv[:, :, 0, :]
values = kv[:, :, 1, :]
if incremental:
keys = keys.contiguous().view(len_k, bsz, heads * head_dim)
values = values.contiguous().view(len_k, bsz, heads * head_dim)
incremental_cache['c_k'] = keys
incremental_cache['c_v'] = values
keys = keys.view(len_k, bsz * heads, head_dim)
values = values.view(len_k, bsz * heads, head_dim)
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
# Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
if queries.is_cuda:
matmul1_results = torch.empty((queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype,
device=queries.device)
matmul1_results = torch.baddbmm(matmul1_results, queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results, beta=0.0, alpha=scale_t[0])
else:
matmul1_results = torch.matmul(queries.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2))
matmul1_results.mul_(scale_t[0])
if mask is not None:
# Self Attention Time Mask
mask = mask.to(torch.bool)
if len(mask.shape) == 3:
mask = mask.unsqueeze(1) # for the head dimension
else:
mask = mask.unsqueeze(1).unsqueeze(2) # for the head and query dimension
batches, seql_q, seql_k = matmul1_results.size()
bsz = int(batches / heads)
matmul1_results = matmul1_results.view(bsz, heads, seql_q, seql_k)
mask = mask.to(torch.bool)
# after unsqueezing the mask should have size [bsz x 1 x 1 x seql_k]
matmul1_results = matmul1_results.masked_fill_(mask, float('-inf'))
matmul1_results = matmul1_results.view(bsz * heads, seql_q, seql_k)
dtype_ = torch.float64 if double_precision else torch.float32
softmax_results = F.softmax(matmul1_results, dim=-1, dtype=dtype_).type_as(matmul1_results)
# softmax_results = F.softmax(matmul1_results.float(), dim=-1).type_as(matmul1_results)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1. - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
# Matmul2 Batched GEMMs
# The output tensor specification is needed here to specify the non-standard output.
# Given that pytorch cannot currently perform autograd with an output tensor specified,
# this requires a backward pass specified.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
if queries.is_cuda:
matmul2_results = torch.empty((dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype, device=dropout_results.device).transpose(1, 0)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
else:
matmul2_results = torch.matmul(dropout_results, values.transpose(0, 1))
matmul2_results = matmul2_results.transpose(0, 1).contiguous().view(len_q, bsz, inputs_q.size(2))
# # Output Linear GEMM
# # Input1: (activations) [seql_q, seqs, embed_dim=heads*head_dim]
# # Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# # Output: [ seql_q, seqs, embed_dim ]
# # GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
# outputs = torch.mm(matmul2_results.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2)),
# output_weights.transpose(0, 1))
# outputs = outputs.view(inputs_q.size(0), inputs_q.size(1), output_weights.size(0))
# Output Linear Projection
o_input = matmul2_results
# o, o_mm, o_r = mm.forward(o_input, output_weights, output_biases, r_o, s_o)
o = torch.addmm(output_biases,
o_input.view(len_q * bsz, o_input.size(2)),
output_weights.transpose(0, 1),
beta=1., alpha=1.)
outputs = o.view(len_q, bsz, output_weights.size(0))
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
q, q_mm, q_r,
kv, kv_mm, kv_r,
inputs_q,
inputs_kv,
input_weights_q, input_biases_q, r_q, s_q,
input_weights_kv, input_biases_kv, r_kv, s_kv,
output_weights, output_biases,
dropout_mask,
dropout_prob_t)
# return o.detach()
with torch.no_grad():
softmax_results = softmax_results.new(*softmax_results.size()).copy_(softmax_results)
return outputs.detach(), softmax_results
@staticmethod
def backward(ctx, output_grads, softmax_grads):
heads_t, scale_t, matmul2_results, dropout_results, softmax_results \
, q, q_mm, q_r, kv, kv_mm, kv_r \
, inputs_q, inputs_kv \
, input_weights_q, input_biases_q, r_q, s_q \
, input_weights_kv, input_biases_kv, r_kv, s_kv \
, output_weights, output_biases \
, dropout_mask, dropout_prob_t \
= ctx.saved_tensors
head_dim = inputs_q.size(2) // heads_t[0]
# Slice out k,v from one big Input Linear output (should only impact meta data, no copies!)
# Batch sizes and heads are combined to make the batch of the Batched GEMM
# input_lin_kv_results: [seql_k, bsz, heads(16), 2, head_dim(64)]
# input_lin_kv_results: [seql_k, batches=bsz*heads, 2, head_dim]
queries = q.view(inputs_q.size(0), inputs_q.size(1) * heads_t[0], head_dim)
kv = kv.view(inputs_kv.size(0), inputs_kv.size(1) * heads_t[0], 2, head_dim)
keys = kv[:, :, 0, :]
values = kv[:, :, 1, :]
# Slice out k,v from one big set of gradients entering the input linear's bprop
# (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared before hand to properly slice out query, key, and value grads.
kv_grads = torch.empty_like(kv)
queries_grads = torch.empty_like(queries)
keys_grads = kv_grads[:, :, 0, :]
values_grads = kv_grads[:, :, 1, :]
# Output Linear Projection
o_input = matmul2_results
# output_lin_grads, output_weights_grads, output_biases_grads, r_o_grads, s_o_grads \
# = mm.backward(output_grads, o_input, o_r, o_mm, output_weights, r_o, s_o)
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
output_weights_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)))
output_biases_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1) * heads_t[0],
head_dim).transpose(0, 1)
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
# print(output_lin_grads.size(), values.size())
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
# Matmul2 - DGRAD2
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
# Mask and Scaling for Dropout (not a publically documented op)
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
# Softmax Grad (not a publically documented op)
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
queries_grads = torch.baddbmm(queries_grads.transpose(0, 1), softmax_grads, keys.transpose(0, 1),
out=queries_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
torch.baddbmm(keys_grads.transpose(0, 1), softmax_grads.transpose(1, 2), queries.transpose(0, 1),
out=keys_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
# Input Q Linear GEMM - DGRAD
# input1: (data grads) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim (1024), embed_dim (1024)]
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
queries_grads = queries_grads.transpose(0, 1).view(inputs_q.size(0), inputs_q.size(1), heads_t[0] * head_dim)
# print("Reached 2 here")
# print(queries_grads.size(), q_r.size(), q_mm.size())
inputs_q_grads, input_weights_q_grads, input_biases_q_grads, r_q_grads, s_q_grads \
= mm.backward(queries_grads, inputs_q, q_r, q_mm, input_weights_q, r_q, s_q)
kv_grads = kv_grads.view(inputs_kv.size(0), inputs_kv.size(1), heads_t[0] * 2 * head_dim)
inputs_kv_grads, input_weights_kv_grads, input_biases_kv_grads, r_kv_grads, s_kv_grads \
= mm.backward(kv_grads, inputs_kv, kv_r, kv_mm, input_weights_kv, r_kv, s_kv)
return None, None, None, None \
, inputs_q_grads, inputs_kv_grads \
, input_weights_q_grads, input_weights_kv_grads, output_weights_grads \
, input_biases_q_grads, input_biases_kv_grads, output_biases_grads \
, r_q_grads, s_q_grads, r_kv_grads, s_kv_grads \
, None, None, None, None, None
# encdec_attn_func = EncdecAttnFunc.apply
class BEEncdecMultiheadAttn(nn.Module):
"""Multi-headed encoder-decoder attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, num_heads, embed_dim, attn_drop=0., ensemble=1):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = attn_drop
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = False
self.scaling = self.head_dim ** -0.5 # this value is hardcoded in the "fast" implementation
self.in_proj_weight_q = Parameter(torch.Tensor(embed_dim, embed_dim))
self.in_proj_weight_kv = Parameter(torch.Tensor(2 * embed_dim, embed_dim))
self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.in_proj_bias_q = Parameter(torch.Tensor(embed_dim))
self.in_proj_bias_kv = Parameter(torch.Tensor(2 * embed_dim))
self.out_proj_bias = Parameter(torch.Tensor(embed_dim))
self.r_q = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
self.s_q = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
self.r_kv = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
self.s_kv = torch.nn.Parameter(torch.Tensor(ensemble, 2 * embed_dim))
# self.r_o = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
# self.s_o = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
self.attn_func = EncdecAttnFunc.apply
self.reset_parameters()
def reset_parameters(self, init='normal'):
if init == 'normal': # xavier normal
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.in_proj_weight_q, 0.0, std_)
nn.init.normal_(self.in_proj_weight_kv, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
else: # xavier uniform
std_ = math.sqrt(6.0 / (self.embed_dim + self.embed_dim))
nn.init.uniform_(self.in_proj_weight_q, -std_, std_)
nn.init.uniform_(self.in_proj_weight_kv, -std_, std_)
nn.init.uniform_(self.out_proj_weight, -std_, std_)
torch.nn.init.constant_(self.in_proj_bias_q, 0.0)
torch.nn.init.constant_(self.in_proj_bias_kv, 0.0)
torch.nn.init.constant_(self.out_proj_bias, 0.0)
with torch.no_grad():
self.r_q.bernoulli_(0.5).mul_(-2).add_(1)
self.s_q.bernoulli_(0.5).mul_(-2).add_(1)
self.r_kv.bernoulli_(0.5).mul_(-2).add_(1)
self.s_kv.bernoulli_(0.5).mul_(-2).add_(1)
# self.r_o.bernoulli_(0.5).mul_(-2).add_(1)
# self.s_o.bernoulli_(0.5).mul_(-2).add_(1)
def forward(self, query, key, value, attn_mask=None, incremental=False, incremental_cache=None,
indices=None, double_precision=False):
assert value is key, "ERROR: Keys and values must be the same."
is_training = self.training
time_masking = False
len_key = key.size(0)
ensemble = self.r_q.size(0)
bsz = query.size(1)
if is_training:
if indices is None:
with torch.no_grad():
indices = torch.arange(0, bsz, device=query.device, dtype=torch.long)
indices = torch.remainder(indices, ensemble)
r_q = torch.index_select(self.r_q, 0, indices)
s_q = torch.index_select(self.s_q, 0, indices)
r_kv = torch.index_select(self.r_kv, 0, indices)
s_kv = torch.index_select(self.s_kv, 0, indices)
#
# r_o = torch.index_select(self.r_o, 0, indices)
# s_o = torch.index_select(self.s_o, 0, indices)
else:
query = query.repeat(1, ensemble, 1)
key = key.repeat(1, ensemble, 1)
# attn_mask = attn_mask.repeat(ensemble, 1, 1)
r_q = self.r_q.repeat(bsz, 1).view(bsz, ensemble, self.r_q.size(-1)).\
transpose(0, 1).contiguous().view(-1, self.r_q.size(-1))
s_q = self.s_q.repeat(bsz, 1).view(bsz, ensemble, self.s_q.size(-1)).\
transpose(0, 1).contiguous().view(-1, self.s_q.size(-1))
r_kv = self.r_kv.repeat(bsz, 1).view(bsz, ensemble, self.r_kv.size(-1)).\
transpose(0, 1).contiguous().view(-1, self.r_kv.size(-1))
s_kv = self.s_kv.repeat(bsz, 1).view(bsz, ensemble, self.s_kv.size(-1)).\
transpose(0, 1).contiguous().view(-1, self.s_kv.size(-1))
# r_o = self.r_o.repeat(bsz, 1).view(bsz, ensemble, self.r_o.size(-1)).\
# transpose(0, 1).contiguous().view(-1, self.r_o.size(-1))
# s_o = self.s_o.repeat(bsz, 1).view(bsz, ensemble, self.s_o.size(-1)).\
# transpose(0, 1).contiguous().view(-1, self.r_o.size(-1))
outputs, coverage = self.attn_func(time_masking, is_training, self.num_heads, ensemble,
query, key,
self.in_proj_weight_q, self.in_proj_weight_kv, self.out_proj_weight,
self.in_proj_bias_q, self.in_proj_bias_kv, self.out_proj_bias,
r_q, s_q, r_kv, s_kv, attn_mask, self.dropout,
incremental, incremental_cache, double_precision)
return outputs, coverage
if __name__ == "__main__":
bsz = 4
seq_len_q = 4
seq_len_kv = 4
embed_dim = 32
n_heads = 4
output_size = 32
ensemble = 7
class TestNetwork(nn.Module):
def __init__(self):
super(TestNetwork, self).__init__()
self.func = EncdecAttnFunc.apply
self.n_heads = 4
def forward(self, q, kv, input_weights_q, input_weights_kv, output_weights,
input_biases_q, input_biases_kv, output_biases,
r_q, s_q, r_kv, s_kv):
use_time_mask = False
mask = None
is_training = True
incremental = False
incremental_cache = None
double_precision = True
dropout_prob = 0.0
heads = self.n_heads
ensemble = 1  # unused when is_training is True, but required by EncdecAttnFunc.forward
#
# use_time_mask, is_training, heads, inputs_q, inputs_kv,
# input_weights_q, input_weights_kv, output_weights,
# input_biases_q, input_biases_kv, output_biases,
# r_q, s_q, r_kv, s_kv,
# mask, dropout_prob,
# incremental, incremental_cache, double_precision
output, coverage = self.func(use_time_mask, is_training, heads, ensemble, q, kv,
input_weights_q, input_weights_kv, output_weights,
input_biases_q, input_biases_kv, output_biases,
r_q, s_q, r_kv, s_kv,
mask, dropout_prob,
incremental, incremental_cache, double_precision)
return output
in_proj_weight_q = Parameter(torch.Tensor(embed_dim, embed_dim)).double().cuda()
in_proj_weight_kv = Parameter(torch.Tensor(2 * embed_dim, embed_dim)).double().cuda()
out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)).double().cuda()
in_proj_bias_q = Parameter(torch.Tensor(embed_dim)).double().cuda()
in_proj_bias_kv = Parameter(torch.Tensor(2 * embed_dim)).double().cuda()
out_proj_bias = Parameter(torch.Tensor(embed_dim)).double().cuda()
r_q = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
s_q = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
r_kv = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
s_kv = torch.nn.Parameter(torch.Tensor(bsz, 2 * embed_dim)).double().cuda()
# r_o = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
# s_o = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
std_ = math.sqrt(2.0 / (embed_dim + embed_dim))
nn.init.normal_(in_proj_weight_q, 0.0, std_)
nn.init.normal_(in_proj_weight_kv, 0.0, std_)
nn.init.normal_(out_proj_weight, 0.0, std_)
torch.nn.init.constant_(in_proj_bias_q, 0.0)
torch.nn.init.constant_(in_proj_bias_kv, 0.0)
torch.nn.init.constant_(out_proj_bias, 0.0)
with torch.no_grad():
r_q.bernoulli_(0.5).mul_(-2).add_(1)
s_q.bernoulli_(0.5).mul_(-2).add_(1)
r_kv.bernoulli_(0.5).mul_(-2).add_(1)
s_kv.bernoulli_(0.5).mul_(-2).add_(1)
# r_o.bernoulli_(0.5).mul_(-2).add_(1)
# s_o.bernoulli_(0.5).mul_(-2).add_(1)
# model = BEEncdecMultiheadAttn(n_heads, embed_dim, 0.0, ensemble)
# model = BatchEnsembleLinear(embed_dim, output_size, ensemble)
model = TestNetwork()
q = torch.randn((seq_len_q, bsz, embed_dim), requires_grad=True)
kv = torch.randn((seq_len_kv, bsz, embed_dim), requires_grad=True)
model = model.double().cuda()
q = q.double().cuda()
kv = kv.double().cuda()
print("Gradchecking ...")
torch.autograd.gradcheck(model, (q, kv, in_proj_weight_q, in_proj_weight_kv, out_proj_weight,
in_proj_bias_q, in_proj_bias_kv, out_proj_bias,
r_q, s_q, r_kv, s_kv))
| 26,603
| 46.677419
| 117
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/batch_ensemble/be_feed_forward.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/batch_ensemble/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/batch_ensemble/batch_ensemble_linear.py
|
import torch
import torch.nn.functional as F
from onmt.modules.dropout import variational_dropout
class BatchEnsembleMM(object):
@staticmethod
def forward(x, weight, bias, ensemble_r, ensemble_s):
"""
:param x: [T x B x H]
:param weight: [H_out x H]
:param bias: [H_out]
:param ensemble_r: [B x H]
:param ensemble_s: [B x H_out]
:return: x_s [T x B x H_out], plus the intermediates x_mm and x_r needed for backward
"""
bsz, len_x, hin = x.size(1), x.size(0), x.size(2)
hout = weight.size(0)
assert bsz == ensemble_s.size(0)
# assert ensemble * bsz_per_ensemble == bsz, "Mini-batch must divide evenly to the ensembles"
# element-wise [T x B x H] \times [B x H]
x_r = torch.mul(x, ensemble_r)
# GEMM No Bias. Otherwise use addmm
x_mm = torch.mm(x_r.view(-1, hin), weight.transpose(0, 1))
x_mm = x_mm.view(len_x, bsz, hout)
# element-wise [T x B x Hout] \times [B x Hout]
x_s = torch.mul(x_mm, ensemble_s)
# add bias
x_s = torch.add(x_s, bias)
# we need to store the intermediate results for the backward pass
return x_s, x_mm, x_r
# maybe we need some allocated memory as well
@staticmethod
def backward(grad_y, x, x_r, x_mm, weight, ensemble_r, ensemble_s):
bsz, len_x, hin = x.size(1), x.size(0), x.size(2)
hout = x_mm.size(-1)
grad_bias = grad_y
grad_s = grad_y
# backprop through the last element-wise multiplication
grad_ensemble_s = torch.mul(grad_s, x_mm)
grad_ensemble_s = torch.sum(grad_ensemble_s, dim=0)
# backprop through the MM
grad_mm = torch.mul(grad_s, ensemble_s)
grad_mm = grad_mm.view(-1, hout)
grad_r = torch.mm(grad_mm, weight).view(len_x, bsz, hin)
# GEMM: [hout x bsz] \times [bsz x hin]
grad_weight = torch.mm(grad_mm.transpose(0, 1), x_r.view(-1, hin))
# back prop through the first element-wise multiplication
# element-wise [len_x, bsz, hin] \cdot [bsz, hin]
grad_x = torch.mul(grad_r, ensemble_r)
# grad ensemble r
grad_ensemble_r = torch.mul(grad_r, x)
grad_ensemble_r = torch.sum(grad_ensemble_r, dim=0)
return grad_x, grad_weight, grad_bias, grad_ensemble_r, grad_ensemble_s
class BatchEnsembleLinearFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, weight, bias, ensemble_r, ensemble_s):
x_s, x_mm, x_r = BatchEnsembleMM.forward(x, weight, bias, ensemble_r, ensemble_s)
output = x_s
ctx.save_for_backward(x, weight, bias, ensemble_r, ensemble_s, x_mm, x_r)
return output
@staticmethod
def backward(ctx, grad_output):
x, weight, bias, ensemble_r, ensemble_s, x_mm, x_r = ctx.saved_tensors
grad_x, grad_weight, grad_bias, grad_ensemble_r, grad_ensemble_s = \
BatchEnsembleMM.backward(grad_output, x, x_r, x_mm, weight, ensemble_r, ensemble_s)
return grad_x, grad_weight, grad_bias, grad_ensemble_r, grad_ensemble_s
class BatchEnsembleLinear(torch.nn.Module):
# TODO: write gradcheck testing
def __init__(self, input_size, output_size, ensemble):
super().__init__()
self.weight = torch.nn.Parameter(torch.Tensor(output_size, input_size))
self.bias = torch.nn.Parameter(torch.Tensor(output_size))
self.r = torch.nn.Parameter(torch.Tensor(ensemble, input_size))
self.s = torch.nn.Parameter(torch.Tensor(ensemble, output_size))
self.reset_parameters()
def reset_parameters(self, init='normal'):
if init == 'normal':
torch.nn.init.xavier_normal_(self.weight)
else:
torch.nn.init.xavier_uniform_(self.weight)
# for batch ensemble we init r_i and s_i with random sign vectors
# with torch.no_grad():
# self.r.bernoulli_(0.5).mul_(-2).add_(1)
# self.s.bernoulli_(0.5).mul_(-2).add_(1)
torch.nn.init.normal_(self.r, 0.0, 0.02)
torch.nn.init.normal_(self.s, 0.0, 0.02)
def forward(self, input, indices=None):
"""
:param input: T x B x H
:param indices: T x B or B
:return:
"""
ensemble = self.r.size(0)
bsz = input.size(1) if len(input.shape) == 3 else input.size(0)
if indices is None:  # if the caller does not provide ensemble indices, assign them round-robin over the batch
with torch.no_grad():
indices = torch.arange(0, bsz, device=input.device, dtype=torch.long)
indices = torch.remainder(indices, ensemble)
# during training, each batch element uses the weights of its assigned ensemble member
if self.training:
r = torch.index_select(self.r, 0, indices)
s = torch.index_select(self.s, 0, indices)
if len(input.shape) == 3:
return BatchEnsembleLinearFunction.apply(input, self.weight, self.bias, r, s)
if len(input.shape) == 2:
return torch.mul(F.linear(torch.mul(input, r), self.weight, self.bias), s)
# during eval we have to repeat the dimensions ensemble times
else:
if len(input.shape) == 3:
if indices is not None:
len_x, bsz, hin = input.size(0), input.size(1), input.size(2)
input = input.repeat(1, ensemble, 1)
# we need the transpose step to ensure that both should have ensemble x batch
# but should it be ensemble x batch or batch x ensemble ? ...
# TODO: test at decoding time. batch_size=beam_size=1 should yield the same result
# r = self.r.repeat(bsz, 1).view(bsz, ensemble, self.r.size(-1)).\
# transpose(0, 1).contiguous().view(-1, self.r.size(-1))
# s = self.s.repeat(bsz, 1).view(bsz, ensemble, self.s.size(-1)).\
# transpose(0, 1).contiguous().view(-1, self.s.size(-1))
input = input.view(len_x, ensemble, bsz, hin)
r = self.r.unsqueeze(1) # ensemble x 1 x hin
s = self.s.unsqueeze(1) # ensemble x 1 x hout
output = torch.mul(F.linear(torch.mul(input, r), self.weight, self.bias), s)
output = output.view(len_x, ensemble, bsz, output.size(-1))
# output = BatchEnsembleLinearFunction.apply(input, self.weight, self.bias, r, s)
# output = output.view(len_x, ensemble, bsz, -1)
output = torch.mean(output, dim=1)
return output
else:
r = torch.index_select(self.r, 0, indices)
s = torch.index_select(self.s, 0, indices)
if len(input.shape) == 3:
return BatchEnsembleLinearFunction.apply(input, self.weight, self.bias, r, s)
if len(input.shape) == 2:
return torch.mul(F.linear(torch.mul(input, r), self.weight, self.bias), s)
else:
bsz, hin = input.size(0), input.size(1)
input = input.repeat(ensemble, 1)
r = self.r.repeat(bsz, 1).view(bsz, ensemble, self.r.size(-1)).\
transpose(0, 1).contiguous().view(-1, self.r.size(-1))
s = self.s.repeat(bsz, 1).view(bsz, ensemble, self.s.size(-1)).\
transpose(0, 1).contiguous().view(-1, self.s.size(-1))
output = torch.mul(F.linear(torch.mul(input, r), self.weight, self.bias), s)
output = output.view(ensemble, bsz, -1)
output = torch.mean(output, dim=0)
return output
class BEPositionWiseFeedForward(torch.nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, model_size, inner_size, dropout=0., variational=False, activation='relu', ensemble=1):
super().__init__()
# self.input_linear = BatchEnsembleLinear(model_size, inner_size, ensemble)
# self.output_linear = BatchEnsembleLinear(inner_size, model_size, ensemble)
self.variational = variational
self.dropout = dropout
self.activation = activation
self.ensemble = ensemble
self.in_proj_weight = torch.nn.Parameter(torch.Tensor(inner_size, model_size))
self.out_proj_weight = torch.nn.Parameter(torch.Tensor(model_size, inner_size))
self.in_proj_bias = torch.nn.Parameter(torch.Tensor(inner_size))
self.out_proj_bias = torch.nn.Parameter(torch.Tensor(model_size))
self.r_in = torch.nn.Parameter(torch.Tensor(ensemble, model_size))
self.s_in = torch.nn.Parameter(torch.Tensor(ensemble, inner_size))
self.r_out = torch.nn.Parameter(torch.Tensor(ensemble, inner_size))
self.s_out = torch.nn.Parameter(torch.Tensor(ensemble, model_size))
def forward(self, input, indices=None):
len_x, bsz = input.size(0), input.size(1)
ensemble = self.r_in.size(0)
if self.training:
with torch.no_grad():
indices = torch.arange(0, bsz, device=input.device, dtype=torch.long)
indices = torch.remainder(indices, ensemble)
r_in = torch.index_select(self.r_in, 0, indices)
s_in = torch.index_select(self.s_in, 0, indices)
r_out = torch.index_select(self.r_out, 0, indices)
s_out = torch.index_select(self.s_out, 0, indices)
input = torch.mul(input, r_in)
input = F.linear(input, self.in_proj_weight, self.in_proj_bias)
input = torch.mul(input, s_in)
input = F.relu(input)
if self.variational:
input = variational_dropout(input, p=self.dropout, training=self.training)
else:
input = F.dropout(input, p=self.dropout, training=self.training)
input = torch.mul(input, r_out)
input = F.linear(input, self.out_proj_weight, self.out_proj_bias)
input = torch.mul(input, s_out)
return input
else:
input = input.repeat(1, ensemble, 1).view(len_x, ensemble, bsz, input.size(-1))
input = torch.mul(input, self.r_in.unsqueeze(1))
input = F.linear(input, self.in_proj_weight, self.in_proj_bias)
input = torch.mul(input, self.s_in.unsqueeze(1))
input = F.relu(input)
input = torch.mul(input, self.r_out.unsqueeze(1))
input = F.linear(input, self.out_proj_weight, self.out_proj_bias)
input = torch.mul(input, self.s_out.unsqueeze(1))
input = torch.mean(input, dim=1)
return input
# hidden = self.input_linear(input, indices)
# hidden = F.relu(hidden)
# if self.variational:
# hidden = variational_dropout(hidden, p=self.dropout, training=self.training)
# else:
# hidden = F.dropout(hidden, p=self.dropout, training=self.training)
# hidden = self.output_linear(hidden, indices)
# return hidden  # unreachable: both the training and eval branches above already return
def reset_parameters(self, init='normal'):
torch.nn.init.xavier_normal_(self.in_proj_weight)
torch.nn.init.xavier_normal_(self.out_proj_weight)
torch.nn.init.constant_(self.in_proj_bias, 0.0)
torch.nn.init.constant_(self.out_proj_bias, 0.0)
torch.nn.init.normal_(self.r_in, 0.0, 0.02)
torch.nn.init.normal_(self.s_in, 0.0, 0.02)
torch.nn.init.normal_(self.r_out, 0.0, 0.02)
torch.nn.init.normal_(self.s_out, 0.0, 0.02)
# self.input_linear.reset_parameters(init)
# self.output_linear.reset_parameters(init)
if __name__ == "__main__":
bsz = 16
seq_len = 6
input_size = 16
output_size = 32
ensemble = 72
model = BatchEnsembleLinear(input_size, output_size, ensemble)
input = torch.randn((seq_len, bsz, input_size), requires_grad=True)
print(input)
model = model.double().cuda()
input = input.double().cuda()
print("Gradchecking ...")
torch.autograd.gradcheck(model, input)
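# Rank-1 identity behind BatchEnsembleMM.forward (illustrative sketch, not part of the original
# test): s * ((r * x) @ W.T) equals x @ (W * outer(s, r)).T, i.e. each example is effectively
# multiplied by its own rank-1-modulated weight matrix without ever materialising it.
h_in, h_out = 8, 5
x0 = torch.randn(h_in, dtype=torch.double)
w0 = torch.randn(h_out, h_in, dtype=torch.double)
r0 = torch.randn(h_in, dtype=torch.double)
s0 = torch.randn(h_out, dtype=torch.double)
assert torch.allclose(s0 * (w0 @ (r0 * x0)), (w0 * torch.outer(s0, r0)) @ x0)
print("BatchEnsemble rank-1 identity holds.")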
| 12,098
| 38.15534
| 109
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/bayes_by_backprop/embedding.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/bayes_by_backprop/utils.py
|
import torch
def flatten_list(tensors):
flat = list()
indices = list()
shapes = list()
s = 0
for tensor in tensors:
shapes.append(tensor.shape)
flat_t = torch.flatten(tensor)
size = flat_t.shape[0]
flat.append(flat_t)
indices.append((s, s+size))
s += size
flat = torch.cat(flat).view(-1)
return flat, indices, shapes
def unflatten(flat, indices, shapes):
params = [flat[s:e] for (s, e) in indices]
for i, shape_p in enumerate(shapes):
params[i] = params[i].view(*shape_p)
return tuple(params)
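# A minimal round-trip usage sketch (assumption: example code only, not used elsewhere in the
# repo): flatten_list packs a list of tensors into one flat vector plus the bookkeeping needed
# by unflatten to restore the original shapes.
if __name__ == "__main__":
    a = torch.randn(3, 4)
    b = torch.randn(7)
    flat, indices, shapes = flatten_list([a, b])
    a2, b2 = unflatten(flat, indices, shapes)
    assert torch.equal(a, a2) and torch.equal(b, b2)
    print("flatten_list/unflatten round-trip ok:", flat.shape)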
| 599
| 18.354839
| 46
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/bayes_by_backprop/feed_forward.py
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from onmt.modules.dropout import variational_dropout
from .gaussian import Gaussian, ScaleMixtureGaussian
from .utils import flatten_list, unflatten
class PositionWiseFeedForward(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, model_size, inner_size, dropout=0., variational=False, activation='relu'):
super().__init__()
self.model_size = model_size
self.inner_size = inner_size
self.dropout = dropout
self.bias = True
self.variational = variational
self.activation = activation
# two variables to record the (sum) of priors for all linear variables
self.log_prior = 0
self.log_variational_posterior = 0
in_proj_weight_mu = torch.Tensor(inner_size, model_size)
in_proj_weight_rho = torch.Tensor(inner_size, model_size)
out_proj_weight_mu = torch.Tensor(model_size, inner_size)
out_proj_weight_rho = torch.Tensor(model_size, inner_size)
in_proj_bias_mu = torch.Tensor(inner_size)
in_proj_bias_rho = torch.Tensor(inner_size)
out_proj_bias_mu = torch.Tensor(model_size)
out_proj_bias_rho = torch.Tensor(model_size)
mu, self.indices, self.shapes = \
flatten_list([in_proj_weight_mu, out_proj_weight_mu, in_proj_bias_mu, out_proj_bias_mu])
rho, _, _ = flatten_list([in_proj_weight_rho, out_proj_weight_rho, in_proj_bias_rho, out_proj_bias_rho])
self.mu = Parameter(mu)
self.rho = Parameter(rho)
self.weight = Gaussian(self.mu, self.rho)
self.weight_prior = ScaleMixtureGaussian()
self.reset_parameters()
try:
from apex.mlp.mlp import mlp_function
self.optimized = 2
self.fast_mlp_func = mlp_function
except ModuleNotFoundError as e:
self.optimized = 2
def reset_parameters(self):
std_ = math.sqrt(2.0 / (self.model_size + self.inner_size))
nn.init.normal_(self.mu, 0.0, std_)
nn.init.normal_(self.rho, -5, 0.1)
def forward(self, input, sample=False, calculate_log_probs=False):
calculate_log_probs = calculate_log_probs or self.training
sample = sample or self.training
# (MCMC)
# Sample the weights from the variational posterior distribution q(w)
sampled_weights, log_variational_posterior = self.weight.sample(sample, calculate_log_probs)
in_proj_weight, out_proj_weight, in_proj_bias, out_proj_bias = \
unflatten(sampled_weights, self.indices, self.shapes)
if self.optimized == 2 or not input.is_cuda:
hidden = F.linear(input, in_proj_weight, in_proj_bias)
hidden = F.relu(hidden, inplace=True)
if self.variational:
hidden = variational_dropout(hidden, p=self.dropout, training=self.training)
else:
hidden = F.dropout(hidden, p=self.dropout, training=self.training)
hidden = F.linear(hidden, out_proj_weight, out_proj_bias)
else:
# Apex MLP does not support dropout so instead we use dropconnect
# Theoretically they should be the same ^^
weights = [in_proj_weight,
out_proj_weight]
biases = [in_proj_bias,
out_proj_bias]
seq_len, bsz, hidden_size = input.size(0), input.size(1), input.size(2)
# True = bias, 1 = relu
hidden = self.fast_mlp_func(True, 1, input.view(seq_len*bsz, -1), *weights, *biases)
hidden = hidden.view(seq_len, bsz, hidden_size)
if calculate_log_probs:
# KL Divergence between prior and (variational) posterior
self.log_variational_posterior = log_variational_posterior
self.log_prior = self.weight_prior.log_prob(sampled_weights)
return hidden
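# How the quantities set in forward() are typically consumed (an illustrative sketch under the
# assumption of a standard Bayes-by-Backprop objective; the actual trainer lives elsewhere in
# the repo): the KL term of the ELBO is log q(w) - log p(w), summed over all Bayesian
# sub-modules and added, suitably scaled, to the ordinary data loss. Requires a CUDA device
# because the scale-mixture prior allocates its sigmas on the GPU.
if __name__ == "__main__":
    ffn = PositionWiseFeedForward(model_size=16, inner_size=32, dropout=0.0).cuda()
    ffn.train()
    x = torch.randn(5, 2, 16).cuda()
    y = ffn(x)
    kl_term = ffn.log_variational_posterior - ffn.log_prior
    print("output shape:", tuple(y.shape), "| KL contribution:", float(kl_term))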
| 4,046
| 37.913462
| 112
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/bayes_by_backprop/gaussian.py
|
import torch
import torch.nn.functional as F
import numpy
import math
import torch.nn as nn
log_sqrt_2pi = math.log(math.sqrt(2 * math.pi))
class Gaussian(object):
def __init__(self, mu, rho):
super().__init__()
self.mu = mu
self.rho = rho
self.normal = torch.distributions.Normal(0, 1)
@property
def sigma(self):
# sigma = log(exp(rho) + 1) = softplus
return F.softplus(self.rho, beta=1) # this should be a numerically better option
# return torch.log1p(torch.exp(self.rho))
def sample(self, stochastic=False, return_log_prob=False):
wsize = self.mu.numel()
sigma = self.sigma
if stochastic:
# epsilon = self.normal.sample(self.rho.size()).type_as(self.mu)
# reparameterization trick: epsilon must be a standard normal sample (rand_like would be uniform)
epsilon = torch.randn_like(self.mu)
var = sigma * epsilon
# return torch.addcmul(self.mu, self.sigma, epsilon)
w = self.mu + var
else:
w = self.mu
var = 0
if not return_log_prob:
return w, 0
else:
sigma = sigma.float()
log_prob = (- log_sqrt_2pi
- torch.log(sigma)
- (var ** 2) / (2 * sigma ** 2)).sum()
# log_prob = (-(var ** 2) / (2 * sigma) - torch.log(sigma) - math.log(math.sqrt(2 * math.pi))).sum()
return w, log_prob
def log_prob(self, input):
sigma = self.sigma.float()
input = input.float()
return (- log_sqrt_2pi
- torch.log(sigma)
- ((input - self.mu) ** 2) / (2 * sigma ** 2)).sum()
class ScaleMixtureGaussian(object):
def __init__(self, pi=None, sigma1=None, sigma2=None):
super().__init__()
from onmt.constants import neg_log_sigma1, neg_log_sigma2, prior_pi
sigma1 = torch.cuda.FloatTensor([math.exp(-neg_log_sigma1)]) if sigma1 is None else sigma1
sigma2 = torch.cuda.FloatTensor([math.exp(-neg_log_sigma2)]) if sigma2 is None else sigma2
pi = prior_pi if pi is None else pi
self.pi = pi
self.sigma1 = sigma1
self.sigma2 = sigma2
self.gaussian1 = torch.distributions.Normal(0, sigma1)
self.gaussian2 = torch.distributions.Normal(0, sigma2)
def log_prob(self, input):
# input = input.float() # for exp better to cast to float
# print(input.type())
prob1 = torch.exp(self.gaussian1.log_prob(input))
prob2 = torch.exp(self.gaussian2.log_prob(input))
return (torch.log(self.pi * prob1 + (1 - self.pi) * prob2)).sum()
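# Minimal usage sketch (illustrative only; requires a CUDA device because the default
# ScaleMixtureGaussian prior allocates its sigmas on the GPU): sample a weight vector with the
# reparameterization trick and score it under the posterior q and the scale-mixture prior p.
if __name__ == "__main__":
    mu = torch.zeros(10, device="cuda")
    rho = torch.full((10,), -5.0, device="cuda")  # sigma = softplus(rho) is small at init
    q = Gaussian(mu, rho)
    w, log_q = q.sample(stochastic=True, return_log_prob=True)
    prior = ScaleMixtureGaussian()
    print("w:", tuple(w.shape), "| log q(w):", float(log_q), "| log p(w):", float(prior.log_prob(w)))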
| 2,619
| 33.473684
| 112
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/bayes_by_backprop/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/bayes_by_backprop/generator.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class Generator(nn.Module):
def __init__(self, hidden_size, output_size, fix_norm=False):
super(Generator, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.linear = nn.Linear(hidden_size, output_size)
self.fix_norm = fix_norm
stdv = 1. / math.sqrt(self.linear.weight.size(1))
torch.nn.init.uniform_(self.linear.weight, -stdv, stdv)
self.linear.bias.data.zero_()
def forward(self, output_dicts):
"""
:param output_dicts: dictionary contains the outputs from the decoder
:return: logits (the elements before softmax)
"""
input = output_dicts['hidden']
fix_norm = self.fix_norm
target_mask = output_dicts['target_mask']
if not fix_norm:
logits = self.linear(input).float()
else:
normalized_weights = F.normalize(self.linear.weight, dim=-1)
normalized_bias = self.linear.bias
logits = F.linear(input, normalized_weights, normalized_bias)
# softmax will be done at the loss function
# output = F.log_softmax(logits, dim=-1, dtype=torch.float32)
return logits
| 1,214
| 31.837838
| 77
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/bayes_by_backprop/relative_self_attention.py
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .gaussian import Gaussian, ScaleMixtureGaussian
from .utils import flatten_list, unflatten
from ..optimized.relative_self_attention_func import relative_self_attn_func
# from .fast_self_multihead_attn_func import fast_self_attn_func
# from .fast_self_multihead_attn_norm_add_func import fast_self_attn_norm_add_func
# from apex.normalization.fused_layer_norm import FusedLayerNorm
class RelativeSelfMultiheadAttn(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, model_size, num_heads, dropout=0.):
super().__init__()
self.model_size = model_size
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = model_size // num_heads
assert self.head_dim * num_heads == self.model_size, "model_size must be divisible by num_heads"
self.bias = True
self.log_prior = 0
self.log_variational_posterior = 0
in_proj_weight_mu = torch.Tensor(3 * model_size, model_size)
in_proj_weight_rho = torch.Tensor(3 * model_size, model_size)
out_proj_weight_mu = torch.Tensor(model_size, model_size)
out_proj_weight_rho = torch.Tensor(model_size, model_size)
pos_proj_weight_mu = torch.Tensor(model_size, model_size)
pos_proj_weight_rho = torch.Tensor(model_size, model_size)
in_proj_bias_mu = torch.Tensor(3*model_size)
in_proj_bias_rho = torch.Tensor(3*model_size)
out_proj_bias_mu = torch.Tensor(model_size)
out_proj_bias_rho = torch.Tensor(model_size)
pos_proj_bias_mu = torch.Tensor(model_size)
pos_proj_bias_rho = torch.Tensor(model_size)
r_w_bias_mu = torch.Tensor(self.num_heads, self.head_dim)
r_w_bias_rho = torch.Tensor(self.num_heads, self.head_dim)
r_r_bias_mu = torch.Tensor(self.num_heads, self.head_dim)
r_r_bias_rho = torch.Tensor(self.num_heads, self.head_dim)
mu, self.indices, self.shapes = flatten_list([in_proj_weight_mu, out_proj_weight_mu, pos_proj_weight_mu,
in_proj_bias_mu, out_proj_bias_mu, pos_proj_bias_mu,
r_w_bias_mu, r_r_bias_mu])
rho, _, _ = flatten_list([in_proj_weight_rho, out_proj_weight_rho, pos_proj_weight_rho,
in_proj_bias_rho, out_proj_bias_rho, pos_proj_bias_rho,
r_w_bias_rho, r_r_bias_rho])
self.mu = Parameter(mu)
self.rho = Parameter(rho)
self.weight = Gaussian(self.mu, self.rho)
self.weight_prior = ScaleMixtureGaussian()
self.reset_parameters()
self.attn_func = relative_self_attn_func
def reset_parameters(self):
# nn.init.xavier_uniform_(self.in_proj_weight, gain=math.sqrt(2))
# nn.init.xavier_uniform_(self.out_proj_weight)
std_ = math.sqrt(2.0 / (self.model_size + self.model_size))
nn.init.normal_(self.mu, 0.0, std_)
nn.init.normal_(self.rho, -5, 0.1)
# nn.init.uniform_(self.rho, -6, -5)
def forward(self, input, pos, key_padding_mask=None, attn_mask=None, mems=None,
incremental=False, incremental_cache=None, sample=False, calculate_log_probs=False):
calculate_log_probs = calculate_log_probs or self.training
sample = sample or self.training
# (MCMC)
# Sample the weights from the variational posterior distribution q(w)
sampled_weights, log_variational_posterior = self.weight.sample(sample, calculate_log_probs)
in_proj_weight, out_proj_weight, pos_proj_weight, \
in_proj_bias, out_proj_bias, pos_proj_bias, \
r_w_bias, r_r_bias = unflatten(sampled_weights, self.indices, self.shapes)
if key_padding_mask is not None:
assert (attn_mask is None), "ERROR attn_mask and key_padding_mask should not be both defined!"
mask = key_padding_mask
if len(mask.shape) == 3:
mask = mask.squeeze(0).transpose(0, 1)
elif attn_mask is not None:
mask = attn_mask
if len(mask.shape) == 3:
mask = mask.squeeze(-1)
else:
mask = None
is_training = self.training
outputs, coverage = self.attn_func(input, pos, attn_mask is not None, is_training, self.num_heads,
in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias,
r_w_bias, r_r_bias,
mask, self.dropout,
incremental, incremental_cache, False, False)
# last False is double precision
# KL Divergence between prior and (variational) posterior
if calculate_log_probs:
self.log_variational_posterior = log_variational_posterior
self.log_prior = self.weight_prior.log_prob(sampled_weights)
return outputs, coverage
| 5,261
| 42.131148
| 112
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/bayes_by_backprop/encdec_attention.py
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from ..optimized.encdec_attention_func import encdec_attn_func
from .gaussian import Gaussian, ScaleMixtureGaussian
from .utils import flatten_list, unflatten
class EncdecMultiheadAttn(nn.Module):
"""Multi-headed encoder-decoder attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, num_heads, embed_dim, attn_drop=0.):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = attn_drop
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = False
self.scaling = self.head_dim ** -0.5 # this value is hardcoded in the "fast" implementation
# two variables to record the (sum) of priors for all linear variables
self.log_prior = 0
self.log_variational_posterior = 0
# Q linear mapping weight
in_proj_weight_q_mu = torch.Tensor(embed_dim, embed_dim)
in_proj_weight_q_rho = torch.Tensor(embed_dim, embed_dim)
# KV Linear mapping weight
in_proj_weight_kv_mu = torch.Tensor(2 * embed_dim, embed_dim)
in_proj_weight_kv_rho = torch.Tensor(2 * embed_dim, embed_dim)
# Output linear mapping weight
out_proj_weight_mu = torch.Tensor(embed_dim, embed_dim)
out_proj_weight_rho = torch.Tensor(embed_dim, embed_dim)
self.mu, self.indices, self.shapes = \
flatten_list([in_proj_weight_q_mu, in_proj_weight_kv_mu, out_proj_weight_mu])
self.mu = Parameter(self.mu)
self.rho, _, _ = flatten_list([in_proj_weight_q_rho, in_proj_weight_kv_rho, out_proj_weight_rho])
self.rho = Parameter(self.rho)
self.weight = Gaussian(self.mu, self.rho)
self.weight_prior = ScaleMixtureGaussian()
self.attn_func = encdec_attn_func
self.reset_parameters()
try:
# the fast one requires apex and does not work with incremental so careful
from apex.contrib.multihead_attn.fast_encdec_multihead_attn_func import fast_encdec_attn_func
self.attn_func_fast = fast_encdec_attn_func
self.optimized = 1
except ModuleNotFoundError as e:
self.optimized = 2
self.attn_func_fast = None
def reset_parameters(self):
# We initialize μ with a Gaussian around 0
# (just as we would initialize standard weights of a neural network)
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.mu, 0.0, std_)
# It is important to initialize ρ (and hence σ) to a small value, otherwise learning might not work properly.
nn.init.normal_(self.rho, -7, 0.1)
def forward(self, query, key, value, attn_mask=None, incremental=False, incremental_cache=None,
sample=False, calculate_log_probs=False):
calculate_log_probs = calculate_log_probs or self.training
sample = sample or self.training
assert value is key, "ERROR: Keys and values must be the same."
is_training = self.training
time_masking = False
len_key = key.size(0)
# (MCMC)
# Sample the weights from the variational posterior distribution q(w)
sampled_weights, log_variational_posterior = \
self.weight.sample(stochastic=sample, return_log_prob=calculate_log_probs)
in_proj_weight_q, in_proj_weight_kv, out_proj_weight = unflatten(sampled_weights, self.indices, self.shapes)
# Perform forward with the sampled weights
if self.optimized == 1 and (self.training and not incremental) and len_key <= 1024 and query.is_cuda:
if attn_mask is not None:
if attn_mask.dim() == 3:
attn_mask = attn_mask.squeeze(1)
attn_mask = attn_mask.byte()
outputs = self.attn_func_fast(time_masking, is_training, self.num_heads,
query.type_as(in_proj_weight_q), key.type_as(in_proj_weight_q),
in_proj_weight_q, in_proj_weight_kv, out_proj_weight,
attn_mask, self.dropout)
coverage = None
# during evaluation we use the python binding which is safer ....
else:
outputs, coverage, = self.attn_func(time_masking, is_training,
self.num_heads, query, key,
in_proj_weight_q, in_proj_weight_kv,
out_proj_weight, attn_mask, self.dropout,
incremental, incremental_cache)
if calculate_log_probs:
# KL Divergence between prior and (variational) posterior
self.log_variational_posterior = log_variational_posterior
self.log_prior = self.weight_prior.log_prob(sampled_weights)
return outputs, coverage
| 5,175
| 42.133333
| 117
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/kernels/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/kernels/kernel.py
|
"""Construct wide convolution kernels."""
from typing import Optional, Mapping, Tuple, Union
from collections import defaultdict
import math
import torch
import torch.nn as nn
| 180
| 15.454545
| 50
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/mlp/test_mlp_gelu.py
|
from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat import custom_fwd, custom_bwd
try:
import fused_mlp_relu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_relu = None
try:
import fused_mlp_agelu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_agelu = None
try:
import fused_mlp_gelu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_gelu = None
try:
    import fused_mlp_silu
except (ModuleNotFoundError, ImportError) as e:
    fused_mlp_silu = None
try:
import mlp_gelu_blaslt
except (ModuleNotFoundError, ImportError) as e:
mlp_gelu_blaslt = None
torch.backends.cuda.matmul.allow_tf32 = True
# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
torch.backends.cudnn.allow_tf32 = True
#
# class MlpReluFunction(torch.autograd.Function):
# @staticmethod
# @custom_fwd(cast_inputs=torch.float16)
# def forward(ctx, activation, *args):
# output = fused_mlp.forward(args)
# ctx.save_for_backward(*args)
# ctx.outputs = output
# return output[0]
#
# @staticmethod
# @custom_bwd
# def backward(ctx, grad_o):
# grads = fused_mlp.backward(grad_o, ctx.outputs, ctx.saved_tensors)
# del ctx.outputs
# return (None, *grads)
#
#
class MlpReluFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
output = fused_mlp_relu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = output
dropout_mask = output[-1]
ctx.p = p
return output[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_relu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
class MlpSiluFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
outputs = fused_mlp_silu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = outputs
dropout_mask = outputs[-1]
ctx.p = p
return outputs[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_silu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
class MlpGeLUFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, p, *args):
outputs = fused_mlp_gelu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = outputs
dropout_mask = outputs[-1]
ctx.p = p
ctx.weight_requires_grad = args[1].requires_grad
return outputs[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
if ctx.weight_requires_grad:
grads = fused_mlp_gelu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
else:
grads = fused_mlp_gelu.backward_input_only(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
for i in range(len(ctx.saved_tensors) - 1):
grads.append(None)
del ctx.outputs
del ctx.p
return (None, *grads)
class MlpGeLUFunctionBLASLT(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, p, recompute, *args):
outputs = mlp_gelu_blaslt.forward(p, args)
ctx.save_for_backward(*args)
ctx.recompute = recompute
if recompute:
ctx.outputs = (outputs[0], outputs[-1])
else:
ctx.outputs = outputs
dropout_mask = outputs[-1]
ctx.p = p
return outputs[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
recompute = ctx.recompute
grads = mlp_gelu_blaslt.backward(p, recompute, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
# del ctx.p
# del ctx.recompute
return (None, None, *grads)
if fused_mlp_gelu:
mlp_gelu_function = MlpGeLUFunction.apply
else:
mlp_gelu_function = None
if mlp_gelu_blaslt:
mlp_gelu_function_blaslt = MlpGeLUFunctionBLASLT.apply
else:
mlp_gelu_function_blaslt = None
if __name__ == '__main__':
class MLP(torch.nn.Module):
"""Launch MLP in C++
Args:
mlp_sizes (list of int): MLP sizes. Example: [1024,1024,1024] will create 2 MLP layers with shape 1024x1024
bias (bool): Default True:
relu (bool): Default True
"""
def __init__(self, mlp_sizes, activation='gelu', dropout=0.5):
super(MLP, self).__init__()
self.num_layers = len(mlp_sizes) - 1
self.mlp_sizes = copy(mlp_sizes)
self.dropout = dropout
if activation == 'relu':
self.activation = 1
elif activation == 'sigmoid':
self.activation = 2
elif activation == 'gelu':
self.activation = 3
else:
raise TypeError("activation must be relu or none.")
self.weights = []
self.biases = []
for i in range(self.num_layers):
w = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1], mlp_sizes[i]))
self.weights.append(w)
name = 'weight_{}'.format(i)
setattr(self, name, w)
b = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1]))
self.biases.append(b)
name = 'bias_{}'.format(i)
setattr(self, name, b)
self.reset_parameters()
def reset_parameters(self):
for weight in self.weights:
dimsum = weight.size(0) + weight.size(1)
std = math.sqrt(2. / float(dimsum))
nn.init.normal_(weight, 0., std)
for bias in self.biases:
std = math.sqrt(1. / float(bias.size(0)))
nn.init.normal_(bias, 0., 0.0)
def forward(self, input, mask=None, ref=False, fastest=False, recompute=False):
if fastest and not ref:
return mlp_gelu_function_blaslt(self.dropout, recompute, input, *self.weights, *self.biases)
if ref:
return self.forward_ref(input, mask)
# return mlp_relu_function(self.dropout, input, *self.weights, *self.biases)
# return mlp_agelu_function(self.dropout, input, *self.weights, *self.biases)
return mlp_gelu_function(self.dropout, input, *self.weights, *self.biases)
def forward_ref(self, input, mask):
i = 0
output = input
for l in range(self.num_layers):
output = F.linear(output, self.weights[l], self.biases[l])
dropout_mask = mask[i:i + output.numel()]
pinv = 1 / (1 - self.dropout)
if l < self.num_layers - 1:
# print(mask.size())
# output = fast_silu(output) * dropout_mask.view(output.size(0), -1) * pinv
# output = GELUFunction.apply(output) * dropout_mask.view(output.size(0), -1) * pinv
if self.dropout > 0:
output = F.gelu(output) * dropout_mask.view_as(output) * pinv
else:
output = F.gelu(output)
i += output.numel()
return output
def extra_repr(self):
# TODO add dropout probability
s = F"MLP sizes: {self.mlp_sizes}, activation={self.activation}, dropout={self.dropout}"
return s
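# Quick illustration (explanatory only, independent of the classes above) of the inverted-dropout
# convention used in forward_ref: applying a fixed 0/1 mask and rescaling by 1 / (1 - p) keeps the
# expected activation unchanged, which is why the mask returned by the fused kernel can be reused
# to build a numerically comparable PyTorch reference.
p_demo = 0.5
x_demo = torch.ones(4, 8)
mask_demo = (torch.rand_like(x_demo) > p_demo).to(x_demo.dtype)
x_dropped = x_demo * mask_demo / (1.0 - p_demo)
print("kept fraction:", mask_demo.mean().item(), "| surviving value:", x_dropped.max().item())  # survivors become 2.0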
seq_len = 1
batch_size = 1024
mlp_sizes = [1024, 4096, 1024]
num_iters = 512
class TestMLP(unittest.TestCase):
def test_creation(self):
MLP(mlp_sizes)
def test_numeric(self):
print("Test numeric 3D ....")
for dropout in [0.0, 0.2, 0.5, 0.7]:
mlp = MLP(mlp_sizes, dropout=dropout).cuda()
print(mlp)
ref_mlp = deepcopy(mlp)
for _ in range(1):
bsz = random.randint(8, batch_size // 8) * 8
test_input = torch.empty(seq_len, bsz, mlp_sizes[0], device="cuda").uniform_(-1.,
1.).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
mlp_out, dropout_mask = mlp(test_input, fastest=True, recompute=True)
ref_out = ref_mlp.forward(ref_input, dropout_mask, ref=True)
print(dropout_mask.sum() / dropout_mask.numel(), dropout_mask.numel())
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
# Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
mlp_out.mean().mul(10.).backward()
ref_out.mean().mul(10.).backward()
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
for i in range(len(mlp.weights)):
np.testing.assert_allclose(
mlp.weights[i].grad.detach().cpu().numpy(),
ref_mlp.weights[i].grad.detach().cpu().numpy(),
atol=1e-5, rtol=1e-5)
np.testing.assert_allclose(
mlp.biases[i].grad.detach().cpu().numpy(),
ref_mlp.biases[i].grad.detach().cpu().numpy(),
atol=1e-5, rtol=1e-5)
#
# def test_with_bias(self):
# mlp = MLP(mlp_sizes, activation=use_activation).cuda()
#
# ref_mlp = deepcopy(mlp)
#
# test_input = torch.empty(seq_len, batch_size, mlp_sizes[0], device="cuda").uniform_(-1.,
# 1.).requires_grad_()
# ref_input = test_input.clone().detach().requires_grad_()
# mlp_out, dropout_mask = mlp(test_input, fastest=True)
# ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
# np.testing.assert_allclose(
# mlp_out.detach().cpu().numpy(),
# ref_out.detach().cpu().numpy(),
# atol=1e-4, rtol=1e-4)
#
# # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
# mlp_out.mean().mul(10.).backward()
# ref_out.mean().mul(10.).backward()
# np.testing.assert_allclose(
# test_input.grad.detach().cpu().numpy(),
# ref_input.grad.detach().cpu().numpy(),
# atol=1e-4, rtol=1e-4)
#
# for l in range(mlp.num_layers):
# np.testing.assert_allclose(
# mlp.weights[l].grad.detach().cpu().numpy(),
# ref_mlp.weights[l].grad.detach().cpu().numpy(),
# atol=1e-4, rtol=1e-4)
# np.testing.assert_allclose(
# mlp.biases[l].grad.detach().cpu().numpy(),
# ref_mlp.biases[l].grad.detach().cpu().numpy(),
# atol=1e-4, rtol=1e-4)
#
# def test_no_weight_grad(self):
#
# print("Test backward no weight grad ...")
# for dropout in [0.0, 0.35]:
# mlp = MLP(mlp_sizes, activation="gelu", dropout=dropout).cuda()
# print(mlp)
# for p in mlp.parameters():
# p.requires_grad = False
#
# ref_mlp = deepcopy(mlp)
#
# test_input = torch.empty(seq_len, batch_size, mlp_sizes[0], device="cuda").uniform_(-1.,
# 1.).requires_grad_()
# ref_input = test_input.clone().detach().requires_grad_()
# mlp_out, dropout_mask = mlp(test_input)
# ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
# np.testing.assert_allclose(
# mlp_out.detach().cpu().numpy(),
# ref_out.detach().cpu().numpy(),
# atol=1e-7, rtol=1e-5)
#
# # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
# mlp_out.mean().mul(10.).backward()
# ref_out.mean().mul(10.).backward()
# np.testing.assert_allclose(
# test_input.grad.detach().cpu().numpy(),
# ref_input.grad.detach().cpu().numpy(),
# atol=1e-5, rtol=1e-4)
#
# # for l in range(mlp.num_layers):
# # np.testing.assert_allclose(
# # mlp.weights[l].grad.detach().cpu().numpy(),
# # ref_mlp.weights[l].grad.detach().cpu().numpy(),
# # atol=1e-7, rtol=1e-5)
# # np.testing.assert_allclose(
# # mlp.biases[l].grad.detach().cpu().numpy(),
# # ref_mlp.biases[l].grad.detach().cpu().numpy(),
# # atol=1e-7, rtol=1e-5)
#
# def test_no_grad(self):
# mlp = MLP(mlp_sizes).cuda()
# ref_mlp = deepcopy(mlp)
#
# test_input = torch.empty(seq_len, batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.)
# ref_input = test_input.clone().detach()
# mlp_out, dropout_mask = mlp(test_input)
#
# ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
# np.testing.assert_allclose(
# mlp_out.detach().cpu().numpy(),
# ref_out.detach().cpu().numpy(),
# atol=1e-7, rtol=1e-5)
def test_performance_half(self):
print("Testing performance ...")
for dropout in [0.0, 0.5]:
mlp = MLP(mlp_sizes, dropout=dropout).cuda().half()
print(mlp)
mlp_layers = []
for i in range(mlp.num_layers):
linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
mlp.weights[i].data.copy_(linear.weight)
mlp.biases[i].data.copy_(linear.bias)
mlp_layers.append(linear)
if i < mlp.num_layers - 1:
mlp_layers.append(torch.nn.GELU())
mlp_layers.append(nn.Dropout(dropout))
ref_mlp = nn.Sequential(*mlp_layers)
ref_mlp_compiled = nn.Sequential(*mlp_layers)
# ref_mlp = torch.compile(ref_mlp)
ref_mlp = ref_mlp.cuda().half()
ref_mlp_compiled = torch.compile(ref_mlp_compiled)
ref_mlp_compiled = ref_mlp_compiled.cuda().half()
test_input = torch.empty(
seq_len* batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
ref_input = torch.empty(
seq_len* batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
# Warm up GPU
for _ in range(num_iters):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
mlp_out, _ = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_out = ref_mlp_compiled(ref_input)
ref_loss = ref_out.mean()
ref_mlp_compiled.zero_grad()
ref_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch MLP compiled time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"Pytorch MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# torch.cuda.synchronize()
# start_time = time()
# for _ in range(num_iters):
# mlp_out, _ = mlp(test_input)
# test_loss = mlp_out.mean()
# mlp.zero_grad()
# test_loss.backward()
# torch.cuda.synchronize()
# stop_time = time()
# print(F"C++ MLP 3D time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# torch.cuda.profiler.stop()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
mlp_out, _ = mlp(test_input.view(-1, test_input.size(-1)))
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
mlp_out, _ = mlp(test_input, fastest=True)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"BLASLT MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
mlp_out, _ = mlp(test_input, fastest=True, recompute=True)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"BLASLT MLP recompute time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
# def test_performance_half_no_grad_weight(self):
# print("Testing performance without backward to weight ...")
# for dropout in [0.0, 0.5]:
# mlp = MLP(mlp_sizes, dropout=dropout).cuda().half()
#
# mlp_layers = []
# for i in range(mlp.num_layers):
# linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
# mlp.weights[i].data.copy_(linear.weight)
# mlp.biases[i].data.copy_(linear.bias)
# mlp_layers.append(linear)
# if i < mlp.num_layers - 1:
# # mlp_layers.append(nn.ReLU(inplace=True))
# mlp_layers.append(torch.nn.GELU())
# mlp_layers.append(nn.Dropout(dropout))
#
# ref_mlp = nn.Sequential(*mlp_layers).cuda().half()
#
# for p in mlp.parameters():
# p.requires_grad = False
#
# for p in ref_mlp.parameters():
# p.requires_grad = False
#
# test_input = torch.empty(
# batch_size, seq_len, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
# ref_input = torch.empty(
# batch_size, seq_len, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
#
# # Warm up GPU
# for _ in range(num_iters):
# ref_out = ref_mlp(ref_input)
# ref_loss = ref_out.mean()
# ref_mlp.zero_grad()
# ref_loss.backward()
# mlp_out, _ = mlp(test_input)
# test_loss = mlp_out.mean()
# mlp.zero_grad()
# test_loss.backward()
#
# torch.cuda.profiler.start()
# torch.cuda.synchronize()
# start_time = time()
# for _ in range(num_iters):
# ref_out = ref_mlp(ref_input)
# ref_loss = ref_out.mean()
# ref_mlp.zero_grad()
# ref_loss.backward()
# torch.cuda.synchronize()
# stop_time = time()
# print(F"\nPytorch MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
#
# torch.cuda.synchronize()
# start_time = time()
# for _ in range(num_iters):
# mlp_out, _ = mlp(test_input)
# test_loss = mlp_out.mean()
# mlp.zero_grad()
# test_loss.backward()
# torch.cuda.synchronize()
# stop_time = time()
# print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# torch.cuda.profiler.stop()
unittest.main()
# test = TestMLP()
# test.test_creation()
# test.test_performance_half()
# test.test_with_bias()
| 22,661
| 38.005164
| 122
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/mlp/mlp.py
|
from copy import copy
import math
import torch
from torch import nn
import unittest
from time import time
import numpy as np
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat import custom_fwd, custom_bwd
try:
import fused_mlp_relu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_relu = None
try:
import fused_mlp_silu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_silu = None
try:
import fused_mlp_gelu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_gelu = None
try:
import fused_mlp_agelu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_agelu = None
try:
import fused_mlp_gelu_dropout_add
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_gelu_dropout_add = None
try:
import mlp_gelu_blaslt
except (ModuleNotFoundError, ImportError) as e:
mlp_gelu_blaslt = None
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
class MlpReluFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, p, recompute, *args):
# only need to store dropout mask if we need to recompute
store_dropout_mask = recompute
output = fused_mlp_relu.forward(p, store_dropout_mask, args)
ctx.save_for_backward(*args)
ctx.recompute = recompute
if not recompute:
ctx.outputs = output
ctx.dropout_mask = None
else:
ctx.dropout_mask = output[-1]
ctx.outputs = None
ctx.p = p
return output[0]
@staticmethod
def backward(ctx, *grad_o):
p = ctx.p
if not ctx.recompute:
grads = fused_mlp_relu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
else:
grads = fused_mlp_relu.backward_recompute(p, grad_o[0], ctx.dropout_mask, ctx.saved_tensors)
del ctx.dropout_mask
return (None, None, *grads)
if fused_mlp_relu:
def mlp_relu_function(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return MlpReluFunction.apply(*args)
else:
mlp_relu_function = None
class MlpSiluFunction(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, p, recompute, *args):
output = fused_mlp_silu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = output
dropout_mask = output[-1]
ctx.p = p
return output[0]
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_silu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, None, *grads)
if fused_mlp_silu:
# mlp_silu_function = MlpSiluFunction.apply
def mlp_silu_function(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return MlpSiluFunction.apply(*args)
else:
mlp_silu_function = None
class MlpGELUFunction(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, p, recompute, *args):
if mlp_gelu_blaslt is not None:
output = mlp_gelu_blaslt.forward(p, args)
if recompute:
ctx.outputs = (output[0], output[-1])
del output[1]
del output[2]
else:
ctx.outputs = output
else:
output = fused_mlp_gelu.forward(p, args)
ctx.outputs = output
ctx.save_for_backward(*args)
# dropout_mask = output[-1]
ctx.p = p
ctx.recompute = recompute
ctx.requires_grad_weight = args[1].requires_grad
return output[0]
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
recompute = ctx.recompute
if ctx.requires_grad_weight:
if mlp_gelu_blaslt is not None:
grads = mlp_gelu_blaslt.backward(p, recompute, grad_o[0], ctx.outputs, ctx.saved_tensors)
else:
grads = fused_mlp_gelu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
else:
if mlp_gelu_blaslt is not None:
grads = mlp_gelu_blaslt.backward_input_only(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
else:
grads = fused_mlp_gelu.backward_input_only(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
for _ in range(len(ctx.saved_tensors) - 1):
grads.append(None)
del ctx.requires_grad_weight
del ctx.outputs
del ctx.p
del ctx.recompute
return (None, None, *grads)
if fused_mlp_gelu or mlp_gelu_blaslt:
def mlp_gelu_function(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return MlpGELUFunction.apply(*args)
else:
mlp_gelu_function = None
class MlpAGELUFunction(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, p, recompute, *args):
output = fused_mlp_agelu.forward(p, args)
ctx.save_for_backward(*args)
ctx.outputs = output
dropout_mask = output[-1]
ctx.p = p
return output[0]
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_agelu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, None, *grads)
if fused_mlp_agelu:
def mlp_agelu_function(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return MlpAGELUFunction.apply(*args)
else:
mlp_agelu_function = None
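# Plain-PyTorch reference of what the fused GELU MLP path computes (a sketch for clarity;
# assumption: this helper is not used anywhere else in the repo). Each hidden layer is
# Linear -> GELU -> dropout(p); the last layer is Linear only. The recompute flag only changes
# what the fused kernels keep for backward, so it is accepted here but ignored.
import torch.nn.functional as F


def mlp_gelu_reference(p, recompute, input, *weights_and_biases):
    num_layers = len(weights_and_biases) // 2
    weights = weights_and_biases[:num_layers]
    biases = weights_and_biases[num_layers:]
    output = input
    for i in range(num_layers):
        output = F.linear(output, weights[i], biases[i])
        if i < num_layers - 1:
            output = F.dropout(F.gelu(output), p=p)
    return output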
if __name__ == '__main__':
from copy import deepcopy
import torch.nn.functional as F
import random
class MLP(torch.nn.Module):
"""Launch MLP in C++
Args:
mlp_sizes (list of int): MLP sizes. Example: [1024,1024,1024] will create 2 MLP layers with shape 1024x1024
bias (bool): Default True:
relu (bool): Default True
"""
def fast_gelu_1(x):
# sqrt(2/pi) = 0.7978845608028654
return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * (x + 0.044715 * torch.pow(x, 3.0))))
def __init__(self, mlp_sizes, activation='gelu', dropout=0.25):
super(MLP, self).__init__()
self.num_layers = len(mlp_sizes) - 1
self.mlp_sizes = copy(mlp_sizes)
self.dropout = dropout
if activation == 'relu':
self.activation = 1
elif activation == 'sigmoid':
self.activation = 2
elif activation == 'gelu':
self.activation = 3
else:
raise TypeError("activation must be relu or none.")
self.weights = []
self.biases = []
for i in range(self.num_layers):
w = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1], mlp_sizes[i]))
self.weights.append(w)
name = 'weight_{}'.format(i)
setattr(self, name, w)
b = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1]))
self.biases.append(b)
name = 'bias_{}'.format(i)
setattr(self, name, b)
self.reset_parameters()
def reset_parameters(self):
for weight in self.weights:
dimsum = weight.size(0) + weight.size(1)
std = math.sqrt(2. / float(dimsum))
nn.init.normal_(weight, 0., std)
for bias in self.biases:
std = math.sqrt(1. / float(bias.size(0)))
nn.init.normal_(bias, 0., std)
def forward(self, input, mask=None, ref=False, blaslt=False):
if ref:
return self.forward_ref(input, mask=mask)
if not blaslt:
return mlp_gelu_function(self.dropout, False, input, *self.weights, *self.biases)
# print(input.type(), self.weights[0].type())
return mlp_gelu_blaslt_function(input, self.weights[0], self.biases[0], self.weights[1], self.biases[1])
def forward_ref(self, input, mask=None):
i = 0
output = input
for l in range(self.num_layers):
output = F.linear(output, self.weights[l], self.biases[l])
dropout_mask = mask[i:i + output.numel()]
pinv = 1 / (1 - self.dropout)
if l < self.num_layers - 1:
# print(mask.size())
# output = fast_silu(output) * dropout_mask.view(output.size(0), -1) * pinv
# output = GELUFunction.apply(output) * dropout_mask.view(output.size(0), -1) * pinv
output = F.gelu(output) * dropout_mask.view(output.size(0), -1) * pinv
i += output.numel()
return output
def extra_repr(self):
# TODO add dropout probability
s = F"MLP sizes: {self.mlp_sizes}, activation={self.activation}"
return s
batch_size = 24568
mlp_sizes = [1024, 4096, 1024]
# mlp_sizes = [4, 7, 4]
num_iters = 10
class TestMLP(unittest.TestCase):
def test_creation(self):
MLP(mlp_sizes)
def test_numeric(self):
mlp = MLP(mlp_sizes, activation='gelu').cuda()
print(mlp)
ref_mlp = deepcopy(mlp)
for _ in range(1):
bsz = random.randint(2850, batch_size // 8) * 8
test_input = torch.empty(bsz, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
mlp_out = mlp(test_input)
ref_out = ref_mlp.forward(ref_input, ref=True)
# print(dropout_mask.sum() / dropout_mask.numel())
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-5, rtol=1e-4)
# Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
mlp_out.mean().mul(10.).backward()
ref_out.mean().mul(10.).backward()
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
np.testing.assert_allclose(
mlp.biases[0].grad.detach().cpu().numpy(),
ref_mlp.biases[0].grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_with_bias(self):
for use_activation in ['relu']:
mlp = MLP(mlp_sizes, activation=use_activation).cuda()
ref_mlp = deepcopy(mlp)
test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
mlp_out = mlp(test_input)
ref_out = ref_mlp(ref_input, ref=True)
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
# Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
mlp_out.mean().mul(10.).backward()
ref_out.mean().mul(10.).backward()
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-5, rtol=1e-4)
for l in range(mlp.num_layers):
np.testing.assert_allclose(
mlp.weights[l].grad.detach().cpu().numpy(),
ref_mlp.weights[l].grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
np.testing.assert_allclose(
mlp.biases[l].grad.detach().cpu().numpy(),
ref_mlp.biases[l].grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_no_grad(self):
mlp = MLP(mlp_sizes).cuda()
ref_mlp = deepcopy(mlp)
test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.)
ref_input = test_input.clone().detach()
mlp_out = mlp(test_input)
ref_out = ref_mlp(ref_input, ref=True)
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_performance_half(self):
mlp = MLP(mlp_sizes).cuda().half()
mlp_layers = []
for i in range(mlp.num_layers):
linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
mlp.weights[i].data.copy_(linear.weight)
mlp.biases[i].data.copy_(linear.bias)
mlp_layers.append(linear)
if i < mlp.num_layers - 1:
# mlp_layers.append(nn.ReLU(inplace=True))
mlp_layers.append(torch.nn.GELU())
mlp_layers.append(nn.Dropout(0.25))
ref_mlp = nn.Sequential(*mlp_layers).cuda().half()
test_input = torch.empty(
batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
ref_input = torch.empty(
batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
# Warm up GPU
for _ in range(100):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
mlp_out = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
mlp_out = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
# torch.cuda.synchronize()
# start_time = time()
# for _ in range(num_iters):
# mlp_out = mlp(test_input, blaslt=True)
# test_loss = mlp_out.mean()
# mlp.zero_grad()
# test_loss.backward()
# torch.cuda.synchronize()
# stop_time = time()
# print(F"BLASLT MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# torch.cuda.profiler.stop()
unittest.main()
| 15,995
| 33.252677
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/mlp/test_mlp_relu.py
|
from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat import custom_fwd, custom_bwd
try:
import fused_mlp_relu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_relu = None
torch.set_float32_matmul_precision('high')
class MlpReluFunction(torch.autograd.Function):
@staticmethod
# @custom_fwd(cast_inputs=torch.float16)
@custom_fwd
def forward(ctx, p, *args):
store_dropout_mask = True
output = fused_mlp_relu.forward(p, store_dropout_mask, args)
ctx.save_for_backward(*args)
ctx.outputs = output
dropout_mask = output[-1]
ctx.p = p
return output[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
grads = fused_mlp_relu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
del ctx.outputs
return (None, *grads)
if fused_mlp_relu:
mlp_relu_function = MlpReluFunction.apply
else:
mlp_relu_function = None
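# Illustrative usage (a sketch, not part of the original tests): the fused function takes the
# dropout probability, then the input, then the per-layer weights followed by the per-layer
# biases, and returns the last hidden state together with the dropout mask produced inside
# the fused kernels, mirroring MLP.forward below:
#   out, mask = mlp_relu_function(0.25, x, w0, w1, b0, b1)
# where x is (seq, batch, mlp_sizes[0]) or (batch, mlp_sizes[0]) and wi/bi are the layer
# parameters in order.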
class MlpReluRecomputeFunction(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, p, *args):
store_dropout_mask = True
output = fused_mlp_relu.forward(p, store_dropout_mask, args)
ctx.save_for_backward(*args)
dropout_mask = output[-1]
ctx.dropout_mask = dropout_mask
ctx.p = p
return output[0], dropout_mask
@staticmethod
@custom_bwd
def backward(ctx, *grad_o):
p = ctx.p
dropout_mask = ctx.dropout_mask
grads = fused_mlp_relu.backward_recompute(p, grad_o[0], dropout_mask, ctx.saved_tensors)
del ctx.dropout_mask
return (None, *grads)
if fused_mlp_relu:
mlp_relu_recompute_function = MlpReluRecomputeFunction.apply
else:
mlp_relu_recompute_function = None
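# Editorial note: the recompute variant saves only the inputs and the dropout mask; the
# intermediate activations are re-created inside fused_mlp_relu.backward_recompute, trading
# extra compute in the backward pass for lower activation memory.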
def foo(x, y):
a = torch.sin(x)
b = torch.cos(y)
return a + b
opt_foo1 = torch.compile(foo)
print("COMPILED")
if __name__ == '__main__':
class MLP(torch.nn.Module):
"""Launch MLP in C++
Args:
mlp_sizes (list of int): MLP sizes. Example: [1024,1024,1024] will create 2 MLP layers with shape 1024x1024
activation (str): 'relu', 'sigmoid' or 'gelu'. Default: 'relu'
dropout (float): dropout probability applied after each hidden activation. Default: 0.25
recompute (bool): recompute hidden activations in the backward pass to save memory. Default: False
"""
def __init__(self, mlp_sizes, activation='relu', dropout=0.25, recompute=False):
super(MLP, self).__init__()
self.num_layers = len(mlp_sizes) - 1
self.mlp_sizes = copy(mlp_sizes)
self.dropout = dropout
self.recompute = recompute
if activation == 'relu':
self.activation = 1
elif activation == 'sigmoid':
self.activation = 2
elif activation == 'gelu':
self.activation = 3
else:
raise TypeError("activation must be relu, sigmoid or gelu.")
self.weights = []
self.biases = []
for i in range(self.num_layers):
w = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1], mlp_sizes[i]))
self.weights.append(w)
name = 'weight_{}'.format(i)
setattr(self, name, w)
b = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1]))
self.biases.append(b)
name = 'bias_{}'.format(i)
setattr(self, name, b)
self.reset_parameters()
def reset_parameters(self):
for weight in self.weights:
dimsum = weight.size(0) + weight.size(1)
std = math.sqrt(2. / float(dimsum))
nn.init.normal_(weight, 0., std)
for bias in self.biases:
std = math.sqrt(1. / float(bias.size(0)))
nn.init.normal_(bias, 0., std)
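# Editorial note: this is Glorot/Xavier-style normal initialisation,
# std_W = sqrt(2 / (fan_in + fan_out)) for weights and std_b = sqrt(1 / fan_out) for biases.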
def forward(self, input, mask=None, ref=False):
if ref:
return self.forward_ref(input, mask)
# return mlp_relu_function(self.dropout, input, *self.weights, *self.biases)
# return mlp_agelu_function(self.dropout, input, *self.weights, *self.biases)
# return mlp_relu_function(self.dropout, input, *self.weights, *self.biases)
if self.recompute:
return mlp_relu_recompute_function(self.dropout, input, *self.weights, *self.biases)
else:
return mlp_relu_function(self.dropout, input, *self.weights, *self.biases)
def forward_ref(self, input, mask):
i = 0
output = input
for l in range(self.num_layers):
output = F.linear(output, self.weights[l], self.biases[l])
dropout_mask = mask[i:i + output.numel()]
pinv = 1 / (1 - self.dropout)
if l < self.num_layers - 1:
# print(mask.size())
# output = fast_silu(output) * dropout_mask.view(output.size(0), -1) * pinv
# output = GELUFunction.apply(output) * dropout_mask.view(output.size(0), -1) * pinv
if self.dropout > 0:
output = F.relu(output) * dropout_mask.view_as(output) * pinv
else:
output = F.relu(output)
i += output.numel()
return output
def extra_repr(self):
# TODO add dropout probability
s = F"MLP sizes: {self.mlp_sizes}, activation={self.activation}, dropout={self.dropout}"
return s
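# Illustrative usage (a sketch based on the tests below, not part of the original module):
#   mlp = MLP([512, 4096, 512], activation='relu', dropout=0.25).cuda()
#   x = torch.randn(64, 8, 512, device='cuda', requires_grad=True)
#   out, dropout_mask = mlp(x)                 # fused C++/CUDA path
#   ref = mlp(x, mask=dropout_mask, ref=True)  # pure-PyTorch reference reusing the same mask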
batch_size = 1024
seq_len = 64
mlp_sizes = [512, 4096, 512]
# mlp_sizes = [4, 7, 4]
num_iters = 64
class TestMLP(unittest.TestCase):
def test_creation(self):
MLP(mlp_sizes)
def test_numeric(self):
print("Test numeric 3D ....")
for dropout in [0.0, 0.2, 0.5, 0.7]:
mlp = MLP(mlp_sizes, activation='relu', dropout=dropout).cuda()
print(mlp)
ref_mlp = deepcopy(mlp)
for _ in range(1):
bsz = random.randint(64, batch_size // 8) * 8
test_input = torch.empty(seq_len, bsz, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
mlp_out, dropout_mask = mlp(test_input)
ref_out = ref_mlp.forward(ref_input, dropout_mask, ref=True)
print(dropout_mask.sum() / dropout_mask.numel())
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-5, rtol=1e-4)
# Use the mean as a scalar loss. Multiply by 10 so the gradients are large enough not to be zeroed out
mlp_out.mean().mul(10.).backward()
ref_out.mean().mul(10.).backward()
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
np.testing.assert_allclose(
mlp.biases[0].grad.detach().cpu().numpy(),
ref_mlp.biases[0].grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_with_bias(self):
for use_activation in ['relu']:
mlp = MLP(mlp_sizes, activation=use_activation).cuda()
ref_mlp = deepcopy(mlp)
test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
mlp_out, dropout_mask = mlp(test_input)
ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
# Use the mean as a scalar loss. Multiply by 10 so the gradients are large enough not to be zeroed out
mlp_out.mean().mul(10.).backward()
ref_out.mean().mul(10.).backward()
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-5, rtol=1e-4)
for l in range(mlp.num_layers):
np.testing.assert_allclose(
mlp.weights[l].grad.detach().cpu().numpy(),
ref_mlp.weights[l].grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
np.testing.assert_allclose(
mlp.biases[l].grad.detach().cpu().numpy(),
ref_mlp.biases[l].grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_no_grad(self):
mlp = MLP(mlp_sizes).cuda()
ref_mlp = deepcopy(mlp)
test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.)
ref_input = test_input.clone().detach()
mlp_out, dropout_mask = mlp(test_input)
ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
np.testing.assert_allclose(
mlp_out.detach().cpu().numpy(),
ref_out.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_performance_half(self):
for dropout in [0.0, 0.5]:
mlp = MLP(mlp_sizes, recompute=True, dropout=dropout).cuda().half()
ref_mlp_fast = MLP(mlp_sizes, recompute=False, dropout=dropout).cuda().half()
mlp_layers = []
for i in range(mlp.num_layers):
linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
mlp.weights[i].data.copy_(linear.weight)
mlp.biases[i].data.copy_(linear.bias)
mlp_layers.append(linear)
if i < mlp.num_layers - 1:
# mlp_layers.append(nn.ReLU(inplace=True))
mlp_layers.append(torch.nn.ReLU())
mlp_layers.append(nn.Dropout(dropout))
ref_mlp = nn.Sequential(*mlp_layers).cuda()
print("Compiling ref mlp ...")
ref_mlp = torch.compile(ref_mlp)
ref_mlp = ref_mlp.half()
test_input = torch.empty(
seq_len, batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
ref_input = torch.empty(
seq_len, batch_size // 2, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
# Warm up GPU
for _ in range(100):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
mlp_out, _ = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
mlp_out, _ = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"C++ MLP recompute time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
mlp_out, _ = ref_mlp_fast(ref_input)
test_loss = mlp_out.mean()
ref_mlp_fast.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
unittest.main()
# test = TestMLP()
# test.test_creation()
# test.test_performance_half()
# test.test_with_bias()
| 13,167
| 36.409091
| 122
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/mlp/__init__.py
|
from .mlp import *
| 19
| 9
| 18
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/transformer_xl.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, TransformerDecodingState
import onmt
from onmt.modules.bottle import Bottle
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.legacy.old_models.unified_transformer import UnifiedTransformer
from onmt.models.relative_transformer import SinusoidalPositionalEmbedding, StreamState, \
StreamDecodingState, RelativeTransformerDecoder
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
class TransformerXLDecoderLayer(nn.Module):
def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, ignore_source=False,
variational=False, death_rate=0.0):
super(TransformerXLDecoderLayer, self).__init__()
self.version = version
self.ignore_source = ignore_source
self.variational = variational
self.death_rate = death_rate
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
d_head = d_model // h
self.multihead_tgt = RelPartialLearnableMultiHeadAttn(h, d_model, d_head, dropatt=attn_p)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p, variational=self.variational)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
elif onmt.constants.activation_layer == 'linear_swish_linear':
ff_p = p
feedforward = FeedForwardSwish(d_model, d_ff, ff_p)
else:
raise NotImplementedError
self.feedforward = Bottle(feedforward)
def forward(self, input_, context, pos_emb, mask_tgt, mask_src, mems=None,
incremental=False, incremental_cache=None):
# incremental=False, incremental_cache=None, reuse_source=True):
""" Self attention layer with memory
layernorm > attn > dropout > residual
"""
assert context is None, "This model does not have a context encoder"
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
# input and context should be time first ?
query = self.preprocess_attn(input_)
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
# out, _ = self.multihead_tgt(query, pos_emb, r_w_bias, r_r_bias, attn_mask=mask_tgt)
out, _, incremental_cache = self.multihead_tgt(query, pos_emb, attn_mask=mask_tgt, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input_ = self.postprocess_attn(out, input_)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input_))
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input_ = self.postprocess_ffn(out, input_)
else:
coverage = None
if incremental:
return input_, coverage, incremental_cache
return input_, coverage
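# Illustrative call (a sketch mirroring how TransformerXL.forward below uses this layer;
# the shapes are assumptions inferred from the rest of this file, not part of the original code):
#   x: (qlen, bsz, d_model)            hidden states of the new segment
#   pos_emb: relative positional embeddings for klen = mlen + qlen positions
#   dec_attn_mask: (qlen, klen, 1)     boolean mask, True = blocked
#   mems_i: (mlen, bsz, d_model)       cached states from the previous segment, or None
#   out, coverage = layer(x, None, pos_emb, dec_attn_mask, None, mems=mems_i)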
class TransformerXL(RelativeTransformerDecoder):
"""
Transformer-XL: a decoder-only language model with segment-level recurrence (memory)
and relative positional attention. The source encoder is ignored (ignore_source=True).
"""
def __init__(self, opt, tgt_embedding, generator,
language_embeddings=None, **kwargs):
# self.tgt_embedding = tgt_embedding
self.model_size = opt.model_size
# build_modules will be called from the inherited constructor
super().__init__(opt, tgt_embedding,
None,
language_embeddings=language_embeddings,
ignore_source=True)
self.tgt_embedding = tgt_embedding
self.generator = generator
self.ignore_source = True
self.same_length = False
self.clamp_len = 0
self.d_head = self.model_size // self.n_heads
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Transformer LM Decoder with Relative Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = (l + 1.0) / self.layers * self.death_rate
block = TransformerXLDecoderLayer(self.n_heads, self.model_size,
self.dropout, self.inner_size, self.attn_dropout,
ignore_source=True,
variational=self.variational_dropout, death_rate=death_r)
self.layer_modules.append(block)
def reset_states(self):
return
def tie_weights(self):
self.generator[0].linear.weight = self.tgt_embedding.weight
def forward(self, batch, target_mask=None, streaming=False, **kwargs):
tgt = batch.get('target_input')
tgt_lang = batch.get('target_lang')
if streaming:
streaming_state = kwargs.get('streaming_state', None)
mems = streaming_state.tgt_mems
else:
mems = None
qlen = tgt.size(0)
word_emb = embedded_dropout(self.tgt_embedding, tgt, dropout=self.word_dropout if self.training else 0)
word_emb.mul_(self.model_size ** 0.5)
if self.use_language_embedding:
lang_emb = self.language_embeddings(tgt_lang) # B x H
if self.language_embedding_type in ['sum', 'all_sum']:
word_emb = word_emb + lang_emb
else:
raise NotImplementedError
mlen = mems[0].size(0) if mems is not None else 0
# total length: memory + current input
klen = mlen + qlen
# all units having the same attention range
if self.same_length:
all_ones = word_emb.new_ones(qlen, klen)
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (torch.triu(all_ones, 1 + mlen)
+ torch.tril(all_ones, -mask_shift_len)).byte()[:, :, None] # -1
else:
dec_attn_mask = torch.triu(
word_emb.new_ones(qlen, klen), diagonal=1 + mlen).byte()[:, :, None]
dec_attn_mask = dec_attn_mask.bool()
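# Worked example (editorial, not part of the original code): with qlen = 3 new tokens and
# mlen = 2 memory slots, klen = 5 and torch.triu(ones(3, 5), diagonal=1 + 2) gives
#   [[0, 0, 0, 1, 1],
#    [0, 0, 0, 0, 1],
#    [0, 0, 0, 0, 0]]
# i.e. query i may attend to every memory position plus current positions <= i
# (a True entry means "masked out").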
pos = torch.arange(klen - 1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos.clamp_(max=self.clamp_len)
pos_emb = self.positional_encoder(pos)
# Applying dropout
output = self.preprocess_layer(word_emb)
if streaming:
hids = [output]
pos_emb = self.preprocess_layer(pos_emb)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
mems_i = None if mems is None else mems[i]
output, coverage = layer(output, None, pos_emb, dec_attn_mask, None,
mems=mems_i) # context and context_mask are None
if streaming:
hids.append(output)
# Final normalization
output = self.postprocess_layer(output)
output_dict = {'hidden': output, 'coverage': coverage, 'context': None, 'src': None,
'target_mask': target_mask}
output_dict = defaultdict(lambda: None, output_dict)
# final layer: computing log probabilities
logprobs = self.generator[0](output_dict)
output_dict['logprobs'] = logprobs
if streaming:
streaming_state.update_tgt_mems(hids, qlen)
output_dict['streaming_state'] = streaming_state
return output_dict
def init_stream(self):
param = next(self.parameters())
layers = self.layers
streaming_state = StreamState(layers, self.max_memory_size, param.device, param.dtype)
return streaming_state
# make a simple sampling sequence from some input
def sample(self, input):
return
| 9,715
| 36.513514
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/transformers.py
|
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
from torch.utils.checkpoint import checkpoint
import onmt
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer, PositionalEncoding, \
PrePostProcessing
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout, switchout
from onmt.modules.linear import FeedForward, FeedForwardSwish
from onmt.reversible_models.transformers import ReversibleTransformerEncoderLayer, ReversibleEncoderFunction, \
ReversibleDecoderFunction, ReversibleTransformerDecoderLayer
from onmt.utils import flip, expected_length
torch_version = float(torch.__version__[:3])
class MixedEncoder(nn.Module):
def __init__(self, text_encoder, audio_encoder):
super(MixedEncoder, self).__init__()
self.text_encoder = text_encoder
self.audio_encoder = audio_encoder
def forward(self, input, **kwargs):
"""
Inputs Shapes:
input: batch_size x len_src (to be transposed)
Outputs Shapes:
out: batch_size x len_src x d_model
mask_src
"""
""" Embedding: batch_size x len_src x d_model """
if input.dim() == 2:
return self.text_encoder.forward(input)
else:
return self.audio_encoder.forward(input)
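# Illustrative dispatch (a sketch, not part of the original code):
#   token ids of shape (batch, len_src)            -> routed to the text encoder
#   audio features of shape (batch, len_src, feat) -> routed to the audio encoder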
class TransformerEncoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt: list of options ( see train.py )
dicts : dictionary (for source language)
"""
def __init__(self, opt, embedding, positional_encoder, encoder_type='text', language_embeddings=None):
super(TransformerEncoder, self).__init__()
self.opt = opt
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
if hasattr(opt, 'encoder_layers') and opt.encoder_layers != -1:
self.layers = opt.encoder_layers
else:
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.input_type = encoder_type
self.cnn_downsampling = opt.cnn_downsampling
self.death_rate = opt.death_rate
self.switchout = opt.switchout
self.varitional_dropout = opt.variational_dropout
self.use_language_embedding = opt.use_language_embedding
self.language_embedding_type = opt.language_embedding_type
self.time = opt.time
self.lsh_src_attention = opt.lsh_src_attention
self.reversible = opt.src_reversible
feature_size = opt.input_size
self.channels = 1 # n. audio channels
if opt.upsampling:
feature_size = feature_size // 4
if encoder_type != "text":
if not self.cnn_downsampling:
self.audio_trans = nn.Linear(feature_size, self.model_size)
torch.nn.init.xavier_uniform_(self.audio_trans.weight)
else:
channels = self.channels # should be 1
if not opt.no_batch_norm:
cnn = [nn.Conv2d(channels, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32),
nn.Conv2d(32, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32)]
else:
cnn = [nn.Conv2d(channels, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True),
nn.Conv2d(32, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True)]
feat_size = (((feature_size // channels) - 3) // 4) * 32
self.audio_trans = nn.Sequential(*cnn)
self.linear_trans = nn.Linear(feat_size, self.model_size)
# assert self.model_size == feat_size, \
# "The model dimension doesn't match with the feature dim, expecting %d " % feat_size
else:
self.word_lut = embedding
self.time_transformer = positional_encoder
self.language_embedding = language_embeddings
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d',
variational=self.varitional_dropout)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.positional_encoder = positional_encoder
self.layer_modules = nn.ModuleList()
self.build_modules()
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
if self.reversible:
print("* Reversible Transformer Encoder with Absolute Attention with %.2f expected layers" % e_length)
else:
print("* Transformer Encoder with Absolute Attention with %.2f expected layers" % e_length)
for _l in range(self.layers):
# linearly decay the death rate
death_r = (_l + 1.0) / self.layers * self.death_rate
if not self.reversible:
block = EncoderLayer(self.opt, death_rate=death_r)
else:
block = ReversibleTransformerEncoderLayer(self.opt, death_rate=death_r)
self.layer_modules.append(block)
def forward(self, input, input_lang=None, **kwargs):
"""
Inputs Shapes:
input: batch_size x len_src (to be transposed)
Outputs Shapes:
out: batch_size x len_src x d_model
mask_src
"""
""" Embedding: batch_size x len_src x d_model """
if self.input_type == "text":
mask_src = input.eq(onmt.constants.SRC_PAD).unsqueeze(1) # batch_size x 1 x len_src for broadcasting
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
else:
if not self.cnn_downsampling:
mask_src = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.SRC_PAD).unsqueeze(1)
input = input.narrow(2, 1, input.size(2) - 1)
emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
input.size(1), -1)
emb = emb.type_as(input)
else:
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.SRC_PAD)
input = input.narrow(2, 1, input.size(2) - 1)
# first resizing to fit the CNN format
input = input.view(input.size(0), input.size(1), -1, self.channels)
input = input.permute(0, 3, 1, 2)
input = self.audio_trans(input)
input = input.permute(0, 2, 1, 3).contiguous()
input = input.view(input.size(0), input.size(1), -1)
input = self.linear_trans(input)
mask_src = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
# the size seems to be B x T ?
emb = input
mask_src = mask_src.bool()
""" Scale the emb by sqrt(d_model) """
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
""" Adding language embeddings """
if self.use_language_embedding:
assert self.language_embedding is not None
if self.language_embedding_type in ['sum', 'all_sum']:
lang_emb = self.language_embedding(input_lang)
emb = emb + lang_emb.unsqueeze(1)
# B x T x H -> T x B x H
context = emb.transpose(0, 1)
context = self.preprocess_layer(context)
if self.reversible:
# x_1 and x_2 are the same at first for reversible
context = torch.cat([context, context], dim=-1)
context = ReversibleEncoderFunction.apply(context, self.layer_modules, mask_src)
else:
for i, layer in enumerate(self.layer_modules):
context = layer(context, mask_src) # batch_size x len_src x d_model
context = self.postprocess_layer(context)
output_dict = {'context': context, 'src_mask': mask_src}
# return context, mask_src
return output_dict
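# Illustrative usage (a sketch, not part of the original code): for a text encoder,
#   src: LongTensor (batch, len_src) of token ids, padded with onmt.constants.SRC_PAD
#   enc_out = encoder(src, input_lang=src_lang)
#   enc_out['context']   # (len_src, batch, model_size), time-first hidden states
#   enc_out['src_mask']  # (batch, 1, len_src) boolean padding mask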
class TransformerDecoder(nn.Module):
"""Decoder in 'Attention is all you need'"""
def __init__(self, opt, embedding, positional_encoder,
language_embeddings=None, ignore_source=False, allocate_positions=True):
"""
:param opt:
:param embedding:
:param positional_encoder:
:param attribute_embeddings:
:param ignore_source:
"""
super(TransformerDecoder, self).__init__()
opt.ignore_source = ignore_source
self.opt = opt
self.model_size = opt.model_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.encoder_type = opt.encoder_type
self.ignore_source = ignore_source
self.encoder_cnn_downsampling = opt.cnn_downsampling
self.variational_dropout = opt.variational_dropout
self.switchout = opt.switchout
self.death_rate = opt.death_rate
self.time = opt.time
self.use_language_embedding = opt.use_language_embedding
self.language_embedding_type = opt.language_embedding_type
self.reversible = opt.tgt_reversible
self.time_transformer = positional_encoder
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d',
variational=self.variational_dropout)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.word_lut = embedding
# Using feature embeddings in models
self.language_embeddings = language_embeddings
if self.language_embedding_type == 'concat':
self.projector = nn.Linear(opt.model_size * 2, opt.model_size)
self.positional_encoder = positional_encoder
if allocate_positions:
if hasattr(self.positional_encoder, 'len_max'):
len_max = self.positional_encoder.len_max
mask = torch.ByteTensor(np.triu(np.ones((len_max, len_max)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
self.layer_modules = nn.ModuleList()
self.build_modules()
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
if self.reversible:
print("* Reversible Transformer Decoder with Absolute Attention with %.2f expected layers" % e_length)
else:
print("* Transformer Decoder with Absolute Attention with %.2f expected layers" % e_length)
for _l in range(self.layers):
# linearly decay the death rate
death_r = (_l + 1.0) / self.layers * self.death_rate
if not self.reversible:
# block = DecoderLayer(self.n_heads, self.model_size,
# self.dropout, self.inner_size, self.attn_dropout,
# variational=self.variational_dropout, death_rate=death_r)
block = DecoderLayer(self.opt, death_rate=death_r)
else:
block = ReversibleTransformerDecoderLayer(self.opt, death_rate=_l)
self.layer_modules.append(block)
def renew_buffer(self, new_len):
self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len + 1, new_len + 1)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def process_embedding(self, input, input_lang=None):
input_ = input
emb = embedded_dropout(self.word_lut, input_, dropout=self.word_dropout if self.training else 0)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if self.use_language_embedding:
lang_emb = self.language_embeddings(input_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb.unsqueeze(1)
elif self.language_embedding_type == 'concat':
lang_emb = lang_emb.unsqueeze(1).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
return emb
def forward(self, input, context, src, tgt_lang=None, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
""" Embedding: batch_size x len_tgt x d_model """
emb = self.process_embedding(input, tgt_lang)
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.SRC_PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.SRC_PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.data.eq(onmt.constants.SRC_PAD).unsqueeze(1)
else:
mask_src = None
len_tgt = input.size(1)
# mask_tgt = input.eq(onmt.constants.PAD).byte().unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
# mask_tgt = torch.gt(mask_tgt, 0)
mask_tgt = torch.triu(
emb.new_ones(len_tgt, len_tgt), diagonal=1).byte().unsqueeze(0)
mask_tgt = mask_tgt.bool()
output = self.preprocess_layer(emb.transpose(0, 1).contiguous())
if self.reversible:
# x_1 and x_2 are the same at first for reversible
output = torch.cat([output, output], dim=-1)
output = ReversibleDecoderFunction.apply(output, context, self.layer_modules,
mask_tgt, mask_src)
coverage = None
else:
for i, layer in enumerate(self.layer_modules):
output, coverage, _ = layer(output, context, mask_tgt, mask_src) # batch_size x len_src x d_model
# From Google T2T: normalization to control network output magnitude
output = self.postprocess_layer(output)
output_dict = defaultdict(lambda: None, {'hidden': output, 'coverage': coverage, 'context': context})
# return output, None
return output_dict
def step(self, input, decoder_state, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
context = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
# mask_src = decoder_state.src_mask
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
if input.size(1) > 1:
input_ = input[:, -1].unsqueeze(1)
else:
input_ = input
""" Embedding: batch_size x 1 x d_model """
check = input_.gt(self.word_lut.num_embeddings)
emb = self.word_lut(input_)
""" Adding positional encoding """
emb = emb * math.sqrt(self.model_size)
emb = self.time_transformer(emb, t=input.size(1))
# emb should be batch_size x 1 x dim
if self.use_language_embedding:
if self.use_language_embedding:
lang_emb = self.language_embeddings(lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
if input.size(1) == 1:
bos_emb = lang_emb.expand_as(emb[:, 0, :])
emb[:, 0, :] = bos_emb
lang_emb = lang_emb.unsqueeze(1).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
emb = emb.transpose(0, 1)
# batch_size x 1 x len_src
if context is not None:
if self.encoder_type == "audio":
if src.dim() == 3:
if self.encoder_cnn_downsampling:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.SRC_PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.SRC_PAD).unsqueeze(1)
elif self.encoder_cnn_downsampling:
long_mask = src.eq(onmt.constants.SRC_PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.SRC_PAD).unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.SRC_PAD).unsqueeze(1)
else:
mask_src = None
len_tgt = input.size(1)
mask_tgt = torch.triu(
emb.new_ones(len_tgt, len_tgt), diagonal=1).byte().unsqueeze(0)
# only get the final step of the mask during decoding (because the input of the network is only the last step)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
if torch_version >= 1.2:
mask_tgt = mask_tgt.bool()
output = emb.contiguous()
if self.reversible:
# x_1 and x_2 are the same at first for reversible
# output = torch.cat([output, output], dim=-1)
output1, output2 = output, output
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
assert (output.size(0) == 1)
if self.reversible:
output1, output2, coverage, buffer = layer(output1, output2, context, mask_tgt, mask_src,
incremental=True, incremental_cache=buffer)
else:
output, coverage, buffer = layer(output, context, mask_tgt, mask_src,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
if self.reversible:
output = output1 + output2
output = self.postprocess_layer(output)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = context
return output_dict
class Transformer(NMTModel):
"""Main model in 'Attention is all you need' """
def __init__(self, encoder, decoder, generator=None, rec_decoder=None, rec_generator=None,
mirror=False, ctc=False):
super().__init__(encoder, decoder, generator, rec_decoder, rec_generator, ctc=ctc)
self.model_size = self.decoder.model_size
self.switchout = self.decoder.switchout
if hasattr(self.decoder, 'word_lut'):
self.tgt_vocab_size = self.decoder.word_lut.weight.size(0)
if self.encoder.input_type == 'text':
self.src_vocab_size = self.encoder.word_lut.weight.size(0)
else:
self.src_vocab_size = 0
if mirror:
self.mirror_decoder = copy.deepcopy(self.decoder)
self.mirror_g = nn.Linear(decoder.model_size, decoder.model_size)
self.mirror_generator = copy.deepcopy(self.generator)
self.mirror_generator[0].linear.weight = self.decoder.word_lut.weight
if self.reconstruct:
self.rec_linear = nn.Linear(decoder.model_size, decoder.model_size)
if self.ctc:
self.ctc_linear = nn.Linear(encoder.model_size, self.tgt_vocab_size)
def reset_states(self):
return
def forward(self, batch, target_mask=None, streaming=False, zero_encoder=False,
mirror=False, streaming_state=None, nce=False, factorize=True,
pretrained_layer_states=None, **kwargs):
"""
:param pretrained_layer_states:
:param nce: use noise contrastive estimation
:param streaming_state:
:param streaming:
:param mirror: if using mirror network for future anticipation
:param batch: data object sent from the dataset
:param target_mask:
:param zero_encoder: zero out the encoder output (if necessary)
:return:
"""
if self.switchout > 0 and self.training:
batch.switchout(self.switchout, self.src_vocab_size, self.tgt_vocab_size)
src = batch.get('source')
tgt = batch.get('target_input')
src_pos = batch.get('source_pos')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
tgt_lengths = batch.tgt_lengths
org_src = src
org_tgt = tgt
src = src.transpose(0, 1) # transpose to have batch first
tgt = tgt.transpose(0, 1)
encoder_output = self.encoder(src, input_pos=src_pos, input_lang=src_lang, streaming=streaming,
src_lengths=src_lengths, streaming_state=streaming_state, factorize=factorize,
pretrained_layer_states=pretrained_layer_states)
encoder_output = defaultdict(lambda: None, encoder_output)
context = encoder_output['context']
# the state is changed if streaming
streaming_state = encoder_output['streaming_state']
# zero out the encoder part for pre-training
if zero_encoder:
context.zero_()
decoder_output = self.decoder(tgt, context, src,
src_lang=src_lang, tgt_lang=tgt_lang, input_pos=tgt_pos, streaming=streaming,
src_lengths=src_lengths, tgt_lengths=tgt_lengths,
streaming_state=streaming_state, factorize=factorize)
# update the streaming state again
decoder_output = defaultdict(lambda: None, decoder_output)
streaming_state = decoder_output['streaming_state']
output = decoder_output['hidden']
# build the output dict based on decoder output
output_dict = defaultdict(lambda: None, decoder_output)
output_dict['hidden'] = output
output_dict['context'] = context
output_dict['src_mask'] = encoder_output['src_mask']
output_dict['src'] = src
output_dict['target_mask'] = target_mask
output_dict['streaming_state'] = streaming_state
output_dict['target'] = batch.get('target_output')
# output_dict['lid_logits'] = decoder_output['lid_logits']
# final layer: computing softmax
if self.training and nce:
output_dict = self.generator[0](output_dict)
else:
logprobs = self.generator[0](output_dict)['logits']
output_dict['logprobs'] = logprobs
# Mirror network: reverse the target sequence and perform backward language model
if mirror:
# tgt_reverse = torch.flip(batch.get('target_input'), (0, ))
tgt_pos = torch.flip(batch.get('target_pos'), (0,))
tgt_reverse = torch.flip(batch.get('target'), (0,))
tgt_reverse_input = tgt_reverse[:-1]
tgt_reverse_output = tgt_reverse[1:]
tgt_reverse_input = tgt_reverse_input.transpose(0, 1)
# perform an additional backward pass
reverse_decoder_output = self.mirror_decoder(tgt_reverse_input, context, src, src_lang=src_lang,
tgt_lang=tgt_lang, input_pos=tgt_pos)
reverse_decoder_output['src'] = src
reverse_decoder_output['context'] = context
reverse_decoder_output['target_mask'] = target_mask
reverse_logprobs = self.mirror_generator[0](reverse_decoder_output)['logits']
output_dict['reverse_target'] = tgt_reverse_output
output_dict['reverse_hidden'] = reverse_decoder_output['hidden']
output_dict['reverse_logprobs'] = reverse_logprobs
output_dict['target_input'] = batch.get('target_input')
output_dict['target_lengths'] = batch.tgt_lengths
# learn weights for mapping (g in the paper)
output_dict['hidden'] = self.mirror_g(output_dict['hidden'])
# Reconstruction network
if self.reconstruct:
bos = org_tgt[0].unsqueeze(0) # 1 x B
src_input = torch.cat([bos, org_src[:-1]], dim=0) # T x B
src_output = org_src
src_input = src_input.transpose(0, 1)
rec_context = self.rec_linear(output_dict['hidden']) # T x B x H
rec_decoder_output = self.rec_decoder(src_input, rec_context, tgt, tgt_lang=src_lang, input_pos=src_pos)
rec_output = rec_decoder_output['hidden']
rec_logprobs = self.rec_generator[0](rec_decoder_output)['logits']
output_dict['rec_logprobs'] = rec_logprobs
output_dict['rec_hidden'] = rec_output
output_dict['reconstruct'] = True
output_dict['rec_target'] = src_output
else:
output_dict['reconstruct'] = False
# compute the logits for each encoder step
if self.ctc:
output_dict['encoder_logits'] = self.ctc_linear(output_dict['context'])
return output_dict
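# Illustrative usage (a sketch, not part of the original code): `batch` is an onmt batch object
# providing 'source' and 'target_input' as time-first tensors; a typical training step looks like
#   output_dict = model(batch)
#   loss = criterion(output_dict['logprobs'], batch.get('target_output'))
# with further entries ('context', 'src_mask', 'encoder_logits' when CTC is enabled, ...) available
# for auxiliary losses. `criterion` here is hypothetical; 'logprobs' is only set outside the NCE path.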
def load_encoder_weights(self, pretrained_model):
pretrained_model.encoder.language_embedding = None
enc_language_embedding = self.encoder.language_embedding
self.encoder.language_embedding = None
encoder_state_dict = pretrained_model.encoder.state_dict()
self.encoder.load_state_dict(encoder_state_dict)
self.encoder.language_embedding = enc_language_embedding
def decode(self, batch, pretrained_layer_states=None):
"""
:param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
:return: gold_scores (torch.Tensor) log probs for each sentence
gold_words (Int) the total number of non-padded tokens
allgold_scores (list of Tensors) log probs for each word in the sentence
"""
src = batch.get('source')
src_pos = batch.get('source_pos')
tgt_input = batch.get('target_input')
tgt_output = batch.get('target_output')
tgt_pos = batch.get('target_pos')
# tgt_atb = batch.get('target_atb') # a dictionary of attributes
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
# transpose to have batch first
src = src.transpose(0, 1)
tgt_input = tgt_input.transpose(0, 1)
batch_size = tgt_input.size(0)
context = self.encoder(src, input_pos=src_pos, input_lang=src_lang,
pretrained_layer_states=pretrained_layer_states)['context']
if hasattr(self, 'autoencoder') and self.autoencoder \
and self.autoencoder.representation == "EncoderHiddenState":
context = self.autoencoder.autocode(context)
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
decoder_output = self.decoder(tgt_input, context, src, tgt_lang=tgt_lang, src_lang=src_lang,
input_pos=tgt_pos)['hidden']
output = decoder_output
if hasattr(self, 'autoencoder') and self.autoencoder and \
self.autoencoder.representation == "DecoderHiddenState":
output = self.autoencoder.autocode(output)
for dec_t, tgt_t in zip(output, tgt_output):
dec_out = defaultdict(lambda: None)
dec_out['hidden'] = dec_t.unsqueeze(0)
dec_out['src'] = src
dec_out['context'] = context
if isinstance(self.generator, nn.ModuleList):
dec_out = self.generator[0](dec_out)
# gen_t = self.generator[0](dec_out)['logits']
else:
dec_out = self.generator(dec_out)
gen_t = dec_out['logits']
if dec_out['softmaxed'] is False:
gen_t = F.log_softmax(gen_t, dim=-1, dtype=torch.float32)
gen_t = gen_t.squeeze(0)
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.constants.TGT_PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
gold_words += tgt_t.ne(onmt.constants.TGT_PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
def renew_buffer(self, new_len):
self.decoder.renew_buffer(new_len)
def step(self, input_t, decoder_state, streaming=False):
"""
Decoding function:
generate new decoder output based on the current input and current decoder state
the decoder state is updated in the process
:param streaming:
:param input_t: the input word index at time t
:param decoder_state: object DecoderState containing the buffers required for decoding
:return: a dictionary containing: log-prob output and the attention coverage
"""
output_dict = self.decoder.step(input_t, decoder_state, streaming=streaming)
output_dict['src'] = decoder_state.src.transpose(0, 1)
# squeeze to remove the time step dimension
if isinstance(self.generator, nn.ModuleList):
output_dict = self.generator[0](output_dict)
else:
output_dict = self.generator(output_dict)
log_prob = output_dict['logits'].squeeze(0)
# the key 'softmaxed' should be included in generators.
# The 'normal linear + CE' doesn't need softmax
if output_dict['softmaxed'] is False:
log_prob = F.log_softmax(log_prob, dim=-1, dtype=torch.float32)
coverage = output_dict['coverage']
try:
last_coverage = coverage[:, -1, :].squeeze(1)
except TypeError:
last_coverage = None
output_dict['log_prob'] = log_prob
output_dict['coverage'] = last_coverage
return output_dict
def create_decoder_state(self, batch, beam_size=1, type=1, buffering=True,
pretrained_classifier=None, pretrained_layer_states=None, **kwargs):
"""
Generate a new decoder state based on the batch input
:param pretrained_classifier: model to create mixtures
:param buffering:
:param type:
:param batch: Batch object (may not contain target during decoding)
:param beam_size: Size of beam used in beam search
:return:
"""
src = batch.get('source')
src_pos = batch.get('source_pos')
tgt_atb = batch.get('target_atb')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_transposed = src.transpose(0, 1)
if pretrained_classifier is not None:
mixture = pretrained_classifier(src_transposed)
encoder_output = self.encoder(src_transposed, input_pos=src_pos, input_lang=src_lang, atb=tgt_atb,
pretrained_layer_states=pretrained_layer_states)
print("[INFO] create Transformer decoding state with buffering", buffering)
decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'], src_lang,
beam_size=beam_size, model_size=self.model_size,
type=type, buffering=buffering, tgt_atb=tgt_atb)
return decoder_state
def init_stream(self):
pass
def set_memory_size(self, src_memory_size, tgt_memory_size):
pass
class TransformerDecodingState(DecoderState):
def __init__(self, src, tgt_lang, context, src_lang, beam_size=1, model_size=512, type=2,
cloning=True, buffering=False, src_mask=None, tgt_atb=None,
dec_pretrained_model="", ):
"""
:param src:
:param tgt_lang:
:param context:
:param src_lang:
:param beam_size:
:param model_size:
:param type: Type 1 is for old translation code. Type 2 is for fast buffering. (Type 2 default).
:param cloning:
:param buffering:
"""
self.beam_size = beam_size
self.model_size = model_size
self.attention_buffers = dict()
self.buffering = buffering
self.dec_pretrained_model = dec_pretrained_model
self.tgt_atb = tgt_atb
if type == 1:
# if audio only take one dimension since only used for mask
raise NotImplementedError
# self.original_src = src # TxBxC
# self.concat_input_seq = True
#
# if src is not None:
# if src.dim() == 3:
# # print(self.src.size())
# self.src = src.narrow(2, 0, 1).squeeze(2).repeat(1, beam_size)
# # self.src = src.repeat(1, beam_size, 1)
# # print(self.src.size())
# # self.src = src.repeat(1, beam_size, 1) # T x Bb x c
# else:
# self.src = src.repeat(1, beam_size)
# else:
# self.src = None
#
# if context is not None:
# self.context = context.repeat(1, beam_size, 1)
# else:
# self.context = None
#
# self.input_seq = None
# self.src_lang = src_lang
# self.tgt_lang = tgt_lang
elif type == 2:
bsz = src.size(1) # src is T x B
new_order = torch.arange(bsz).view(-1, 1).repeat(1, self.beam_size).view(-1)
new_order = new_order.to(src.device)
if cloning:
self.src = src.index_select(1, new_order) # because src is time first
if context is not None:
self.context = context.index_select(1, new_order)
else:
self.context = None
if src_mask is not None:
self.src_mask = src_mask.index_select(0, new_order)
else:
self.src_mask = None
else:
self.context = context
self.src = src
# self.src_mask = src_mask
self.concat_input_seq = False
self.tgt_lang = tgt_lang
self.src_lang = src_lang
else:
raise NotImplementedError
def update_attention_buffer(self, buffer, layer):
self.attention_buffers[layer] = buffer # dict of 2 keys (k, v) : T x B x H
def update_beam(self, beam, b, remaining_sents, idx):
if self.beam_size == 1:
return
for tensor in [self.src, self.input_seq]:
if tensor is None:
continue
t_, br = tensor.size()
sent_states = tensor.view(t_, self.beam_size, remaining_sents)[:, :, idx]
sent_states.copy_(sent_states.index_select(
1, beam[b].getCurrentOrigin()))
for l in self.attention_buffers:
buffer_ = self.attention_buffers[l]
if buffer_ is None:
continue
for k in buffer_:
t_, br_, d_ = buffer_[k].size()
sent_states = buffer_[k].view(t_, self.beam_size, remaining_sents, d_)[:, :, idx, :]
sent_states.data.copy_(sent_states.data.index_select(
1, beam[b].getCurrentOrigin()))
# in this section, the sentences that are still active are
# compacted so that the decoder is not run on completed sentences
def prune_complete_beam(self, active_idx, remaining_sents):
model_size = self.model_size
def update_active_with_hidden(t):
if t is None:
return t
dim = t.size(-1)
# select only the remaining active sentences
view = t.data.view(-1, remaining_sents, dim)
new_size = list(t.size())
new_size[-2] = new_size[-2] * len(active_idx) // remaining_sents
return view.index_select(1, active_idx).view(*new_size)
def update_active_without_hidden(t):
if t is None:
return t
view = t.view(-1, remaining_sents)
new_size = list(t.size())
new_size[-1] = new_size[-1] * len(active_idx) // remaining_sents
new_t = view.index_select(1, active_idx).view(*new_size)
return new_t
self.context = update_active_with_hidden(self.context)
self.input_seq = update_active_without_hidden(self.input_seq)
if self.src.dim() == 2:
self.src = update_active_without_hidden(self.src)
elif self.src.dim() == 3:
t = self.src
dim = t.size(-1)
view = t.view(-1, remaining_sents, dim)
new_size = list(t.size())
new_size[-2] = new_size[-2] * len(active_idx) // remaining_sents
new_t = view.index_select(1, active_idx).view(*new_size)
self.src = new_t
for l in self.attention_buffers:
buffer_ = self.attention_buffers[l]
for k in buffer_:
buffer_[k] = update_active_with_hidden(buffer_[k])
# For the new decoder version only
def _reorder_incremental_state(self, reorder_state):
if self.context is not None:
self.context = self.context.index_select(1, reorder_state)
if self.src_mask is not None:
self.src_mask = self.src_mask.index_select(0, reorder_state)
self.src = self.src.index_select(1, reorder_state)
for l in self.attention_buffers:
buffer_ = self.attention_buffers[l]
if buffer_ is not None:
for k in buffer_.keys():
t_, br_, d_ = buffer_[k].size()
buffer_[k] = buffer_[k].index_select(1, reorder_state)
# if not self.dec_pretrained_model:
# buffer_[k] = buffer_[k].index_select(1, reorder_state) # beam/batch is the 2nd dim
# elif self.dec_pretrained_model in ["bert", "roberta", "bart"]:
# buffer_[k] = buffer_[k].index_select(0, reorder_state) # beam/batch is the first dim
# elif self.dec_pretrained_model in ["mbart", "mbart50"]:
# buffer_[k] = buffer_[k].index_select(1, reorder_state) # beam/batch is the 2nd dim
# else:
# print("Warning: check dec_pretrained_model type")
# raise NotImplementedError
| 40,743
| 39.18146
| 118
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/performer_layer.py
|
import math
from functools import partial
import torch
import torch.nn as nn
from einops import repeat, rearrange
# Note: helper utilities used below (exists, default, empty, find_modules, get_module_device,
# apply_rotary_pos_emb, causal_linear_attention, causal_linear_attention_noncuda, LocalAttention)
# are assumed to be provided elsewhere in this module or by the performer/local-attention utilities
# it was adapted from.
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device=None):
b, h, *_ = data.shape
data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
ratio = (projection_matrix.shape[0] ** -0.5)
projection = repeat(projection_matrix, 'j d -> b h j d', b=b, h=h)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
diag_data = data ** 2
diag_data = torch.sum(diag_data, dim=-1)
diag_data = (diag_data / 2.0) * (data_normalizer ** 2)
diag_data = diag_data.unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (
torch.exp(data_dash - diag_data -
torch.max(data_dash, dim=-1, keepdim=True).values) + eps)
else:
data_dash = ratio * (
torch.exp(data_dash - diag_data - torch.max(data_dash)) + eps)
return data_dash.type_as(data)
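# In formula form (editorial note): with x_hat = x * d**-0.25 and random projections w_j,
#   phi(x)_j = m**-0.5 * ( exp( w_j . x_hat - ||x_hat||**2 / 2 - stabilizer ) + eps )
# which is the positive random-feature (FAVOR+) approximation of the softmax kernel; the
# stabilizer is the per-position max over features for queries and the global max for keys,
# subtracted purely for numerical stability.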
def generalized_kernel(data, *, projection_matrix, kernel_fn=nn.ReLU(), kernel_epsilon=0.001, normalize_data=True,
device=None):
b, h, *_ = data.shape
data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
projection = repeat(projection_matrix, 'j d -> b h j d', b=b, h=h)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
data_prime = kernel_fn(data_dash) + kernel_epsilon
return data_prime.type_as(data)
def orthogonal_matrix_chunk(cols, device=None):
unstructured_block = torch.randn((cols, cols), device=device)
q, r = torch.qr(unstructured_block.cpu(), some=True)
q, r = map(lambda t: t.to(device), (q, r))
return q.t()
# nb = "number of": nb_rows x nb_columns is the shape of the random-feature projection matrix
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling=0, device=None):
nb_full_blocks = int(nb_rows / nb_columns)
block_list = []
for _ in range(nb_full_blocks):
q = orthogonal_matrix_chunk(nb_columns, device=device)
block_list.append(q)
remaining_rows = nb_rows - nb_full_blocks * nb_columns
if remaining_rows > 0:
q = orthogonal_matrix_chunk(nb_columns, device=device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim=1)
elif scaling == 1:
multiplier = math.sqrt((float(nb_columns))) * torch.ones((nb_rows,), device=device)
else:
raise ValueError(f'Invalid scaling {scaling}')
return torch.diag(multiplier) @ final_matrix
# non-causal linear attention
def linear_attention(q, k, v):
k_cumsum = k.sum(dim=-2)
D_inv = 1. / torch.einsum('...nd,...d->...n', q, k_cumsum.type_as(q))
context = torch.einsum('...nd,...ne->...de', k, v)
out = torch.einsum('...de,...nd,...n->...ne', context, q, D_inv)
return out
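# Editorial note: with feature maps q' and k' this computes, per head,
#   out_n = ( q'_n @ sum_m k'_m v_m^T ) / ( q'_n . sum_m k'_m )
# i.e. attention in O(N * d_k * d_v) time and memory rather than the O(N^2) softmax form.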
class FastAttention(nn.Module):
def __init__(self, dim_heads, nb_features=None, ortho_scaling=0, causal=False, generalized_attention=False,
kernel_fn=nn.ReLU(), no_projection=False):
super().__init__()
nb_features = default(nb_features, int(dim_heads * math.log(dim_heads)))
self.dim_heads = dim_heads
self.nb_features = nb_features
self.ortho_scaling = ortho_scaling
self.create_projection = partial(gaussian_orthogonal_random_matrix, nb_rows=self.nb_features,
nb_columns=dim_heads, scaling=ortho_scaling)
projection_matrix = self.create_projection()
self.register_buffer('projection_matrix', projection_matrix)
self.generalized_attention = generalized_attention
self.kernel_fn = kernel_fn
# if this is turned on, no projection will be used
# queries and keys will be softmax-ed as in the original efficient attention paper
self.no_projection = no_projection
self.causal = causal
if causal:
try:
import fast_transformers.causal_product.causal_product_cuda
self.causal_linear_fn = partial(causal_linear_attention)
except ImportError:
print(
'unable to import cuda code for auto-regressive Performer. will default to the memory inefficient non-cuda version')
self.causal_linear_fn = causal_linear_attention_noncuda
@torch.no_grad()
def redraw_projection_matrix(self, device):
projections = self.create_projection(device=device)
self.projection_matrix.copy_(projections)
del projections
def forward(self, q, k, v):
device = q.device
if self.no_projection:
q = q.softmax(dim=-1)
k = torch.exp(k) if self.causal else k.softmax(dim=-2)
elif self.generalized_attention:
create_kernel = partial(generalized_kernel, kernel_fn=self.kernel_fn,
projection_matrix=self.projection_matrix, device=device)
q, k = map(create_kernel, (q, k))
else:
create_kernel = partial(softmax_kernel, projection_matrix=self.projection_matrix, device=device)
q = create_kernel(q, is_query=True)
k = create_kernel(k, is_query=False)
attn_fn = linear_attention if not self.causal else self.causal_linear_fn
out = attn_fn(q, k, v)
return out
class ProjectionUpdater(nn.Module):
def __init__(self, instance, feature_redraw_interval):
super().__init__()
self.instance = instance
self.feature_redraw_interval = feature_redraw_interval
self.register_buffer('calls_since_last_redraw', torch.tensor(0))
def fix_projections_(self):
self.feature_redraw_interval = None
def redraw_projections(self):
model = self.instance
if not self.training:
return
if exists(self.feature_redraw_interval) and self.calls_since_last_redraw >= self.feature_redraw_interval:
device = get_module_device(model)
fast_attentions = find_modules(model, FastAttention)
for fast_attention in fast_attentions:
fast_attention.redraw_projection_matrix(device)
self.calls_since_last_redraw.zero_()
return
self.calls_since_last_redraw += 1
def forward(self, x):
raise NotImplementedError
class Attention(nn.Module):
def __init__(
self,
dim,
causal=False,
heads=8,
dim_head=64,
local_heads=0,
local_window_size=256,
nb_features=None,
feature_redraw_interval=1000,
generalized_attention=False,
kernel_fn=nn.ReLU(),
dropout=0.,
no_projection=False,
qkv_bias=False,
attn_out_bias=True
):
super().__init__()
assert dim % heads == 0, 'dimension must be divisible by number of heads'
dim_head = default(dim_head, dim // heads)
inner_dim = dim_head * heads
self.fast_attention = FastAttention(dim_head, nb_features, causal=causal,
generalized_attention=generalized_attention, kernel_fn=kernel_fn,
no_projection=no_projection)
self.heads = heads
self.global_heads = heads - local_heads
self.local_attn = LocalAttention(window_size=local_window_size, causal=causal, autopad=True, dropout=dropout,
look_forward=int(not causal),
rel_pos_emb_config=(dim_head, local_heads)) if local_heads > 0 else None
self.to_q = nn.Linear(dim, inner_dim, bias=qkv_bias)
self.to_k = nn.Linear(dim, inner_dim, bias=qkv_bias)
self.to_v = nn.Linear(dim, inner_dim, bias=qkv_bias)
self.to_out = nn.Linear(inner_dim, dim, bias=attn_out_bias)
self.dropout = nn.Dropout(dropout)
def forward(self, x, pos_emb=None, context=None, mask=None, context_mask=None, **kwargs):
b, n, _, h, gh = *x.shape, self.heads, self.global_heads
cross_attend = exists(context)
context = default(context, x)
context_mask = default(context_mask, mask) if not cross_attend else context_mask
q, k, v = self.to_q(x), self.to_k(context), self.to_v(context)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
(q, lq), (k, lk), (v, lv) = map(lambda t: (t[:, :gh], t[:, gh:]), (q, k, v))
attn_outs = []
if not empty(q):
if exists(context_mask):
global_mask = context_mask[:, None, :, None]
v.masked_fill_(~global_mask, 0.)
if exists(pos_emb) and not cross_attend:
q, k = apply_rotary_pos_emb(q, k, pos_emb)
out = self.fast_attention(q, k, v)
attn_outs.append(out)
if not empty(lq):
assert not cross_attend, 'local attention is not compatible with cross attention'
out = self.local_attn(lq, lk, lv, input_mask=mask)
attn_outs.append(out)
out = torch.cat(attn_outs, dim=1)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return self.dropout(out)
| 9,531
| 35.945736
| 136
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/transformer_layers.py
|
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.bottle import Bottle
from onmt.modules.static_dropout import StaticDropout
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.linear import group_linear, FeedForwardSwish
from onmt.modules.linear import FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.self_attention import SelfMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from collections import defaultdict
from onmt.modules.pre_post_processing import PrePostProcessing
# class PrePostProcessing(nn.Module):
# """Applies processing to tensors
# Args:
# d_model: dimension of model
# p: dropout probability
# sequence of processing steps:
# n = normalization
# d = dropout
# a = adding previous input to output (residual)
# """
#
# def __init__(self, d_model, dropout_p, sequence='nda', variational=False, elementwise_affine=True):
# super(PrePostProcessing, self).__init__()
# self.d_model = d_model
# self.dropout_p = dropout_p
#
# self.steps = list(sequence)
#
# if onmt.constants.residual_type == 'gated':
# # gated residual
# # initialize k with one
# self.k = nn.Parameter(torch.ones(1))
#
# if 'n' in self.steps:
# ln = nn.LayerNorm((self.d_model,), elementwise_affine=elementwise_affine)
# self.layer_norm = Bottle(ln)
# if 'd' in self.steps:
# if variational:
# self.dropout = VariationalDropout(self.dropout_p, batch_first=False)
# else:
# self.dropout = nn.Dropout(self.dropout_p)
# if 'z' in self.steps:
# # Rezero residual method
# self.g = nn.Parameter(torch.tensor(0.0))
#
# def forward(self, tensor, input_tensor=None, mask=None):
#
# output = tensor
# for step in self.steps:
# if step == 'n':
# output = self.layer_norm(output, mask=mask)
# if step == 'd':
# output = self.dropout(output)
# if step == 'a':
# if input_tensor is not None:
# output = output + input_tensor
# if step == 'z': # rezero-residual but scaling the output with initially small g
# output = output * self.g
# if input_tensor is not None:
# output = output + input_tensor
# return output
def preprocessing(rezero, *args, **kwargs):
if rezero:
return Identity()
else:
return PrePostProcessing(*args, **kwargs)
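# --- Illustrative note (not part of the original module) -----------------
# The encoder/decoder layers below wire these helpers into pre-norm
# residual blocks of the form
#     y = postprocess(sub_layer(preprocess(x)), x)
# i.e. layer norm (sequence 'n') before the sub-layer, then dropout plus
# residual add (sequence 'da') after it. Where the `preprocessing` helper
# is used, rezero=True simply replaces the pre-norm with an Identity.
# Hypothetical restatement of the calling pattern:
def _prenorm_block_sketch(x, sub_layer, preprocess, postprocess):
    return postprocess(sub_layer(preprocess(x)), x)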
class EncoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one encoder layer
Args:
h: number of heads
d_model: dimension of model
p: dropout probability
d_ff: dimension of feed forward
Params:
multihead: multi-head attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
mask: batch_size x len_query x len_key or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
"""
# def __init__(self, h, d_model, p, d_ff, attn_p=0.1, variational=False, death_rate=0.0, **kwargs):
def __init__(self, opt, death_rate=0.0, **kwargs):
super(EncoderLayer, self).__init__()
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.fast_self_attention = opt.fast_self_attention
self.macaron = opt.macaron
self.ffn_scale = 0.5 if self.macaron else 1
if self.macaron:
self.preprocess_mcr_ffn = preprocessing(opt.rezero, opt.model_size, opt.dropout, sequence='n')
self.postprocess_mcr_ffn = PrePostProcessing(opt.model_size, opt.dropout,
sequence='da', variational=self.variational)
self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational,
activation=opt.ffn_activation, glu=opt.ffn_glu)
self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if opt.fast_self_attention:
self.multihead = SelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
else:
self.multihead = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=1)
if not opt.fast_feed_forward:
feedforward = FeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational)
self.feedforward = Bottle(feedforward)
else:
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational,
activation=opt.ffn_activation, glu=opt.ffn_glu)
def forward(self, input, attn_mask):
coin = True
if self.training:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
# MCR feedforward
if self.macaron:
out = self.mcr_feedforward(self.preprocess_mcr_ffn(input))
if self.training and self.death_rate > 0:
ffn_scale = self.ffn_scale / (1 - self.death_rate)
else:
ffn_scale = self.ffn_scale
input = self.postprocess_mcr_ffn(out * ffn_scale, input)
query = self.preprocess_attn(input)
if self.fast_self_attention:
out, _ = self.multihead(query, None, attn_mask, None)
else:
out, _ = self.multihead(query, query, query, attn_mask)
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
if self.training and self.death_rate > 0:
ffn_scale = self.ffn_scale / (1 - self.death_rate)
else:
ffn_scale = self.ffn_scale
input = self.postprocess_ffn(out * ffn_scale, input)
# checking for inf/nan which can happen randomly in fp16 ...
if torch.isinf(input).any() or torch.isnan(input).any():
clamp_value = torch.finfo(input.dtype).max - 1000
input.clamp_(min=-clamp_value, max=clamp_value)
return input
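# --- Illustrative note (not part of the original module) -----------------
# The `death_rate` logic above is stochastic depth: during training the
# whole layer is skipped with probability p = death_rate, and when it
# survives its residual branch is scaled by 1 / (1 - p), so the expected
# contribution E[out] = (1 - p) * out / (1 - p) = out matches inference,
# where the layer always runs unscaled. Minimal numeric sketch:
def _stochastic_depth_expectation_sketch(p=0.2, n=100000, seed=0):
    import torch
    torch.manual_seed(seed)
    out = torch.tensor(1.0)
    keep = (torch.rand(n) >= p).float()     # 1 if the layer survives
    trained = keep * (out / (1 - p))        # scaled residual when kept
    return trained.mean()                   # ~= out, the inference value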
class DecoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one layer of decoder
Args:
h: number of heads
d_model: dimension of model
p: dropout probability
d_ff: dimension of feed forward
Params:
multihead_tgt: multi-head self attentions layer
multihead_src: multi-head encoder-decoder attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
context: batch_size x len_src x d_model
mask_tgt: batch_size x len_query x len_key or broadcastable
mask_src: batch_size x len_query x len_src or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
coverage: batch_size x len_query x len_key
"""
def __init__(self, opt, death_rate=0.0):
super(DecoderLayer, self).__init__()
self.ignore_source = opt.ignore_source
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.fast_self_attention = opt.fast_self_attention
self.macaron = opt.macaron
self.ffn_scale = 0.5 if self.macaron else 1
if self.macaron:
self.preprocess_mcr_ffn = preprocessing(opt.rezero, opt.model_size, opt.dropout, sequence='n')
self.postprocess_mcr_ffn = PrePostProcessing(opt.model_size, opt.dropout,
sequence='da', variational=self.variational)
self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational,
activation=opt.ffn_activation, glu=opt.ffn_glu)
self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if opt.fast_self_attention:
self.multihead_tgt = SelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
else:
self.multihead_tgt = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=1)
if not self.ignore_source:
self.preprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if not opt.fast_xattention:
self.multihead_src = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=2)
else:
self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)
self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if not opt.fast_feed_forward:
feedforward = FeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational)
self.feedforward = Bottle(feedforward)
else:
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational,
activation=opt.ffn_activation, glu=opt.ffn_glu)
def forward(self, input, context, mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=True):
""" Self attention layer
layernorm > attn > dropout > residual
"""
if incremental:
if incremental_cache is None:
incremental_cache = dict()
coverage = None
coin = True
if self.training:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
# MCR feedforward
if self.macaron:
out = self.mcr_feedforward(self.preprocess_mcr_ffn(input))
if self.training and self.death_rate > 0:
ffn_scale = self.ffn_scale / (1 - self.death_rate)
else:
ffn_scale = self.ffn_scale
input = self.postprocess_mcr_ffn(out * ffn_scale, input)
query = self.preprocess_attn(input)
if self.fast_self_attention:
out, _, = self.multihead_tgt(query, None, None, mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache)
else:
out, _, = self.multihead_tgt(query, query, query, mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache)
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
if not self.ignore_source:
query = self.preprocess_src_attn(input)
out, coverage = self.multihead_src(query, context, context, mask_src,
incremental=incremental,
incremental_cache=incremental_cache)
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_src_attn(out, input)
else:
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
if self.training and self.death_rate > 0:
ffn_scale = self.ffn_scale / (1 - self.death_rate)
else:
ffn_scale = self.ffn_scale
input = self.postprocess_ffn(out * ffn_scale, input)
# checking for inf/nan which can happen randomly in fp16 ...
if torch.isinf(input).any() or torch.isnan(input).any():
clamp_value = torch.finfo(input.dtype).max - 1000
input.clamp_(min=-clamp_value, max=clamp_value)
return input, coverage, incremental_cache
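# --- Illustrative sketch (not part of the original module) ---------------
# At inference time the layer above can be driven one target step at a
# time; `incremental_cache` is a per-layer dict in which the attention
# modules keep previously computed keys/values so earlier positions are
# not recomputed. The loop below is a hypothetical restatement of that
# calling pattern (`steps`, the masks and tensor shapes are assumptions).
def _incremental_decoding_sketch(layer, steps, context, mask_tgt, mask_src):
    cache = None
    out = None
    for x_t in steps:                       # x_t: (1, batch, d_model)
        out, _, cache = layer(x_t, context, mask_tgt, mask_src,
                              incremental=True, incremental_cache=cache)
    return out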
class PositionalEncoding(nn.Module):
"""Adds positional embeddings to standard word embeddings
This matches the original TensorFlow implementation at
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py.
Args:
d_model: dimension of model
p: dropout probability
len_max: max seq length for pre-calculated positional embeddings
Inputs Shapes:
word_emb: batch_size x len_seq x d_model
Outputs Shapes:
out: batch_size x len_seq x d_model
"""
def __init__(self, d_model, p=0, len_max=512):
# save a fixed positional embedding matrix up to len_max,
# so that it does not need to be recreated every time
super(PositionalEncoding, self).__init__()
self.len_max = len_max
self.d_model = d_model
self.data_type = None
self.renew(len_max)
self.p = p
def renew(self, new_max_len):
# delete the old variable to avoid PyTorch's error when registering a new buffer
cuda = False
if hasattr(self, 'pos_emb'):
cuda = self.pos_emb.is_cuda
# self.data_type = torch.type(self.pos_emb)
del self.pos_emb
position = torch.arange(0, new_max_len).float()
num_timescales = self.d_model // 2
log_timescale_increment = math.log(10000) / (num_timescales - 1)
inv_timescales = torch.exp(torch.arange(0, num_timescales).float() * -log_timescale_increment)
scaled_time = position.unsqueeze(1) * inv_timescales.unsqueeze(0)
pos_emb = torch.cat((torch.sin(scaled_time), torch.cos(scaled_time)), 1)
if cuda:
pos_emb = pos_emb.cuda()
if self.data_type is not None:
pos_emb = pos_emb.type(self.data_type)
# wrap in a buffer so that model can be moved to GPU
self.register_buffer('pos_emb', pos_emb)
# self.data_type = self.pos_emb.type()
self.len_max = new_max_len
def forward(self, word_emb, t=None):
len_seq = t if t else word_emb.size(1)
self.data_type = word_emb.type()
if len_seq > self.len_max:
self.renew(len_seq)
if word_emb.size(1) == len_seq:
time_ = self.pos_emb[:len_seq, :].type_as(word_emb)
out = word_emb + time_
else:
time_emb = self.pos_emb[len_seq - 1, :] # 1 x dim
# out should have size bs x 1 x dim
out = word_emb + time_emb.unsqueeze(0).repeat(word_emb.size(0), 1, 1).type_as(word_emb)
return out
def get_positional_embeddings(self, word_emb, t=None):
len_seq = t if t else word_emb.size(1)
self.data_type = word_emb.type()
if len_seq > self.len_max:
self.renew(len_seq)
if word_emb.size(1) == len_seq:
time_emb = self.pos_emb[:len_seq, :].type_as(word_emb)
else:
time_emb = self.pos_emb[len_seq - 1, :].unsqueeze(0).type_as(word_emb)
return time_emb
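# --- Illustrative note (not part of the original module) -----------------
# The table built in `renew` stores, for position p and timescale i
# (i < d_model // 2):
#     pos_emb[p, i]                = sin(p * inv_timescales[i])
#     pos_emb[p, i + d_model // 2] = cos(p * inv_timescales[i])
# i.e. the sin block and the cos block are concatenated, matching the
# tensor2tensor layout referenced in the docstring. Minimal shape check
# (all sizes are hypothetical):
def _positional_encoding_shape_sketch():
    import torch
    pe = PositionalEncoding(d_model=512, len_max=128)
    emb = torch.zeros(2, 64, 512)           # (batch, seq, d_model)
    assert pe(emb).shape == (2, 64, 512)
    assert pe.pos_emb.shape == (128, 512)
    return pe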
| 17,782 | 39.142212 | 118 | py |
| NMTGMinor | NMTGMinor-master/onmt/models/pretrain_transformer.py |
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
from torch.utils.checkpoint import checkpoint
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.models.transformers import TransformerDecodingState
torch_version = float(torch.__version__[:3])
class PretrainTransformer(NMTModel):
"""Main model in 'Attention is all you need' """
def __init__(self, encoder, decoder, generator=None, rec_decoder=None, rec_generator=None,
mirror=False, ctc=False):
super().__init__(encoder, decoder, generator, rec_decoder, rec_generator, ctc=ctc)
# if hasattr(decoder, 'dec_pretrained_model') and decoder.dec_pretrained_model:
# self.model_size = self.decoder.config.bert_hidden_size
# self.tgt_vocab_size = self.decoder.config.vocab_size
# self.switchout = 0
# else:
# self.model_size = self.decoder.model_size
# self.tgt_vocab_size = self.decoder.word_lut.weight.size(0)
# self.switchout = self.decoder.switchout
self.model_size = self.generator[0].linear.weight.size(1)
self.tgt_vocab_size = self.generator[0].linear.weight.size(0)
# if self.encoder.input_type == 'text':
# if hasattr(encoder, 'enc_pretrained_model') and encoder.enc_pretrained_model:
# self.src_vocab_size = self.encoder.config.vocab_size
# else:
# self.src_vocab_size = self.encoder.word_lut.weight.size(0)
# else:
self.src_vocab_size = self.tgt_vocab_size
if mirror:
self.mirror_decoder = copy.deepcopy(self.decoder)
self.mirror_g = nn.Linear(decoder.model_size, decoder.model_size)
self.mirror_generator = copy.deepcopy(self.generator)
self.mirror_generator[0].linear.weight = self.decoder.word_lut.weight
if self.reconstruct:
self.rec_linear = nn.Linear(decoder.model_size, decoder.model_size)
if self.ctc:
self.ctc_linear = nn.Linear(encoder.model_size, self.tgt_vocab_size)
def reset_states(self):
return
def forward(self, batch, target_mask=None, streaming=False, zero_encoder=False,
mirror=False, streaming_state=None, nce=False, **kwargs):
"""
:param nce: use noise contrastive estimation
:param streaming_state:
:param streaming:
:param mirror: if using mirror network for future anticipation
:param batch: data object sent from the dataset
:param target_mask:
:param zero_encoder: zero out the encoder output (if necessary)
:return:
"""
# if self.switchout > 0 and self.training:
# batch.switchout(self.switchout, self.src_vocab_size, self.tgt_vocab_size)
src = batch.get('source')
tgt = batch.get('target_input')
src_pos = batch.get('source_pos')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
tgt_lengths = batch.tgt_lengths
org_src = src
org_tgt = tgt
src = src.transpose(0, 1) # transpose to have batch first
tgt = tgt.transpose(0, 1)
src_attention_mask = src.ne(onmt.constants.SRC_PAD).long() # [b, src_len]
if hasattr(self.encoder, 'enc_pretrained_model') and self.encoder.enc_pretrained_model in ["bert", "roberta"]:
segments_tensor = src.ne(onmt.constants.SRC_PAD).long()
enc_outputs = self.encoder(src, src_attention_mask, segments_tensor) # the encoder is a pretrained model
context = enc_outputs[0]
encoder_output = defaultdict(lambda: None)
encoder_output['context'] = context
encoder_output['src_attention_mask'] = src_attention_mask
encoder_output['streaming_state'] = None
if hasattr(self.encoder, 'enc_pretrained_model') and \
self.encoder.enc_pretrained_model in ["mbart", "mbart50", "m2m", "m2m100", "deltalm"]:
# src_attention_mask = src.ne(onmt.constants.SRC_PAD).long()
src_attention_mask = batch.get("src_selfattn_mask")
enc_outputs = self.encoder(src, src_attention_mask) # the encoder is a pretrained model
context = enc_outputs[0]
context = context # .transpose(0, 1).contiguous()
encoder_output = defaultdict(lambda: None)
encoder_output['context'] = context
encoder_output['src_attention_mask'] = src_attention_mask
encoder_output['streaming_state'] = None
else:
encoder_output = self.encoder(src, input_pos=src_pos, input_lang=src_lang, streaming=streaming,
src_lengths=src_lengths, streaming_state=streaming_state)
encoder_output = defaultdict(lambda: None, encoder_output)
context = encoder_output['context']
context = context.transpose(0, 1) # to make it consistent with bert batch first
# the state is changed
streaming_state = encoder_output['streaming_state']
# DECODER PART
if hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in ["bert", "roberta"]:
# src: [b, src_l] context: [b, src_l, de_model]
tgt_token_type = tgt.ne(onmt.constants.TGT_PAD).long() # [bsz, len]
tgt_attention_mask = tgt.ne(onmt.constants.TGT_PAD).long() # [bsz, len]
decoder_output = self.decoder(input_ids=tgt,
attention_mask=tgt_attention_mask,
token_type_ids=tgt_token_type,
encoder_hidden_states=context,
encoder_attention_mask=src_attention_mask,
)
decoder_output = decoder_output[0]
output = decoder_output.transpose(0, 1) # [bsz, tgt_len, d] => [tgt_len, bsz, d]
output_dict = defaultdict(lambda: None)
context = context.transpose(0, 1) # to [src_l, b, de_model]
elif hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in \
["mbart", "mbart50", "m2m", "m2m100", "deltalm"]:
tgt_attention_mask = tgt.eq(onmt.constants.TGT_PAD).long() # [bsz, len]
# This mask is often ignored because a simple time (causal) mask also covers the pad mask.
# However, it should be handled carefully when using flash-attn.
decoder_output = self.decoder(input_ids=tgt,
attention_mask=tgt_attention_mask,
encoder_hidden_states=context,
encoder_attention_mask=src_attention_mask,
lang=tgt_lang,
)
decoder_output = decoder_output[0]
# output = decoder_output
output = decoder_output # .transpose(0, 1) # [bsz, tgt_len, d] => [tgt_len, bsz, d]
output_dict = defaultdict(lambda: None)
# context = context.transpose(0, 1) # to [src_l, b, de_model]
else:
context = context.transpose(0, 1) # to [src_l, b, de_model] src: [b, l]
decoder_output = self.decoder(tgt, context, src,
src_lang=src_lang, tgt_lang=tgt_lang,
input_pos=tgt_pos, streaming=streaming,
src_lengths=src_lengths, tgt_lengths=tgt_lengths,
streaming_state=streaming_state)
# update the streaming state again
decoder_output = defaultdict(lambda: None, decoder_output)
streaming_state = decoder_output['streaming_state']
output = decoder_output['hidden'] # [tgt_len, bsz, d]
# build the output dict based on decoder output
output_dict = defaultdict(lambda: None, decoder_output)
output_dict['hidden'] = output # [tgt_len, bsz, d]
output_dict['context'] = context # [b, l, de_model]
output_dict['src_mask'] = encoder_output['src_attention_mask'] # [b, l, de_model]
output_dict['src'] = src
output_dict['target_mask'] = target_mask
output_dict['streaming_state'] = streaming_state
output_dict['target'] = batch.get('target_output')
# final layer: computing softmax
if self.training and nce:
output_dict = self.generator[0](output_dict)
else:
logprobs = self.generator[0](output_dict)['logits']
output_dict['logprobs'] = logprobs
# Mirror network: reverse the target sequence and perform backward language model
if mirror:
# tgt_reverse = torch.flip(batch.get('target_input'), (0, ))
tgt_pos = torch.flip(batch.get('target_pos'), (0,))
tgt_reverse = torch.flip(batch.get('target'), (0,))
tgt_reverse_input = tgt_reverse[:-1]
tgt_reverse_output = tgt_reverse[1:]
tgt_reverse_input = tgt_reverse_input.transpose(0, 1)
# perform an additional backward pass
reverse_decoder_output = self.mirror_decoder(tgt_reverse_input, context, src, src_lang=src_lang,
tgt_lang=tgt_lang, input_pos=tgt_pos)
reverse_decoder_output['src'] = src
reverse_decoder_output['context'] = context
reverse_decoder_output['target_mask'] = target_mask
reverse_logprobs = self.mirror_generator[0](reverse_decoder_output)['logits']
output_dict['reverse_target'] = tgt_reverse_output
output_dict['reverse_hidden'] = reverse_decoder_output['hidden']
output_dict['reverse_logprobs'] = reverse_logprobs
output_dict['target_input'] = batch.get('target_input')
output_dict['target_lengths'] = batch.tgt_lengths
# learn weights for mapping (g in the paper)
output_dict['hidden'] = self.mirror_g(output_dict['hidden'])
# Reconstruction network
if self.reconstruct:
bos = org_tgt[0].unsqueeze(0) # 1 x B
src_input = torch.cat([bos, org_src[:-1]], dim=0) # T x B
src_output = org_src
src_input = src_input.transpose(0, 1)
rec_context = self.rec_linear(output_dict['hidden']) # T x B x H
rec_decoder_output = self.rec_decoder(src_input, rec_context, tgt, tgt_lang=src_lang, input_pos=src_pos)
rec_output = rec_decoder_output['hidden']
rec_logprobs = self.rec_generator[0](rec_decoder_output)['logits']
output_dict['rec_logprobs'] = rec_logprobs
output_dict['rec_hidden'] = rec_output
output_dict['reconstruct'] = True
output_dict['rec_target'] = src_output
else:
output_dict['reconstruct'] = False
# compute the logits for each encoder step
if self.ctc:
output_dict['encoder_logits'] = self.ctc_linear(output_dict['context'])
return output_dict
def decode(self, batch):
"""
:param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
:return: gold_scores (torch.Tensor) log probs for each sentence
gold_words (Int) the total number of non-padded tokens
allgold_scores (list of Tensors) log probs for each word in the sentence
"""
src = batch.get('source')
src_pos = batch.get('source_pos')
tgt_input = batch.get('target_input')
tgt_output = batch.get('target_output')
tgt_pos = batch.get('target_pos')
# tgt_atb = batch.get('target_atb') # a dictionary of attributes
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
# transpose to have batch first
src = src.transpose(0, 1)
tgt_input = tgt_input.transpose(0, 1)
batch_size = tgt_input.size(0)
context = self.encoder(src, input_pos=src_pos, input_lang=src_lang)['context']
if hasattr(self, 'autoencoder') and self.autoencoder \
and self.autoencoder.representation == "EncoderHiddenState":
context = self.autoencoder.autocode(context)
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
decoder_output = self.decoder(tgt_input, context, src, tgt_lang=tgt_lang, src_lang=src_lang,
input_pos=tgt_pos)['hidden']
output = decoder_output
if hasattr(self, 'autoencoder') and self.autoencoder and \
self.autoencoder.representation == "DecoderHiddenState":
output = self.autoencoder.autocode(output)
for dec_t, tgt_t in zip(output, tgt_output):
dec_out = defaultdict(lambda: None)
dec_out['hidden'] = dec_t.unsqueeze(0)
dec_out['src'] = src
dec_out['context'] = context
if isinstance(self.generator, nn.ModuleList):
gen_t = self.generator[0](dec_out)['logits']
else:
gen_t = self.generator(dec_out)['logits']
gen_t = F.log_softmax(gen_t, dim=-1, dtype=torch.float32)
gen_t = gen_t.squeeze(0)
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.constants.TGT_PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
gold_words += tgt_t.ne(onmt.constants.TGT_PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
def renew_buffer(self, new_len):
self.decoder.renew_buffer(new_len)
def step(self, input_t, decoder_state, streaming=False):
"""
Decoding function:
generate new decoder output based on the current input and current decoder state
the decoder state is updated in the process
:param streaming:
:param input_t: the input word index from time 0 to time t
:param decoder_state: object DecoderState containing the buffers required for decoding
:return: a dictionary containing: log-prob output and the attention coverage
"""
output_dict = self.decoder.step(input_t, decoder_state, streaming=streaming)
output_dict['src'] = decoder_state.src.transpose(0, 1)
# squeeze to remove the time step dimension
log_prob = self.generator[0](output_dict)['logits'].squeeze(0)
log_prob = F.log_softmax(log_prob, dim=-1, dtype=torch.float32) # [beam*b, 1, vocab_size]
output_dict['log_prob'] = log_prob.squeeze(1)
# Currently attention score is not returned
# coverage = output_dict['coverage']
# last_coverage = coverage[:, -1, :].squeeze(1)
# output_dict['coverage'] = last_coverage
return output_dict
def create_decoder_state(self, batch, beam_size=1, type=1, buffering=True, **kwargs):
"""
Generate a new decoder state based on the batch input
:param buffering:
:param streaming:
:param type:
:param batch: Batch object (may not contain target during decoding)
:param beam_size: Size of beam used in beam search
:return:
"""
src = batch.get('source')
src_pos = batch.get('source_pos')
tgt_atb = batch.get('target_atb')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_transposed = src.transpose(0, 1) # [batch_size, src_len]
if not self.encoder.enc_pretrained_model:
encoder_output = self.encoder(src_transposed, input_pos=src_pos, input_lang=src_lang)
elif self.encoder.enc_pretrained_model in ['mbart', 'mbart50', 'm2m', 'm2m100', "deltalm"]:
src_attention_mask = batch.get("src_selfattn_mask")
enc_outputs = self.encoder(src_transposed, src_attention_mask)
context = enc_outputs[0]
encoder_output = defaultdict(lambda: None)
encoder_output["context"] = context
else:
print("Warning: unknown enc_pretrained_model")
raise NotImplementedError
dec_pretrained_model = self.decoder.dec_pretrained_model
if not dec_pretrained_model:
mask_src = None
elif dec_pretrained_model in ["mbart", "mbart50", "m2m", "m2m100", "deltalm"]:
mask_src = src_attention_mask # batch_size x 1 x len_src for broadcasting
else:
print("Warning: unknown dec_pretrained_model")
raise NotImplementedError
decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'], src_lang,
beam_size=beam_size, model_size=self.model_size,
type=type, buffering=buffering, src_mask=mask_src,
dec_pretrained_model=self.decoder.dec_pretrained_model)
return decoder_state
def init_stream(self):
pass
def set_memory_size(self, src_memory_size, tgt_memory_size):
pass
def tie_weights(self):
assert self.generator is not None, "The generator needs to be created before sharing weights"
if hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in ["bert", "roberta"]:
self.generator[0].linear.weight = self.decoder.embeddings.word_embeddings.weight
if hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model \
in ["mbart", "mbart50", "m2m", "m2m100", "deltalm"]:
self.generator[0].linear.weight = self.decoder.embed_tokens.weight
else:
self.generator[0].linear.weight = self.decoder.word_lut.weight
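# --- Illustrative note (not part of the original module) -----------------
# After `tie_weights`, the generator's output projection and the decoder
# embedding refer to the same Parameter, so gradients from both paths
# accumulate into a single matrix. Hypothetical check, assuming a built
# model of this class with a plain (non-pretrained) decoder:
def _check_tied_weights_sketch(model):
    model.tie_weights()
    return model.generator[0].linear.weight is model.decoder.word_lut.weight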
| 18,440 | 45.923664 | 118 | py |
| NMTGMinor | NMTGMinor-master/onmt/models/__init__.py | | 0 | 0 | 0 | py |
| NMTGMinor | NMTGMinor-master/onmt/models/relative_transformer.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.models.relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.reversible_models.relative_transformers import ReversibleEncoderFunction, ReversibleDecoderFunction, \
ReversibleTransformerDecoderLayer, ReversibleTransformerEncoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
torch.set_printoptions(threshold=500000)
# Positional Embedding with discrete inputs
class SinusoidalPositionalEmbedding(nn.Module):
def __init__(self, demb):
super(SinusoidalPositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, sin_first=True, bsz=None):
"""
:param bsz:
:param pos_seq: sequences of RELATIVE position indices (can be negative for future)
:param sin_first: in the 'Attention is all you need' paper, sin comes first, then cosine
"""
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
if sin_first:
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
else:
pos_emb = torch.cat([sinusoid_inp.cos(), sinusoid_inp.sin()], dim=-1)
if bsz is not None:
return pos_emb[:, None, :].repeat(1, bsz, 1)
else:
return pos_emb[:, None, :]
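# --- Illustrative note (not part of the original module) -----------------
# For relative attention the callers below build the position sequence
#     pos = [klen - 1, ..., 1, 0]                (unidirectional), or
#     pos = [klen - 1, ..., 0, ..., -(klen - 1)] (bidirectional),
# so the embedding covers distances to past keys and, in the bidirectional
# case, to future keys as well. Minimal shape check (sizes hypothetical):
def _relative_position_embedding_sketch(klen=5, d_model=8):
    import torch
    pe = SinusoidalPositionalEmbedding(d_model)
    pos = torch.arange(klen - 1, -klen, -1.0)   # 2 * klen - 1 positions
    emb = pe(pos)
    assert emb.shape == (2 * klen - 1, 1, d_model)
    return emb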
class RelativeTransformerEncoder(TransformerEncoder):
def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
self.death_rate = opt.death_rate
self.learnable_position_encoding = opt.learnable_position_encoding
self.layer_modules = list()
self.asynchronous = opt.asynchronous
self.max_memory_size = opt.max_memory_size
self.extra_context_size = opt.extra_context_size
self.experimental = opt.experimental
self.unidirectional = opt.unidirectional
self.reversible = opt.src_reversible
self.n_heads = opt.n_heads
self.fast_self_attn = opt.fast_self_attention
self.add_position_encoding = opt.add_position_encoding
# build_modules will be called from the inherited constructor
super(RelativeTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type,
language_embeddings)
# learnable position encoding
if self.learnable_position_encoding:
raise NotImplementedError
else:
# or using pre-set sinusoidal
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
if self.reversible:
print("* Reversible Encoder with Relative Attention with %.2f expected layers" % e_length)
else:
print("* Transformer Encoder with Relative Attention with %.2f expected layers" % e_length)
if self.unidirectional:
print("* Running a unidirectional Encoder.")
self.layer_modules = nn.ModuleList()
for _l in range(self.layers):
# linearly decay the death rate
death_r = (_l + 1.0) / self.layers * self.death_rate
if not self.reversible:
block = RelativeTransformerEncoderLayer(self.opt, death_rate=death_r)
else:
block = ReversibleTransformerEncoderLayer(self.opt, death_rate=death_r)
self.layer_modules.append(block)
def create_stream_mask(self, input, input_length, prev_mem_size):
lengths = input_length.tolist()
mask = None
for length in lengths:
# mask is either None (first segment) or the mask accumulated over the previous segments
if mask is None:
prev_length = 0
else:
prev_length = mask.size(1)
# n current queries attend to n + p keys
current_mask = input.new_zeros(length, length + prev_length)
if prev_length > 0:
prev_mask = input.new_ones(prev_length, length)
prev_mask = torch.cat([mask, prev_mask], dim=-1)
else:
prev_mask = None
if prev_mask is not None:
mask = torch.cat([prev_mask, current_mask], dim=0)
else:
mask = current_mask
if prev_mem_size > 0:
# all current elements attend to all buffer elements
buffer_mask = mask.new_zeros(mask.size(0), prev_mem_size)
mask = torch.cat([buffer_mask, mask], dim=-1)
mask = mask.bool()
return mask
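# --- Illustrative note (not part of the original module) -----------------
# `create_stream_mask` builds a block-wise causal mask over concatenated
# segments: queries of segment s may attend to the keys of segments 1..s
# (plus `prev_mem_size` buffered keys) but not to later segments, with
# True meaning "masked out". Hypothetical worked example for two segments
# of lengths 2 and 3; the method never reads `self`, so None stands in
# for the encoder instance:
def _stream_mask_sketch():
    import torch
    dummy = torch.zeros(1)                  # only used for new_zeros/new_ones
    lengths = torch.tensor([2, 3])
    mask = RelativeTransformerEncoder.create_stream_mask(None, dummy, lengths, 0)
    assert mask.shape == (5, 5)
    assert mask[:2, 2:].all()               # segment 1 cannot see segment 2
    assert not mask[:2, :2].any() and not mask[2:].any()
    return mask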
def forward(self, input, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
input: batch_size x src_len (transposed to src_len x batch_size internally)
Outputs Shapes:
out: batch_size x src_len x d_model
mask_src
"""
""" Embedding: batch_size x src_len x d_model """
if self.input_type == "text":
bsz_first_input = input
input = input.transpose(0, 1)
# mask_src = input.eq(onmt.constants.PAD).unsqueeze(1) # batch_size x src_len x 1 for broadcasting
dec_attn_mask = bsz_first_input.eq(onmt.constants.PAD).unsqueeze(1)
if streaming:
streaming_state = kwargs.get('streaming_state', None)
mems = streaming_state.src_mems
# mem_len = streaming_state.src_mems[0].size(0)
# mem_len = streaming_state.prev_src_mem_size
mem_len = mems[0].size(0) if mems is not None else 0
input_length = kwargs.get('src_lengths', None)
streaming_state = kwargs.get('streaming_state', None)
mask_src = self.create_stream_mask(input, input_length, mem_len)
mask_src = mask_src.unsqueeze(2)
else:
mem_len = 0
mask_src = input.eq(onmt.constants.PAD).unsqueeze(0) # batch_size x src_len x 1 for broadcasting
mems = None
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
""" Adding language embeddings """
if self.use_language_embedding:
assert self.language_embedding is not None
# There is no "unsqueeze" here because the input is T x B x H and lang_emb is B x H
if self.language_embedding_type in ['sum', 'all_sum']:
lang_emb = self.language_embedding(input_lang)
# print(lang_emb.size(), emb.size())
emb = emb + lang_emb.unsqueeze(0)
else:
if streaming:
raise NotImplementedError
if not self.cnn_downsampling:
mask_src = input.narrow(2, 0, 1).squeeze(2).transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
dec_attn_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
input = input.narrow(2, 1, input.size(2) - 1)
emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
input.size(1), -1)
emb = emb.type_as(input)
else:
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
# first resizing to fit the CNN format
input = input.view(input.size(0), input.size(1), -1, self.channels)
input = input.permute(0, 3, 1, 2)
input = self.audio_trans(input)
input = input.permute(0, 2, 1, 3).contiguous()
input = input.view(input.size(0), input.size(1), -1)
# print(input.size())
input = self.linear_trans(input)
mask_src = long_mask[:, 0:input.size(1) * 4:4].transpose(0, 1).unsqueeze(0)
dec_attn_mask = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
# input is now B x T x H; it is transposed to T x B x H below
emb = input
emb = emb.transpose(0, 1)
input = input.transpose(0, 1)
abs_pos = None
mem_len = 0
mems = None
if self.unidirectional:
qlen = input.size(0)
klen = qlen + mem_len
attn_mask_src = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
pad_mask = mask_src
mask_src = pad_mask + attn_mask_src
# dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
mask_src = mask_src.gt(0)
if onmt.constants.torch_version >= 1.2:
mask_src = mask_src.bool()
""" Scale the emb by sqrt(d_model) """
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
qlen = input.size(0)
klen = qlen + mem_len
# Asynchronous positions: 2K+1 positions instead of K+1
if self.unidirectional:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
else:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
# pos_emb has size 2T+1 x 1 x H
pos_emb = self.positional_encoder(pos, bsz=input.size(1) if self.fast_self_attn else None)
if self.learnable_position_encoding:
raise NotImplementedError
# B x T x H -> T x B x H
context = emb
if streaming:
hids = [context]
# Apply dropout to both context and pos_emb
context = self.preprocess_layer(context)
pos_emb = self.preprocess_layer(pos_emb)
if self.reversible:
context = torch.cat([context, context], dim=-1)
assert streaming is not True, "Streaming and Reversible is not usable yet."
# print(context.size(), pos_emb.size())
context = ReversibleEncoderFunction.apply(context, pos_emb, self.layer_modules, mask_src)
else:
for i, layer in enumerate(self.layer_modules):
# src_len x batch_size x d_model
mems_i = mems[i] if mems is not None and streaming and self.max_memory_size > 0 else None
context = layer(context, pos_emb, mask_src, mems=mems_i)
if streaming:
hids.append(context)
# final layer norm
context = self.postprocess_layer(context)
output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask, 'src': input})
if streaming:
# streaming_state.prev_src_mem_size += sum(input_length.tolist())
# streaming_state.prune_source_memory(self.max_memory_size)
streaming_state.update_src_mems(hids, qlen)
output_dict['streaming_state'] = streaming_state
return output_dict
class RelativeTransformerDecoder(TransformerDecoder):
def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):
self.death_rate = opt.death_rate
self.max_memory_size = opt.max_memory_size
self.stream_context = opt.stream_context
self.extra_context_size = opt.extra_context_size
self.n_heads = opt.n_heads
self.fast_self_attn = opt.fast_self_attention
# build_modules will be called from the inherited constructor
super(RelativeTransformerDecoder, self).__init__(opt, dicts,
positional_encoder,
language_embeddings,
ignore_source,
allocate_positions=False)
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
# Parameters for the position biases - deprecated. kept for backward compatibility
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
def renew_buffer(self, new_len):
return
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
self.opt.ignore_source = self.ignore_source
if self.reversible:
print("* Transformer Reversible Decoder with Relative Attention with %.2f expected layers" % e_length)
else:
print("* Transformer Decoder with Relative Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = (l + 1.0) / self.layers * self.death_rate
if not self.reversible:
block = RelativeTransformerDecoderLayer(self.opt, death_rate=death_r)
else:
block = ReversibleTransformerDecoderLayer(self.opt, death_rate=death_r)
self.layer_modules.append(block)
def process_embedding(self, input, input_lang=None):
return input
def create_context_mask(self, input, src, src_lengths, tgt_lengths, extra_context_length=0):
"""
Generate the mask so that part of the target attends to a part of the source
:param extra_context_length:
:param input:
:param src:
:param src_lengths:
:param tgt_lengths:
:return:
"""
mask = None
if self.stream_context == 'global':
# Global context: one target attends to everything in the source
for (src_length, tgt_length) in zip(src_lengths, tgt_lengths):
if mask is None:
prev_src_length = 0
prev_tgt_length = 0
else:
prev_src_length, prev_tgt_length = mask.size(1), mask.size(0)
# the current sent attends to the current src sent and all src in the past
current_mask = input.new_zeros(tgt_length, src_length + prev_src_length)
# the previous target cannot attend to the current source
if prev_tgt_length > 0:
prev_mask = input.new_ones(prev_tgt_length, src_length)
prev_mask = torch.cat([mask, prev_mask], dim=-1)
else:
prev_mask = None
# the output mask has two parts: the prev and the current
if prev_mask is not None:
mask = torch.cat([prev_mask, current_mask], dim=0)
else:
mask = current_mask
elif self.stream_context in ['local', 'limited']:
# Local context: only attends to the aligned context
for (src_length, tgt_length) in zip(src_lengths, tgt_lengths):
if mask is None:
prev_src_length = 0
prev_tgt_length = 0
else:
prev_src_length, prev_tgt_length = mask.size(1), mask.size(0)
# the current tgt sent attends only to the current src sent
if prev_src_length > 0:
current_mask = torch.cat([input.new_ones(tgt_length, prev_src_length - extra_context_length),
input.new_zeros(tgt_length, src_length + extra_context_length)], dim=-1)
else:
current_mask = input.new_zeros(tgt_length, src_length + extra_context_length)
# the previous target cannot attend to the current source
if prev_tgt_length > 0:
prev_mask = input.new_ones(prev_tgt_length, src_length)
prev_mask = torch.cat([mask, prev_mask], dim=-1)
else:
prev_mask = None
# the output mask has two parts: the prev and the current
if prev_mask is not None:
mask = torch.cat([prev_mask, current_mask], dim=0)
else:
mask = current_mask
mask = mask.bool()
return mask
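# --- Illustrative note (not part of the original module) -----------------
# `create_context_mask` aligns target segments with source segments: in
# 'global' mode target segment s may attend to source segments 1..s, in
# 'local'/'limited' mode only to its own segment (optionally extended by
# `extra_context_length` source states). Hypothetical worked example for
# the 'local' mode; only `stream_context` is read from self, so a simple
# namespace stands in for the decoder instance:
def _context_mask_sketch():
    import torch
    from types import SimpleNamespace
    dummy_self = SimpleNamespace(stream_context='local')
    dummy = torch.zeros(1)
    src_lengths, tgt_lengths = [3, 2], [2, 2]
    mask = RelativeTransformerDecoder.create_context_mask(
        dummy_self, dummy, None, src_lengths, tgt_lengths)
    # 4 target rows x 5 source columns; each target segment only sees its
    # own source segment (True = masked out).
    assert mask.shape == (4, 5)
    assert not mask[:2, :3].any() and mask[:2, 3:].all()
    assert mask[2:, :3].all() and not mask[2:, 3:].any()
    return mask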
def create_self_attn_mask(self, input, tgt_lengths, prev_tgt_mem_size):
"""
Create a mask for the target words attending to the past
:param input:
:param tgt_lengths:
:param prev_tgt_mem_size:
:return:
"""
if self.stream_context in ['local', 'global']:
qlen = sum(tgt_lengths.tolist())
mlen = prev_tgt_mem_size
klen = qlen + mlen
mask = torch.triu(input.new_ones(qlen, klen), diagonal=1 + mlen).bool()[:, :, None]
elif self.stream_context in ['limited']:
# 'limited' means that every sentence only pays attention to at most `max_memory_size` past states
extra_mem_len = self.max_memory_size
# past_length = prev_tgt_mem_size
mask = None
memory_size = prev_tgt_mem_size
for length in tgt_lengths:
past_length = mask.size(0) if mask is not None else 0
qlen = length
mlen = min(memory_size, self.max_memory_size)
klen = qlen + mlen
cur_attn_mask = torch.triu(input.new_ones(qlen, klen), diagonal=1 + mlen)
# for the rest of the past sequence: don't look at them
if mlen < memory_size:
no_attn_mask = input.new_ones(qlen, memory_size - mlen)
cur_attn_mask = torch.cat([no_attn_mask, cur_attn_mask], dim=1)
if mask is not None:
prev_q, prev_k = mask.size(0), mask.size(1)
# the past doesn't look at future
prev_mask = input.new_ones(prev_q, qlen)
mask = torch.cat([mask, prev_mask], dim=1) # first, concatenate for the K dim
mask = torch.cat([mask, cur_attn_mask], dim=0) # concatenate for the Q dim
else:
mask = cur_attn_mask
memory_size = mask.size(1)
mask = mask.bool().unsqueeze(-1)
return mask
# TODO: merge forward_stream and forward
# TODO: write a step function for the encoder
def forward(self, input, context, src, input_pos=None, tgt_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (transposed to len_tgt x batch_size internally)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
""" Embedding: batch_size x len_tgt x d_model """
input = input.transpose(0, 1) # T x B
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
emb = emb * math.sqrt(self.model_size)
if streaming:
src_lengths = kwargs.get("src_lengths", None)
tgt_lengths = kwargs.get("tgt_lengths", None)
streaming_state = kwargs.get("streaming_state")
mems = streaming_state.tgt_mems
extra_context = streaming_state.extra_context
extra_context_length = extra_context.size(0) if extra_context is not None else 0
# mem_len = streaming_state.prev_tgt_mem_size
mem_len = mems[0].size(0) if mems is not None else 0
else:
mem_len = 0
mems = None
extra_context = None
if self.use_language_embedding:
lang_emb = self.language_embeddings(tgt_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
bos_emb = lang_emb.expand_as(emb[0])
emb[0] = bos_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
if streaming:
context_attn_mask = self.create_context_mask(input, src,
src_lengths, tgt_lengths,
extra_context_length)
mask_src = context_attn_mask.unsqueeze(0)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
qlen = input.size(0)
klen = qlen + mem_len
# preparing self-attention mask. The input is either left or right aligned
if streaming:
dec_attn_mask = self.create_self_attn_mask(input, tgt_lengths, mem_len)
else:
dec_attn_mask = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
# pad_mask = input.eq(onmt.constants.PAD).byte() # L x B
#
# dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
# dec_attn_mask = dec_attn_mask.gt(0)
dec_attn_mask = dec_attn_mask.bool()
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos, bsz=input.size(1) if self.fast_self_attn else None)
output = self.preprocess_layer(emb.contiguous())
if streaming:
hids = [output]
if extra_context is not None:
context = torch.cat([extra_context, context], dim=0)
pos_emb = self.preprocess_layer(pos_emb)
if self.reversible:
output = torch.cat([output, output], dim=-1)
output = ReversibleDecoderFunction.apply(output, pos_emb, context, self.layer_modules,
dec_attn_mask, mask_src)
coverage = None
else:
for i, layer in enumerate(self.layer_modules):
# batch_size x src_len x d_model output, coverage = layer(output, context, pos_emb, self.r_w_bias,
# self.r_r_bias, dec_attn_mask, mask_src)
mems_i = mems[i] if mems is not None and streaming and \
self.stream_context in ['local', 'global'] and self.max_memory_size > 0 else None
output, coverage, _ = layer(output, context, pos_emb, dec_attn_mask, mask_src, mems=mems_i)
if streaming:
hids.append(output)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
output_dict = defaultdict(lambda: None, output_dict)
if streaming:
# streaming_state.prev_tgt_mem_size += sum(tgt_lengths.tolist())
# streaming_state.prune_target_memory(self.max_memory_size)
# if we use the extra context: keep the last context
if self.extra_context_size > 0:
extra_context = context[-self.extra_context_size:].detach()
streaming_state.extra_context = extra_context
if self.stream_context in ['local', 'global']:
streaming_state.update_tgt_mems(hids, qlen)
output_dict['streaming_state'] = streaming_state
return output_dict
def step(self, input, decoder_state, streaming=False):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (transposed to len_tgt x batch_size internally)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
if streaming:
return self.step_streaming(input, decoder_state)
context = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
buffering = decoder_state.buffering
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1) # B x T
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
if buffering:
# use the last value of input to continue decoding
if input.size(1) > 1:
input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
else:
input_ = input.transpose(0, 1)
else:
input_ = input.transpose(0, 1) # from B x T to T x B
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_) * math.sqrt(self.model_size)
input = input.transpose(0, 1)
klen = input.size(0)
if self.use_language_embedding:
lang_emb = self.language_embeddings(lang) # B x H
if self.language_embedding_type in ['sum', 'all_sum']:
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
if input.size(0) == 1:
emb[0] = lang_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
# prepare position encoding
qlen = emb.size(0)
mlen = klen - qlen
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
if not buffering:
dec_attn_mask = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mlen).byte()[:, :, None]
if onmt.constants.torch_version >= 1.2:
dec_attn_mask = dec_attn_mask.bool()
else:
dec_attn_mask = None
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
output = emb.contiguous()
if self.reversible:
output_1, output_2 = output, output
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
if self.reversible:
if buffering:
output_1, output_2, coverage, buffer = layer(output_1, output_2, pos_emb, context,
dec_attn_mask, mask_src, incremental=True,
incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
else:
output_1, output_2, coverage, _ = layer(output_1, output_2, pos_emb, context,
dec_attn_mask, mask_src)
else:
if buffering:
output, coverage, buffer = layer(output, context, pos_emb, dec_attn_mask, mask_src,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
else:
output, coverage, _ = layer(output, context, pos_emb, dec_attn_mask, mask_src)
if self.reversible:
output = output_1 + output_2
# normalize and take the last time step
output = self.postprocess_layer(output)
output = output[-1].unsqueeze(0)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = context
return output_dict
def step_streaming(self, input, decoder_state):
"""Step function in streaming case"""
context = decoder_state.context
lang = decoder_state.tgt_lang
streaming_state = decoder_state.streaming_state
# for global model: push the context in
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1) # B x T
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
# use the last value of input to continue decoding
if input.size(1) > 1:
input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
else:
input_ = input.transpose(0, 1)
emb = self.word_lut(input_) * math.sqrt(self.model_size)
input = input.transpose(0, 1) # B x T to T x B
klen = input.size(0)
# If we start a new sentence to decode: reset the context memory
if klen == 1:
streaming_state.reset_context_memory()
if self.use_language_embedding:
lang_emb = self.language_embeddings(lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
bos_emb = lang_emb.expand_as(emb[0])
emb[0] = bos_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
# need to manually define src_lengths and tgt_lengths here
src_lengths = torch.LongTensor([context.size(0)])
tgt_lengths = torch.LongTensor([1])
if context is not None:
context_attn_mask = self.create_context_mask(input, src, src_lengths, tgt_lengths)
context_attn_mask = context_attn_mask.unsqueeze(0)
else:
context_attn_mask = None
dec_attn_mask = self.create_self_attn_mask(input, tgt_lengths, streaming_state.prev_tgt_mem_size)
dec_attn_mask = dec_attn_mask[:, -1:, :]
klen = 1 + streaming_state.prev_tgt_mem_size
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
output = emb
for i, layer in enumerate(self.layer_modules):
# T x B x d_model
buffer = streaming_state.tgt_buffer[i]
# output, coverage = layer(output, context, pos_emb, self.r_w_bias, self.r_r_bias, dec_attn_mask, mask_src)
# reuse_source = True if input.size(1) == 1 else False
reuse_source = True
# reuse source is True in this case because we can reuse the context ...
output, coverage, buffer = layer(output, context, pos_emb, dec_attn_mask, context_attn_mask,
incremental=True, incremental_cache=buffer, reuse_source=reuse_source)
streaming_state.tgt_buffer[i] = buffer
output = self.postprocess_layer(output)
streaming_state.prev_tgt_mem_size += 1
streaming_state.prune_target_memory(self.max_memory_size)
extra_context = context[-self.extra_context_size:].detach()
output_dict = defaultdict(lambda: None, {'hidden': output, 'coverage': coverage, 'context': context})
output_dict['streaming_state'] = streaming_state
return output_dict
class RelativeTransformer(Transformer):
def create_decoder_state(self, batch, beam_size=1, type=1, streaming=False, previous_decoding_state=None,
factorize=True,
pretrained_layer_states=None, **kwargs):
"""
Generate a new decoder state based on the batch input
:param factorize:
:param pretrained_layer_states:
:param previous_decoding_state:
:param streaming:
:param type:
:param batch: Batch object (may not contain target during decoding)
:param beam_size: Size of beam used in beam search
:return:
"""
# in this case batch size should be 1
src = batch.get('source')
src_pos = batch.get('source_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
src_transposed = src.transpose(0, 1)
if previous_decoding_state is None:
# if the previous stream is None (the first segment in the stream)
# then proceed normally like normal translation
# init a new stream state
streaming_state = self.init_stream() if streaming else None
encoder_output = self.encoder(src_transposed, input_pos=src_pos,
input_lang=src_lang, src_lengths=src_lengths,
streaming=streaming, streaming_state=streaming_state,
factorize=factorize, pretrained_layer_states=pretrained_layer_states)
if streaming:
decoder_state = StreamDecodingState(src, tgt_lang, encoder_output['context'],
encoder_output['src_mask'],
beam_size=beam_size, model_size=self.model_size, type=type,
cloning=True, streaming_state=streaming_state)
else:
decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'],
encoder_output['src_mask'],
beam_size=beam_size, model_size=self.model_size, type=type)
else:
streaming_state = previous_decoding_state.streaming_state
# to have the same batch/beam size with the previous memory ..
src_transposed = src_transposed.repeat(beam_size, 1)
src = src.repeat(1, beam_size)
encoder_output = self.encoder(src_transposed, input_pos=src_pos,
input_lang=src_lang, src_lengths=src_lengths,
streaming=True, streaming_state=streaming_state)
context = encoder_output['context']
if self.decoder.extra_context_size > 0:
# print("Using extra context with extra %d states" % self.decoder.extra_context_size)
# print("")
prev_context = previous_decoding_state.context
extra_context = prev_context[-self.decoder.extra_context_size:].detach()
context = torch.cat([extra_context, context], dim=0)
prev_src = previous_decoding_state.src[-self.decoder.extra_context_size:].detach()
src = torch.cat([prev_src, src], dim=0)
decoder_state = StreamDecodingState(src, tgt_lang, context,
encoder_output['src_mask'],
beam_size=beam_size, model_size=self.model_size, type=type,
cloning=False, streaming_state=streaming_state)
return decoder_state
def init_stream(self):
param = next(self.parameters())
layers = self.decoder.layers
streaming_state = StreamState(layers, self.decoder.max_memory_size, param.device, param.dtype)
return streaming_state
def step(self, input_t, decoder_state, streaming=False):
"""
Decoding function:
generate new decoder output based on the current input and current decoder state
the decoder state is updated in the process
:param streaming:
:param input_t: the input word index at time t
:param decoder_state: object DecoderState containing the buffers required for decoding
:return: a dictionary containing: log-prob output and the attention coverage
"""
output_dict = self.decoder.step(input_t, decoder_state, streaming=streaming)
output_dict['src'] = decoder_state.src.transpose(0, 1)
log_prob = self.generator[0](output_dict)['logits'].squeeze(0)
log_prob = F.log_softmax(log_prob.float(), dim=-1)
coverage = output_dict['coverage']
last_coverage = coverage[:, -1, :].squeeze(1)
output_dict['log_prob'] = log_prob
output_dict['coverage'] = last_coverage
return output_dict
def set_memory_size(self, src_memory_size, tgt_memory_size):
self.encoder.max_memory_size = src_memory_size
self.decoder.max_memory_size = tgt_memory_size
class StreamState(object):
def __init__(self, nlayers, mem_len, device, dtype, training=True):
# Currently I implement two types of stream states
self.src_buffer = defaultdict(lambda: None)
self.prev_src_mem_size = 0
self.src_lengths = []
self.tgt_buffer = defaultdict(lambda: None)
self.prev_tgt_mem_size = 0
self.tgt_lengths = []
self.training = training
self.mem_len = mem_len
self.nlayers = nlayers
if self.training:
# initialize the memory
self.src_mems = []
self.tgt_mems = []
for i in range(self.nlayers + 1):
empty = torch.empty(0, dtype=dtype, device=device)
self.src_mems.append(empty)
empty = torch.empty(0, dtype=dtype, device=device)
self.tgt_mems.append(empty)
self.extra_context = None
self.context_memory = None
def prune_source_memory(self, mem_size):
pruning = mem_size < self.prev_src_mem_size
self.prev_src_mem_size = min(mem_size, self.prev_src_mem_size)
if pruning:
for i in self.src_buffer:
if self.src_buffer[i] is not None:
for key in self.src_buffer[i]:
self.src_buffer[i][key] = self.src_buffer[i][key][-mem_size:]
def prune_target_memory(self, mem_size):
pruning = mem_size < self.prev_tgt_mem_size
self.prev_tgt_mem_size = min(mem_size, self.prev_tgt_mem_size)
if pruning:
for i in self.tgt_buffer:
if self.tgt_buffer[i] is not None:
for key in self.tgt_buffer[i]:
# Don't prune the buffer for enc-dec context, only prune the memory
if key not in ['c_k', 'c_v']:
self.tgt_buffer[i][key] = self.tgt_buffer[i][key][-mem_size:]
def get_beam_buffer(self, beam_id):
buffer = dict()
for i in self.tgt_buffer:
buffer[i] = dict()
buffer[i]['v'] = self.tgt_buffer[i]['v'].index_select(1, beam_id) # the batch dim is 1
buffer[i]['k'] = self.tgt_buffer[i]['k'].index_select(1, beam_id)
return buffer
def set_beam_buffer(self, sent_states):
# assert(len(sent_states) == len(self.tgt_buffer))
tensor = self.tgt_buffer[0]['v']
hidden_size = tensor.size(-1)
# first let's try with min_length
beam_size = len(sent_states)
min_length = min([sent_states[b]['hidden_buffer'][0]['k'].size(0) for b in range(beam_size)])
mem_length = min_length
for l in self.tgt_buffer:
self.tgt_buffer[l]['v'] = tensor.new(mem_length, beam_size, hidden_size).zero_()
self.tgt_buffer[l]['k'] = tensor.new(mem_length, beam_size, hidden_size).zero_()
for b in range(beam_size):
self.tgt_buffer[l]['v'][:, b, :].copy_(sent_states[b]['hidden_buffer'][l]['v'][-mem_length:, 0])
self.tgt_buffer[l]['k'][:, b, :].copy_(sent_states[b]['hidden_buffer'][l]['k'][-mem_length:, 0])
# When we start a sentence a new, the context key and value buffers need to be reset
def reset_context_memory(self):
for l in self.tgt_buffer:
buffer_ = self.tgt_buffer[l]
buffer_.pop('c_k', None)
buffer_.pop('c_v', None)
def reset_target_memory(self):
for l in self.tgt_buffer:
buffer_ = self.tgt_buffer[l]
buffer_.pop('k', None)
buffer_.pop('v', None)
self.prev_tgt_mem_size = 0
def update_src_mems(self, hids, qlen):
# does not deal with None
if self.src_mems is None:
return None
mlen = self.src_mems[0].size(0) if self.src_mems is not None else 0
# mems is not None
assert len(hids) == len(self.src_mems), 'len(hids) != len(mems)'
        # There are `mlen + qlen` steps that can be cached into mems.
        # We only keep the most recent `self.mem_len` of them, i.e. the
        # tokens from `mlen + qlen - self.mem_len` to `mlen + qlen`.
with torch.no_grad():
new_mems = []
end_idx = mlen + qlen
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([self.src_mems[i], hids[i]], dim=0)
extra_mem = cat[beg_idx:end_idx].detach()
new_mems.append(extra_mem)
# Important:
self.src_mems = new_mems
def update_tgt_mems(self, hids, qlen):
# does not deal with None
if self.tgt_mems is None:
return None
mlen = self.tgt_mems[0].size(0) if self.tgt_mems is not None else 0
# mems is not None
assert len(hids) == len(self.tgt_mems), 'len(hids) != len(mems)'
        # There are `mlen + qlen` steps that can be cached into mems.
        # We only keep the most recent `self.mem_len` of them, i.e. the
        # tokens from `mlen + qlen - self.mem_len` to `mlen + qlen`.
with torch.no_grad():
new_mems = []
end_idx = mlen + qlen
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([self.tgt_mems[i], hids[i]], dim=0)
new_mems.append(cat[beg_idx:end_idx].detach())
# Important:
self.tgt_mems = new_mems
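# Illustrative sketch (not part of the model): the memory-window arithmetic used by
# StreamState.update_src_mems / update_tgt_mems above, shown on plain tensors.
# `_demo_memory_window` is a hypothetical helper added only for documentation.
def _demo_memory_window(mlen=3, qlen=2, mem_len=4, d=8):
    import torch
    old_mem = torch.zeros(mlen, 1, d)          # previously cached states
    new_hid = torch.ones(qlen, 1, d)           # states produced by the current segment
    end_idx = mlen + qlen
    beg_idx = max(0, end_idx - mem_len)        # keep at most `mem_len` most recent steps
    cat = torch.cat([old_mem, new_hid], dim=0)
    new_mem = cat[beg_idx:end_idx].detach()
    assert new_mem.size(0) == min(mem_len, end_idx)
    return new_mem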
class StreamDecodingState(DecoderState):
# We need to somehow create the state w.r.t the previous states of the encoder and decoder
def __init__(self, src, tgt_lang, context, src_mask, beam_size=1, model_size=512,
cloning=True, streaming_state=None, **kwargs):
self.beam_size = beam_size
self.model_size = model_size
self.src_mask = None
# self.attention_buffers = dict()
self.streaming_state = streaming_state
        bsz = src.size(1)  # this value should be 1 for streaming decoding
if cloning:
new_order = torch.arange(bsz).view(-1, 1).repeat(1, self.beam_size).view(-1)
new_order = new_order.to(src.device)
if context is not None:
self.context = context.index_select(1, new_order)
else:
self.context = None
            self.src = src.index_select(1, new_order)  # src is time-first, so dim 1 is the batch dimension
else:
self.context = context
self.src = src
self.concat_input_seq = False
self.tgt_lang = tgt_lang
self.origin = torch.arange(self.beam_size).to(src.device)
# to know where each hypothesis comes from the previous beam
def get_beam_buffer(self, beam_id):
return self.streaming_state.get_beam_buffer(beam_id)
def set_beam_buffer(self, sent_states):
return self.streaming_state.set_beam_buffer(sent_states)
def update_attention_buffer(self, buffer, layer):
self.attention_buffers[layer] = buffer # dict of 2 keys (k, v) : T x B x H
# For the new decoder version only
def _reorder_incremental_state(self, reorder_state):
if self.context is not None:
self.context = self.context.index_select(1, reorder_state)
if self.src_mask is not None:
self.src_mask = self.src_mask.index_select(0, reorder_state)
self.src = self.src.index_select(1, reorder_state)
for l in self.streaming_state.src_buffer:
buffer_ = self.streaming_state.src_buffer[l]
if buffer_ is not None:
for k in buffer_.keys():
if buffer_[k] is not None:
t_, br_, d_ = buffer_[k].size()
buffer_[k] = buffer_[k].index_select(1, reorder_state) # 1 for time first
for l in self.streaming_state.tgt_buffer:
buffer_ = self.streaming_state.tgt_buffer[l]
if buffer_ is not None:
for k in buffer_.keys():
if buffer_[k] is not None:
t_, br_, d_ = buffer_[k].size()
buffer_[k] = buffer_[k].index_select(1, reorder_state) # 1 for time first
if self.streaming_state.src_mems is not None:
for l in range(len(self.streaming_state.src_mems)):
mems = self.streaming_state.src_mems[l]
if mems.size(0) > 0:
self.streaming_state.src_mems[l] = mems.index_select(1, reorder_state)
if self.streaming_state.tgt_mems is not None:
for l in range(len(self.streaming_state.tgt_mems)):
mems = self.streaming_state.tgt_mems[l]
if mems.size(0) > 0:
self.streaming_state.tgt_mems[l] = mems.index_select(1, reorder_state)
if self.streaming_state.context_memory is not None:
self.streaming_state.context_memory = self.streaming_state.context_memory.index_select(1, reorder_state)
self.origin = self.origin.index_select(0, reorder_state)
def prune_complete_beam(self, active_idx, remaining_sents):
pass
def update_beam(self, beam, b, remaining_sents, idx):
pass
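# Illustrative sketch (not part of the model): how _reorder_incremental_state above
# reorders time-first buffers. For tensors shaped [T x (batch*beam) x H] the beam/batch
# dimension is dim 1, so reordering hypotheses is a single index_select along dim 1.
# `_demo_reorder_beam_buffer` is a hypothetical helper added only for documentation.
def _demo_reorder_beam_buffer():
    import torch
    t, beam, h = 5, 4, 8
    buffer = torch.arange(beam).float().view(1, beam, 1).expand(t, beam, h).contiguous()
    # e.g. hypotheses 2 and 0 survive and are duplicated
    reorder_state = torch.tensor([2, 2, 0, 0])
    reordered = buffer.index_select(1, reorder_state)
    assert torch.equal(reordered[:, 0], buffer[:, 2])
    assert torch.equal(reordered[:, 3], buffer[:, 0])
    return reordered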
| 49,047
| 40.11316
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/relative_transformer_layers.py
|
import torch
import torch.nn as nn
import onmt
from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Linear
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.utils import flip
from onmt.modules.bottle import Bottle
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.linear import group_linear, FeedForwardSwish, FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
class RelativeTransformerEncoderLayer(nn.Module):
# def __init__(self, h, d_model, p, d_ff, attn_p=0.1, variational=False, death_rate=0.0, **kwargs):
def __init__(self, opt, death_rate=0.0, **kwargs):
super(RelativeTransformerEncoderLayer, self).__init__()
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.fast_self_attention = opt.fast_self_attention
self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
d_head = opt.model_size // opt.n_heads
if not self.fast_self_attention:
self.multihead = RelPartialLearnableMultiHeadAttn(opt.n_heads, opt.model_size,
d_head, dropatt=opt.attn_dropout)
else:
self.multihead = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
        print(opt.fast_feed_forward)
if not opt.fast_feed_forward:
feedforward = FeedForward(opt.model_size, opt.inner_size, opt.dropout, variational=self.variational)
self.feedforward = Bottle(feedforward)
else:
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational)
def forward(self, input, pos_emb, attn_mask, incremental=False, incremental_cache=None, mems=None):
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
# memory for transformer-xl caching
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
query = self.preprocess_attn(input)
if not self.fast_self_attention:
out, _, incremental_cache = self.multihead(query, pos_emb, attn_mask=attn_mask, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
else:
out, _ = self.multihead(query, pos_emb, attn_mask, None, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
if incremental:
return input, incremental_cache
return input
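# Illustrative sketch (not part of the model): the stochastic-depth ("death rate") logic
# used in the forward pass above. During training a layer is skipped with probability
# `death_rate`; when it does run, its output is rescaled by 1 / (1 - death_rate) so the
# expected residual contribution matches evaluation, where the layer always runs.
# The dropout-add performed by PrePostProcessing is omitted here.
# `_demo_stochastic_depth` is a hypothetical helper added only for documentation.
def _demo_stochastic_depth(residual, layer_out, death_rate=0.5, training=True):
    import torch
    coin = True
    if training and death_rate > 0:
        coin = (torch.rand(1)[0].item() >= death_rate)
    if coin:
        if training and death_rate > 0:
            layer_out = layer_out / (1 - death_rate)
        return residual + layer_out
    return residual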
class RelativeTransformerDecoderLayer(nn.Module):
# def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, ignore_source=False,
# variational=False, death_rate=0.0):
def __init__(self, opt, death_rate=0.0):
super(RelativeTransformerDecoderLayer, self).__init__()
self.ignore_source = opt.ignore_source
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.fast_self_attention = opt.fast_self_attention
self.fast_xattention = opt.fast_xattention
# self.lfv_multilingual = opt.lfv_multilingual
self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if not self.ignore_source:
self.preprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if opt.fast_xattention:
self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)
else:
self.multihead_src = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=2)
self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
d_head = opt.model_size // opt.n_heads
if not self.fast_self_attention:
self.multihead_tgt = RelPartialLearnableMultiHeadAttn(opt.n_heads, opt.model_size, d_head,
dropatt=opt.attn_dropout)
else:
self.multihead_tgt = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
# if not opt.fast_feed_forward:
# feedforward = FeedForward(opt.model_size, opt.inner_size, opt.dropout, variational=self.variational)
# self.feedforward = Bottle(feedforward)
# else:
# self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
# variational=self.variational)
feedforward = FeedForward(opt.model_size, opt.inner_size, opt.dropout, variational=self.variational)
self.feedforward = Bottle(feedforward)
# if opt.lfv_multilingual:
# self.lid_net = lid_net
# self.lfv_mapper = nn.Linear(opt.bottleneck_size, opt.model_size)
# else:
# self.lid_net = None
# self.lfv_mapper = None
# def forward(self, input, context, pos_emb, r_w_bias, r_r_bias, mask_tgt, mask_src):
def forward(self, input, context, pos_emb, mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=True, mems=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
# input and context should be time first ?
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
query = self.preprocess_attn(input)
if self.fast_self_attention:
out, _ = self.multihead_tgt(query, pos_emb, None, mask_tgt, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
else:
out, _, incremental_cache = self.multihead_tgt(query, pos_emb, attn_mask=mask_tgt, mems=mems,
incremental=incremental,
incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
if not self.ignore_source:
query = self.preprocess_src_attn(input)
incremental_source = incremental and reuse_source
out, coverage = self.multihead_src(query, context, context, mask_src,
incremental=incremental_source,
incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_src_attn(out, input)
else:
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
else:
coverage = None
return input, coverage, incremental_cache
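# Illustrative sketch (not part of the model): the pre-norm residual ordering that the
# docstrings above describe (layernorm > sublayer > dropout > residual), written with
# plain PyTorch modules instead of PrePostProcessing. `_DemoPreNormBlock` is a
# hypothetical module added only for documentation; it relies on the `torch.nn` import
# at the top of this file.
class _DemoPreNormBlock(nn.Module):
    def __init__(self, d_model, sublayer, dropout=0.1):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.sublayer = sublayer            # e.g. an attention or feed-forward module
        self.dropout = nn.Dropout(dropout)
    def forward(self, x):
        # layernorm > sublayer > dropout > residual add
        return x + self.dropout(self.sublayer(self.norm(x)))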
| 10,207
| 44.775785
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/lstm.py
|
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from collections import defaultdict
import math
import onmt
from onmt.modules.base_seq2seq import NMTModel, DecoderState
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.dropout import embedded_dropout, switchout
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
import time
class SpeechLSTMEncoder(nn.Module):
def __init__(self, opt, embedding, encoder_type='audio'):
super(SpeechLSTMEncoder, self).__init__()
self.opt = opt
self.model_size = opt.model_size
if hasattr(opt, 'encoder_layers') and opt.encoder_layers != -1:
self.layers = opt.encoder_layers
else:
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.input_type = encoder_type
self.cnn_downsampling = opt.cnn_downsampling
self.switchout = 0.0 # for speech it has to be
        self.variational_dropout = opt.variational_dropout
self.use_language_embedding = opt.use_language_embedding
self.language_embedding_type = opt.language_embedding_type
self.time = opt.time
self.lsh_src_attention = opt.lsh_src_attention
self.reversible = opt.src_reversible
self.multilingual_factorized_weights = opt.multilingual_factorized_weights
self.mfw_rank = opt.mfw_rank
feature_size = opt.input_size
self.channels = 1
if opt.upsampling:
feature_size = feature_size // 4
if not self.cnn_downsampling:
self.audio_trans = nn.Linear(feature_size, self.model_size)
torch.nn.init.xavier_uniform_(self.audio_trans.weight)
else:
channels = self.channels
cnn = [nn.Conv2d(channels, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32),
nn.Conv2d(32, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32)]
feat_size = (((feature_size // channels) - 3) // 4) * 32
self.audio_trans = nn.Sequential(*cnn)
self.linear_trans = nn.Linear(feat_size, self.model_size)
self.unidirect = False
self.rnn = nn.LSTM(input_size=self.model_size, hidden_size=self.model_size, num_layers=self.layers,
bidirectional=(not self.unidirect), bias=True, dropout=self.dropout, batch_first=True)
if self.multilingual_factorized_weights:
from onmt.modules.weight_control_lstm import WeightFactoredLSTM
self.rnn = WeightFactoredLSTM(self.rnn, dropout=opt.weight_drop, n_languages=opt.n_languages,
rank=self.mfw_rank, multiplicative=opt.mfw_multiplicative,
activation=opt.mfw_activation)
elif opt.weight_drop > 0:
from onmt.modules.weight_control_lstm import WeightDrop
weight_list = list()
for i in range(self.layers):
weight_list.append('weight_hh_l%d' % i)
weight_list.append('weight_hh_l%d_reverse' % i)
self.rnn = WeightDrop(self.rnn, weight_list, dropout=opt.weight_drop)
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d',
                                                  variational=self.variational_dropout)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
def rnn_fwd(self, seq, mask, hid, src_lang=None):
"""
:param src_lang:
:param seq:
:param mask:
:param hid:
:return:
"""
if mask is not None:
lengths = mask.sum(-1).float().cpu()
seq = pack_padded_sequence(seq, lengths, batch_first=True, enforce_sorted=False)
if self.multilingual_factorized_weights:
seq, hid = self.rnn(seq, hid, indices=src_lang)
else:
seq, hid = self.rnn(seq, hid)
seq = pad_packed_sequence(seq, batch_first=True)[0]
else:
if self.multilingual_factorized_weights:
seq, hid = self.rnn(seq, hid, indices=src_lang)
else:
seq, hid = self.rnn(seq, hid)
return seq, hid
def forward(self, input, input_pos=None, input_lang=None, hid=None,
return_states=False, pretrained_layer_states=None, **kwargs):
if not self.cnn_downsampling:
mask_src = input.narrow(2, 0, 1).squeeze(2).gt(onmt.constants.PAD)
dec_attn_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
input.size(1), -1)
emb = emb.type_as(input)
else:
long_mask = input.narrow(2, 0, 1).squeeze(2).gt(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
# first resizing to fit the CNN format
input = input.view(input.size(0), input.size(1), -1, self.channels)
input = input.permute(0, 3, 1, 2)
input = self.audio_trans(input)
input = input.permute(0, 2, 1, 3).contiguous()
input = input.view(input.size(0), input.size(1), -1)
input = self.linear_trans(input)
mask_src = long_mask[:, 0:input.size(1) * 4:4]
dec_attn_mask = ~mask_src
            # both masks are B x T, subsampled by 4 to match the CNN output length
emb = input
layer_states = dict()
seq, hid = self.rnn_fwd(emb, mask_src, hid, src_lang=input_lang)
if not self.unidirect:
hidden_size = seq.size(2) // 2
seq = seq[:, :, :hidden_size] + seq[:, :, hidden_size:]
if return_states:
layer_states[0] = seq
# Summing the context
if pretrained_layer_states is not None:
seq = seq + pretrained_layer_states[0]
# layer norm
seq = self.postprocess_layer(seq)
output_dict = {'context': seq.transpose(0, 1), 'src_mask': dec_attn_mask}
if return_states:
output_dict['layer_states'] = layer_states
return output_dict
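# Illustrative sketch (not used by the model): the time/feature downsampling arithmetic
# behind the CNN front-end above. Two Conv2d(kernel=3, stride=2) layers shrink both the
# time axis and the feature axis roughly by 4x, which is why the frame mask is
# subsampled with long_mask[:, 0:T_out * 4:4] and why feat_size is computed as
# (((feature_size // channels) - 3) // 4) * 32. `_demo_cnn_downsampling` is a
# hypothetical helper added only for documentation (channels fixed to 1).
def _demo_cnn_downsampling(time=100, feature_size=40):
    import torch
    import torch.nn as nn
    cnn = nn.Sequential(
        nn.Conv2d(1, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32),
        nn.Conv2d(32, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32),
    )
    x = torch.randn(2, 1, time, feature_size)      # [batch, channels, time, features]
    y = cnn(x)                                     # [batch, 32, ~time/4, ~features/4]
    t_out = y.size(2)
    flat = y.permute(0, 2, 1, 3).reshape(2, t_out, -1)
    # the flattened feature dim matches the closed form used by the encoder
    assert flat.size(-1) == (((feature_size // 1) - 3) // 4) * 32
    return t_out, flat.size(-1)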
class SpeechLSTMDecoder(nn.Module):
def __init__(self, opt, embedding, language_embeddings=None, **kwargs):
super(SpeechLSTMDecoder, self).__init__()
# Keep for reference
# Define layers
self.model_size = opt.model_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.variational_dropout = opt.variational_dropout
self.multilingual_factorized_weights = opt.multilingual_factorized_weights
self.mfw_rank = opt.mfw_rank
self.encoder_type = opt.encoder_type
self.n_languages = opt.n_languages
self.lstm = nn.LSTM(self.model_size, self.model_size, self.layers, dropout=self.dropout, batch_first=True)
if self.multilingual_factorized_weights:
from onmt.modules.weight_control_lstm import WeightFactoredLSTM
self.lstm = WeightFactoredLSTM(self.lstm, dropout=opt.weight_drop, n_languages=opt.n_languages,
rank=self.mfw_rank)
elif opt.weight_drop > 0:
from onmt.modules.weight_control_lstm import WeightDrop
# todo: change so that dropout applied on all layers
weight_list = list()
for i in range(self.layers):
weight_list.append('weight_hh_l%d' % i)
self.lstm = WeightDrop(self.lstm, weight_list, dropout=opt.weight_drop)
self.fast_xattention = opt.fast_xattention
self.n_head = 1 # fixed to always use 1 head
# also fix attention dropout to 0.0
if self.multilingual_factorized_weights:
self.fast_xattention = True
from onmt.modules.multilingual_factorized.encdec_attention import MFWEncdecMultiheadAttn
self.multihead_tgt = MFWEncdecMultiheadAttn(self.n_head, opt.model_size, 0.0, n_languages=opt.n_languages,
rank=opt.mfw_rank, weight_drop=0.0)
else:
if opt.fast_xattention:
self.multihead_tgt = EncdecMultiheadAttn(self.n_head, opt.model_size, 0.0)
else:
self.multihead_tgt = MultiHeadAttention(self.n_head, opt.model_size, attn_p=0.0, share=3)
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d',
variational=self.variational_dropout)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.preprocess_attn = PrePostProcessing(self.model_size, 0, sequence='n')
self.word_lut = embedding
self.encoder_cnn_downsampling = opt.cnn_downsampling
self.language_embeddings = language_embeddings
self.use_language_embedding = opt.use_language_embedding
self.language_embedding_type = opt.language_embedding_type
if self.language_embedding_type == 'concat':
self.projector = nn.Linear(opt.model_size * 2, opt.model_size)
print("* Create LSTM Decoder with %d layers." % self.layers)
def process_embedding(self, input, input_lang=None):
return input
def step(self, input, decoder_state, **kwargs):
context = decoder_state.context
buffer = decoder_state.lstm_buffer
attn_buffer = decoder_state.attention_buffers
hid = buffer["hidden_state"]
cell = buffer["cell_state"]
tgt_lang = decoder_state.tgt_lang
buffering = decoder_state.buffering
if hid is not None:
hid_cell = (hid, cell)
else:
hid_cell = None
lang = decoder_state.tgt_lang
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
if input.size(1) > 1:
input_ = input[:, -1].unsqueeze(1)
else:
input_ = input
emb = self.word_lut(input_)
emb = emb * math.sqrt(self.model_size)
if self.use_language_embedding:
# print("Using language embedding")
lang_emb = self.language_embeddings(lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
dec_emb = emb + lang_emb.unsqueeze(1)
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
bos_emb = lang_emb.expand_as(emb[0])
emb[0] = bos_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
dec_emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
else:
dec_emb = emb
if context is not None:
if self.encoder_type == "audio":
if src.data.dim() == 3:
if self.encoder_cnn_downsampling:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
elif self.encoder_cnn_downsampling:
long_mask = src.eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
# if input_.size(0) > 1 and input_.size(1) > 1:
#
# lengths = input.gt(onmt.constants.PAD).sum(-1)
#
# dec_in = pack_padded_sequence(dec_emb, lengths, batch_first=True, enforce_sorted=False)
#
# dec_out, hidden = self.lstm(dec_in, hid_cell)
# dec_out = pad_packed_sequence(dec_out, batch_first=True)[0]
# else:
if self.multilingual_factorized_weights:
dec_out, hid_cell = self.lstm(dec_emb, hid_cell, indices=tgt_lang)
else:
dec_out, hid_cell = self.lstm(dec_emb, hid_cell)
decoder_state.update_lstm_buffer(hid_cell)
lt = input_.size(1)
attn_mask = mask_src.expand(-1, lt, -1) if not self.fast_xattention else mask_src.squeeze(1)
# dec_out = self.postprocess_layer(dec_out)
dec_out = self.preprocess_attn(dec_out)
dec_out = dec_out.transpose(0, 1)
if buffering:
buffer = attn_buffer[0]
if buffer is None:
buffer = dict()
if self.multilingual_factorized_weights:
output, coverage = self.multihead_tgt(dec_out, context, context, tgt_lang, tgt_lang, attn_mask,
incremental=True, incremental_cache=buffer)
else:
output, coverage = self.multihead_tgt(dec_out, context, context, attn_mask,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, 0)
else:
if self.multilingual_factorized_weights:
output, coverage = self.multihead_tgt(dec_out, context, context, tgt_lang, tgt_lang, attn_mask)
else:
output, coverage = self.multihead_tgt(dec_out, context, context, attn_mask)
output = (output + dec_out)
output = self.postprocess_layer(output)
output_dict = defaultdict(lambda: None, {'hidden': output, 'coverage': coverage, 'context': context})
return output_dict
def forward(self, dec_seq, enc_out, src, tgt_lang=None, hid=None, **kwargs):
emb = embedded_dropout(self.word_lut, dec_seq, dropout=self.word_dropout if self.training else 0)
emb = emb * math.sqrt(self.model_size)
if self.use_language_embedding:
# print("Using language embedding")
lang_emb = self.language_embeddings(tgt_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
dec_emb = emb + lang_emb.unsqueeze(1)
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
bos_emb = lang_emb.expand_as(emb[0])
emb[0] = bos_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
dec_emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
else:
dec_emb = emb
if enc_out is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0: enc_out.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
# if dec_seq.size(0) > 1 and dec_seq.size(1) > 1:
# lengths = dec_seq.gt(onmt.constants.PAD).sum(-1)
# dec_in = pack_padded_sequence(dec_emb, lengths, batch_first=True, enforce_sorted=False)
# dec_out, hid = self.lstm(dec_in, hid)
# dec_out = pad_packed_sequence(dec_out, batch_first=True)[0]
# else:
if self.multilingual_factorized_weights:
dec_out, hid = self.lstm(dec_emb, hid, indices=tgt_lang)
else:
dec_out, hid = self.lstm(dec_emb, hid)
lt = dec_seq.size(1)
attn_mask = mask_src.expand(-1, lt, -1) if not self.fast_xattention else mask_src.squeeze(1)
# dec_out = self.postprocess_layer(dec_out)
dec_out = self.preprocess_attn(dec_out)
dec_out = dec_out.transpose(0, 1).contiguous()
enc_out = enc_out.contiguous()
if self.multilingual_factorized_weights:
output, coverage = self.multihead_tgt(dec_out, enc_out, enc_out, tgt_lang, tgt_lang, attn_mask)
else:
output, coverage = self.multihead_tgt(dec_out, enc_out, enc_out, attn_mask)
output = (output + dec_out)
output = self.postprocess_layer(output)
output_dict = defaultdict(lambda: None, {'hidden': output, 'coverage': coverage, 'context': enc_out})
return output_dict
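# Illustrative sketch (not part of the model): the two padding-mask layouts used right
# before the encoder-decoder attention above. The mask starts as [B x 1 x T_src]
# (True at PAD positions); the slow attention path expands it over the target length,
# while the fast path only needs the per-source-token mask. `_demo_encdec_attn_mask`
# is a hypothetical helper added only for documentation; `pad` stands in for
# onmt.constants.PAD.
def _demo_encdec_attn_mask(fast_xattention=False, pad=0):
    import torch
    src = torch.tensor([[5, 6, 7, pad, pad],
                        [8, 9, pad, pad, pad]])          # B x T_src token ids
    lt = 3                                               # target length
    mask_src = src.eq(pad).unsqueeze(1)                  # B x 1 x T_src
    if not fast_xattention:
        attn_mask = mask_src.expand(-1, lt, -1)          # B x T_tgt x T_src
    else:
        attn_mask = mask_src.squeeze(1)                  # B x T_src
    return attn_mask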
class SpeechLSTMSeq2Seq(NMTModel):
def __init__(self, encoder, decoder, generator=None, rec_decoder=None, rec_generator=None,
mirror=False, ctc=False):
super().__init__(encoder, decoder, generator, rec_decoder, rec_generator, ctc=ctc)
self.model_size = self.decoder.model_size
self.tgt_vocab_size = self.decoder.word_lut.weight.size(0)
if self.encoder.input_type == 'text':
self.src_vocab_size = self.encoder.word_lut.weight.size(0)
else:
self.src_vocab_size = 0
if self.ctc:
self.ctc_linear = nn.Linear(encoder.model_size, self.tgt_vocab_size)
def reset_states(self):
return
def forward(self, batch, target_mask=None, streaming=False, zero_encoder=False,
mirror=False, streaming_state=None, nce=False, pretrained_layer_states=None):
src = batch.get('source')
tgt = batch.get('target_input')
src_pos = batch.get('source_pos')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
tgt_lengths = batch.tgt_lengths
org_src = src
org_tgt = tgt
src = src.transpose(0, 1) # transpose to have batch first
tgt = tgt.transpose(0, 1)
encoder_output = self.encoder(src, input_pos=src_pos, input_lang=src_lang, src_lengths=src_lengths,
pretrained_layer_states=pretrained_layer_states)
encoder_output = defaultdict(lambda: None, encoder_output)
context = encoder_output['context']
if zero_encoder:
context.zero_()
src_mask = encoder_output['src_mask']
decoder_output = self.decoder(tgt, context, src,
tgt_lang=tgt_lang, input_pos=tgt_pos, streaming=streaming,
src_lengths=src_lengths, tgt_lengths=tgt_lengths,
streaming_state=streaming_state)
decoder_output = defaultdict(lambda: None, decoder_output)
output = decoder_output['hidden']
output_dict = defaultdict(lambda: None, decoder_output)
output_dict['hidden'] = output
output_dict['context'] = context
output_dict['src_mask'] = encoder_output['src_mask']
output_dict['src'] = src
output_dict['target_mask'] = target_mask
output_dict['reconstruct'] = None
output_dict['target'] = batch.get('target_output')
if self.training and nce:
output_dict = self.generator[0](output_dict)
else:
logprobs = self.generator[0](output_dict)['logits']
output_dict['logprobs'] = logprobs
if self.ctc:
output_dict['encoder_logits'] = self.ctc_linear(output_dict['context'])
return output_dict
def step(self, input_t, decoder_state):
output_dict = self.decoder.step(input_t, decoder_state)
output_dict['src'] = decoder_state.src.transpose(0, 1)
# squeeze to remove the time step dimension
log_prob = self.generator[0](output_dict)['logits'].squeeze(0)
log_prob = F.log_softmax(log_prob, dim=-1, dtype=torch.float32)
coverage = output_dict['coverage']
last_coverage = coverage[:, -1, :].squeeze(1)
output_dict['log_prob'] = log_prob
output_dict['coverage'] = last_coverage
return output_dict
def create_decoder_state(self, batch, beam_size=1, type=1, buffering=True, **kwargs):
"""
Generate a new decoder state based on the batch input
:param buffering:
:param streaming:
:param type:
:param batch: Batch object (may not contain target during decoding)
:param beam_size: Size of beam used in beam search
:return:
"""
src = batch.get('source')
src_pos = batch.get('source_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
# TxB -> BxT
src_transposed = src.transpose(0, 1)
encoder_output = self.encoder(src_transposed, input_pos=src_pos, input_lang=src_lang, src_lengths=src_lengths)
decoder_state = LSTMDecodingState(src, tgt_lang, encoder_output['context'],
beam_size=beam_size, model_size=self.model_size,
type=type, buffering=buffering)
return decoder_state
def decode(self, batch):
src = batch.get('source')
src_pos = batch.get('source_pos')
tgt_input = batch.get('target_input')
tgt_output = batch.get('target_output')
tgt_pos = batch.get('target_pos')
# tgt_atb = batch.get('target_atb') # a dictionary of attributes
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
src = src.transpose(0, 1)
tgt_input = tgt_input.transpose(0, 1)
batch_size = tgt_input.size(0)
context = self.encoder(src, input_pos=src_pos, input_lang=src_lang, src_lengths=src_lengths)['context']
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
decoder_output = self.decoder(tgt_input, context, src, tgt_lang=tgt_lang, src_lang=src_lang,
input_pos=tgt_pos)['hidden']
output = decoder_output
for dec_t, tgt_t in zip(output, tgt_output):
dec_out = defaultdict(lambda: None)
dec_out['hidden'] = dec_t.unsqueeze(0)
dec_out['src'] = src
dec_out['context'] = context
if isinstance(self.generator, nn.ModuleList):
gen_t = self.generator[0](dec_out)['logits']
else:
gen_t = self.generator(dec_out)['logits']
gen_t = F.log_softmax(gen_t, dim=-1, dtype=torch.float32)
gen_t = gen_t.squeeze(0)
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.constants.PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
gold_words += tgt_t.ne(onmt.constants.PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
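# Illustrative sketch (not part of the model): the per-token gold scoring used in
# decode() above. For each time step the log-probability of the reference token is
# gathered from the softmax output and PAD positions are zeroed before summing.
# `_demo_gold_scores` is a hypothetical helper added only for documentation; `pad`
# stands in for onmt.constants.PAD.
def _demo_gold_scores(pad=0):
    import torch
    import torch.nn.functional as F
    logits = torch.randn(4, 2, 10)                 # T x B x V
    tgt_output = torch.tensor([[3, 5],
                               [1, 2],
                               [4, pad],
                               [pad, pad]])        # T x B, padded references
    gold_scores = logits.new_zeros(2)
    for gen_t, tgt_t in zip(F.log_softmax(logits, dim=-1), tgt_output):
        tgt_t = tgt_t.unsqueeze(1)                 # B x 1
        scores = gen_t.gather(1, tgt_t)            # log-prob of the reference token
        scores.masked_fill_(tgt_t.eq(pad), 0)
        gold_scores += scores.squeeze(1)
    return gold_scores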
class LSTMDecodingState(DecoderState):
def __init__(self, src, tgt_lang, context, beam_size=1, model_size=512, type=2,
cloning=True, buffering=False):
self.beam_size = beam_size
self.model_size = model_size
self.lstm_buffer = dict()
self.lstm_buffer["hidden_state"] = None
self.lstm_buffer["cell_state"] = None
self.buffering = buffering
self.attention_buffers = defaultdict(lambda: None)
if type == 1:
# if audio only take one dimension since only used for mask
# raise NotImplementedError
self.original_src = src # TxBxC
self.concat_input_seq = True
if src is not None:
if src.dim() == 3:
self.src = src.narrow(2, 0, 1).squeeze(2).repeat(1, beam_size)
# self.src = src.repeat(1, beam_size, 1) # T x Bb x c
else:
self.src = src.repeat(1, beam_size)
else:
self.src = None
if context is not None:
self.context = context.repeat(1, beam_size, 1)
else:
self.context = None
self.input_seq = None
self.tgt_lang = tgt_lang
elif type == 2:
bsz = src.size(1) # src is T x B
new_order = torch.arange(bsz).view(-1, 1).repeat(1, self.beam_size).view(-1)
new_order = new_order.to(src.device)
if cloning:
                self.src = src.index_select(1, new_order)  # src is time-first, so dim 1 is the batch dimension
if context is not None:
self.context = context.index_select(1, new_order)
else:
self.context = None
else:
self.context = context
self.src = src
self.input_seq = None
self.concat_input_seq = False
self.tgt_lang = tgt_lang
else:
raise NotImplementedError
def update_lstm_buffer(self, buffer):
hid, cell = buffer
# hid and cell should have size [n_layer, batch_size, hidden_size]
self.lstm_buffer["hidden_state"] = hid
self.lstm_buffer["cell_state"] = cell
def update_attention_buffer(self, buffer, layer):
self.attention_buffers[layer] = buffer
def update_beam(self, beam, b, remaining_sents, idx):
if self.beam_size == 1:
return
# print(self.input_seq)
# print(self.src.shape)
for tensor in [self.src, self.input_seq]:
if tensor is None:
continue
t_, br = tensor.size()
sent_states = tensor.view(t_, self.beam_size, remaining_sents)[:, :, idx]
sent_states.copy_(sent_states.index_select(1, beam[b].getCurrentOrigin()))
for l in self.lstm_buffer:
buffer_ = self.lstm_buffer[l]
t_, br_, d_ = buffer_.size()
sent_states = buffer_.view(t_, self.beam_size, remaining_sents, d_)[:, :, idx, :]
sent_states.data.copy_(sent_states.data.index_select(1, beam[b].getCurrentOrigin()))
for l in self.attention_buffers:
buffers = self.attention_buffers[l]
if buffers is not None:
for k in buffers.keys():
buffer_ = buffers[k]
t_, br_, d_ = buffer_.size()
sent_states = buffer_.view(t_, self.beam_size, remaining_sents, d_)[:, :, idx, :]
sent_states.data.copy_(sent_states.data.index_select(1, beam[b].getCurrentOrigin()))
def prune_complete_beam(self, active_idx, remaining_sents):
model_size = self.model_size
def update_active_with_hidden(t):
if t is None:
return t
dim = t.size(-1)
# select only the remaining active sentences
view = t.data.view(-1, remaining_sents, dim)
new_size = list(t.size())
new_size[-2] = new_size[-2] * len(active_idx) // remaining_sents
return view.index_select(1, active_idx).view(*new_size)
def update_active_without_hidden(t):
if t is None:
return t
view = t.view(-1, remaining_sents)
new_size = list(t.size())
new_size[-1] = new_size[-1] * len(active_idx) // remaining_sents
new_t = view.index_select(1, active_idx).view(*new_size)
return new_t
self.context = update_active_with_hidden(self.context)
self.input_seq = update_active_without_hidden(self.input_seq)
if self.src.dim() == 2:
self.src = update_active_without_hidden(self.src)
elif self.src.dim() == 3:
t = self.src
dim = t.size(-1)
view = t.view(-1, remaining_sents, dim)
new_size = list(t.size())
new_size[-2] = new_size[-2] * len(active_idx) // remaining_sents
new_t = view.index_select(1, active_idx).view(*new_size)
self.src = new_t
for l in self.lstm_buffer:
buffer_ = self.lstm_buffer[l]
buffer = update_active_with_hidden(buffer_)
self.lstm_buffer[l] = buffer
for l in self.attention_buffers:
buffer_ = self.attention_buffers[l]
if buffer_ is not None:
for k in buffer_.keys():
buffer_[k] = update_active_with_hidden(buffer_[k])
# For the new decoder version only
def _reorder_incremental_state(self, reorder_state):
if self.context is not None:
self.context = self.context.index_select(1, reorder_state)
# if self.src_mask is not None:
# self.src_mask = self.src_mask.index_select(0, reorder_state)
self.src = self.src.index_select(1, reorder_state)
for l in self.attention_buffers:
buffer_ = self.attention_buffers[l]
if buffer_ is not None:
for k in buffer_.keys():
t_, br_, d_ = buffer_[k].size()
buffer_[k] = buffer_[k].index_select(1, reorder_state) # 1 for time first
for k in self.lstm_buffer:
buffer_ = self.lstm_buffer[k]
if buffer_ is not None:
self.lstm_buffer[k] = buffer_.index_select(1, reorder_state) # 1 because the first dim is n_layer
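# Illustrative sketch (not part of the model): the reshaping trick used by
# prune_complete_beam / update_active_with_hidden above. A buffer laid out as
# [... x (beam*remaining_sents) x dim] is viewed as (-1, remaining_sents, dim) so that
# finished sentences can be dropped with one index_select, then viewed back with the
# shrunken batch dimension. `_demo_prune_finished` is a hypothetical helper added only
# for documentation.
def _demo_prune_finished():
    import torch
    beam, remaining_sents, dim = 3, 4, 8
    t = torch.randn(5, beam * remaining_sents, dim)        # e.g. a time-first buffer
    active_idx = torch.tensor([0, 2])                      # sentences 1 and 3 finished
    view = t.view(-1, remaining_sents, dim)
    new_size = list(t.size())
    new_size[-2] = new_size[-2] * len(active_idx) // remaining_sents
    pruned = view.index_select(1, active_idx).view(*new_size)
    assert pruned.size() == (5, beam * len(active_idx), dim)
    return pruned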
| 30,801
| 39.002597
| 118
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/perceiver.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding
from onmt.modules.pre_post_processing import PrePostProcessing
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
from onmt.modules.sinusoidal_positional_encoding import SinusoidalPositionalEmbedding
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from .relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
from onmt.modules.checkpoint import checkpoint
# from torch.utils.checkpoint import checkpoint
from onmt.modules.identity import Identity
torch.set_printoptions(threshold=500000)
class SpeechTransformerEncoder(TransformerEncoder):
def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
self.death_rate = opt.death_rate
self.learnable_position_encoding = opt.learnable_position_encoding
self.layer_modules = list()
self.asynchronous = opt.asynchronous
self.max_memory_size = opt.max_memory_size
self.extra_context_size = opt.extra_context_size
self.experimental = opt.experimental
self.unidirectional = opt.unidirectional
self.reversible = opt.src_reversible
self.n_heads = opt.n_heads
self.fast_self_attn = opt.fast_self_attention
self.checkpointing = opt.checkpointing
self.mpw = opt.multilingual_partitioned_weights
self.multilingual_linear_projection = opt.multilingual_linear_projection
self.mln = opt.multilingual_layer_norm
self.no_input_scale = opt.no_input_scale
self.learnable_position_encoding = opt.learnable_position_encoding
self.max_pos_length = opt.max_pos_length
# TODO: multilingually linear transformation
# build_modules will be called from the inherited constructor
super().__init__(opt, dicts, positional_encoder, encoder_type, language_embeddings)
# learnable position encoding
if self.learnable_position_encoding:
# raise NotImplementedError
self.positional_encoder = None
else:
# or using pre-set sinusoidal
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
if self.multilingual_linear_projection:
self.linear_proj = nn.Parameter(torch.Tensor(opt.n_languages, self.model_size, self.model_size))
std_ = math.sqrt(2.0 / (self.model_size + self.model_size))
torch.nn.init.normal_(self.linear_proj, 0.0, std_)
self.mln = opt.multilingual_layer_norm
if not opt.rezero:
self.postprocess_layer = PrePostProcessing(opt.model_size, opt.dropout, sequence='n', multilingual=self.mln,
n_languages=opt.n_languages)
else:
self.postprocess_layer = Identity()
| 3,242
| 42.24
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/classifier.py
|
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
class TransformerClassifier(nn.Module):
"""Main model in 'Attention is all you need' """
def __init__(self, encoder, generator=None, mpc=False, **kwargs):
super().__init__()
self.encoder = encoder
self.generator = generator
self.num_classes = self.generator[0].linear.weight.size(0)
self.mpc = mpc
if mpc:
input_size = self.encoder.opt.input_size
model_size = self.encoder.opt.model_size
self.mpc_linear = nn.Linear(model_size, input_size)
if self.encoder.input_type == 'text':
self.src_vocab_size = self.encoder.word_lut.weight.size(0)
else:
self.src_vocab_size = 0
def forward(self, batch, *args, **kwargs):
if self.mpc and self.training:
# mask inputs with p=20%
batch.mask_mpc(p=0.2)
src = batch.get('source')
src_pos = batch.get('source_pos')
src_lang = batch.get('source_lang')
src_lengths = batch.src_lengths
src = src.transpose(0, 1) # transpose to have batch first
encoder_output = self.encoder(src, input_pos=src_pos, input_lang=src_lang, src_lengths=src_lengths)
# feed the encoder output to generator? Or average per frame?
encoder_output = defaultdict(lambda: None, encoder_output)
context = encoder_output['context']
# build the output dict based on decoder output
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = context
output_dict['context'] = context
output_dict['src_mask'] = encoder_output['src_mask']
output_dict['src'] = src
output_dict['target_mask'] = encoder_output['src_mask']
logprobs = self.generator[0](output_dict)['logits']
output_dict['logprobs'] = logprobs
# masked predictive coding
if self.mpc:
# mpc reconstruction
mpc_rec = self.mpc_linear(context)
output_dict['mpc'] = mpc_rec
output_dict['masked_positions'] = batch.get('masked_positions')
output_dict['original_source'] = batch.get('original_source')
return output_dict
def encode(self, batch):
src = batch.get('source')
src_pos = batch.get('source_pos')
src_lang = batch.get('source_lang')
src_lengths = batch.src_lengths
src = src.transpose(0, 1) # transpose to have batch first
encoder_output = self.encoder(src, input_pos=src_pos, input_lang=src_lang, src_lengths=src_lengths,
return_states=True)
layer_states = encoder_output['layer_states']
return layer_states
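# Illustrative sketch (not part of the model): one way to answer the "average per frame?"
# question raised in forward() above. Given frame-level context [T x B x H] and a padding
# mask shaped [B x 1 x T] (True at padded frames), a masked mean over time yields a single
# utterance-level vector per example. `_demo_masked_mean_pool` is a hypothetical helper
# added only for documentation; the classifier above feeds the frame-level context to the
# generator directly.
def _demo_masked_mean_pool(context, src_mask):
    # context: T x B x H, src_mask: B x 1 x T (True = PAD)
    mask = (~src_mask.squeeze(1)).transpose(0, 1).unsqueeze(-1).to(context.dtype)  # T x B x 1
    summed = (context * mask).sum(dim=0)               # B x H
    counts = mask.sum(dim=0).clamp(min=1.0)            # B x 1
    return summed / counts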
| 2,848
| 30.655556
| 107
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/lid_loss.py
|
import math
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
import onmt
import onmt.modules
from onmt.utils import flip
class CrossEntropyLIDLoss(_Loss):
"""
Class for managing efficient loss computation.
loss computations
Users can implement their own loss computation strategy by making
subclass of this one.
Args:
output_size: number of words in vocabulary()
"""
def __init__(self, output_size, label_smoothing):
super().__init__()
self.output_size = output_size
self.padding_idx = -1
self.smoothing_value = label_smoothing
self.confidence = 1.0 - label_smoothing
self.label_smoothing = label_smoothing
# use apex fast entropy implementation
        self.fast_xentropy = False
try:
import xentropy_cuda
from onmt.modules.optimized.softmax_xentropy import SoftmaxCrossEntropyLoss
self.softmax_xentropy = SoftmaxCrossEntropyLoss.apply
self.fast_xentropy = True
except (ModuleNotFoundError, AttributeError):
self.softmax_xentropy = None
self.fast_xentropy = False
def forward(self, lid_logits, labels, mask):
"""
:param lid_logits: list of [T x B x L] logits
:param mask: [B x T]
:return:
"""
# here we should use logits instead of softmax/logsoftmax
# prediction is done before the first Transformer layers
len_t, bsz = lid_logits.size(0), lid_logits.size(1)
# labels = labels.unsqueeze(0).unsqueeze(0).repeat(n_layers, len_t, 1)
# labels can have three different forms:
if labels.ndim == 1 and labels.size(0) == 1:
labels = labels.unsqueeze(0).repeat(len_t, bsz)
elif labels.ndim == 1 and labels.size(0) == bsz:
            labels = labels.unsqueeze(0).repeat(len_t, 1)
elif labels.ndim == 2:
            assert labels.size(0) == len_t and labels.size(1) == bsz
else:
raise NotImplementedError
# mask should be [B x T] -> [T x B]
mask = mask.transpose(0, 1)
# next we need to remove padding from labels and logits
# print(lid_logits.size(), labels.size(), mask.size())
logits = lid_logits.view(-1, lid_logits.size(-1))
gtruth = labels.view(-1)
padding_mask = mask.contiguous().long()
non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
# print(logits.size(), gtruth.size(), non_pad_indices.size())
logits = logits.index_select(0, non_pad_indices)
gtruth = gtruth.index_select(0, non_pad_indices)
label_smoothing = self.label_smoothing if self.training else 0.0
eps_i = self.smoothing_value if self.training else 0.0
# print(logits.size(), gtruth.size())
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
nll_loss = -lprobs.gather(1, gtruth.unsqueeze(1))
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
loss = (1. - label_smoothing) * nll_loss + eps_i * smooth_loss
# if not self.fast_xentropy:
#
# else:
# half_to_float = (logits.dtype == torch.half)
# loss = self.softmax_xentropy(logits, gtruth, label_smoothing, self.padding_idx, half_to_float)
# loss = loss.sum()
return loss
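# Illustrative sketch (not part of the model): the label-smoothing combination used at
# the end of forward() above, on a tiny example. The NLL of the gold class is mixed with
# the summed negative log-probabilities of all classes, weighted by the smoothing value.
# `_demo_label_smoothed_loss` is a hypothetical helper added only for documentation.
def _demo_label_smoothed_loss(label_smoothing=0.1):
    import torch
    import torch.nn.functional as F
    logits = torch.randn(6, 4)                     # 6 frames, 4 language classes
    gtruth = torch.tensor([0, 1, 2, 3, 0, 1])
    lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
    nll_loss = -lprobs.gather(1, gtruth.unsqueeze(1)).sum()
    smooth_loss = -lprobs.sum()                    # sums the log-probs of all classes
    loss = (1. - label_smoothing) * nll_loss + label_smoothing * smooth_loss
    return loss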
| 3,563
| 32.308411
| 108
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/conformer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
from onmt.modules.sinusoidal_positional_encoding import SinusoidalPositionalEmbedding
import onmt
from onmt.modules.base_seq2seq import NMTModel, DecoderState
from onmt.models.speech_recognizer.lstm import SpeechLSTMDecoder, LSTMDecodingState
from onmt.modules.convolution import Conv2dSubsampling
from onmt.models.transformer_layers import PrePostProcessing
from onmt.models.discourse.relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from .conformer_layers import ConformerEncoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
class ConformerEncoder(TransformerEncoder):
def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
self.death_rate = opt.death_rate
self.learnable_position_encoding = opt.learnable_position_encoding
self.layer_modules = list()
self.unidirectional = opt.unidirectional
self.reversible = opt.src_reversible
self.n_heads = opt.n_heads
# build_modules will be called from the inherited constructor
super().__init__(opt, dicts, positional_encoder, encoder_type, language_embeddings)
# position encoding sin/cos
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
# self.audio_trans = Conv2dSubsampling(opt.input_size, opt.model_size)
channels = self.channels
feature_size = opt.input_size
cnn = [nn.Conv2d(channels, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32),
nn.Conv2d(32, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32)]
# cnn = [nn.Conv2d(channels, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True),
# nn.Conv2d(32, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True)]
nn.init.kaiming_normal_(cnn[0].weight, nonlinearity="relu")
nn.init.kaiming_normal_(cnn[3].weight, nonlinearity="relu")
feat_size = (((feature_size // channels) - 3) // 4) * 32
# cnn.append()
self.audio_trans = nn.Sequential(*cnn)
self.linear_trans = nn.Linear(feat_size, self.model_size)
self.d_head = self.model_size // self.n_heads
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Conformer Encoder with %.2f expected layers" % e_length)
if self.unidirectional:
print("* Running a unidirectional Encoder.")
self.layer_modules = nn.ModuleList()
for _l in range(self.layers):
# linearly decay the death rate
death_r = (_l + 1.0) / self.layers * self.death_rate
block = ConformerEncoderLayer(self.opt, death_rate=death_r)
self.layer_modules.append(block)
def forward(self, input, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
:param input: [B x T x Input_Size]
:param input_pos: [B x T] positions
:param input_lang: [B] language ids of each sample
:param streaming: connect different segments in transformer-xl style
:param kwargs:
:return:
"""
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
# first subsampling
input = input.view(input.size(0), input.size(1), -1, self.channels)
input = input.permute(0, 3, 1, 2) # [bsz, channels, time, f]
input = self.audio_trans(input)
input = input.permute(0, 2, 1, 3).contiguous()
input = input.view(input.size(0), input.size(1), -1)
input = self.linear_trans(input)
emb = input
mask_src = long_mask[:, 0:emb.size(1) * 4:4].transpose(0, 1).unsqueeze(0)
dec_attn_mask = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
emb = emb.transpose(0, 1)
input = input.transpose(0, 1)
mem_len = 0
mems = None
if self.unidirectional:
qlen = input.size(0)
klen = qlen + mem_len
attn_mask_src = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
pad_mask = mask_src
mask_src = pad_mask + attn_mask_src
# dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
mask_src = mask_src.gt(0)
if onmt.constants.torch_version >= 1.2:
mask_src = mask_src.bool()
""" Scale the emb by sqrt(d_model) """
# emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
qlen = input.size(0)
klen = qlen + mem_len
# Asynchronous positions: 2K+1 positions instead of K+1
if self.unidirectional:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
else:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
# pos_emb has size 2T+1 x 1 x H
pos_emb = self.positional_encoder(pos, bsz=input.size(1))
if self.learnable_position_encoding:
raise NotImplementedError
context = emb
# Apply dropout to pos_emb
# context = self.preprocess_layer(context)
pos_emb = self.preprocess_layer(pos_emb)
for i, layer in enumerate(self.layer_modules):
# src_len x batch_size x d_model
context = layer(context, pos_emb, mask_src, src_lang=input_lang)
# final layer norm
context = self.postprocess_layer(context)
output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask, 'src': input})
return output_dict
class Conformer(NMTModel):
def __init__(self, encoder, decoder, generator=None, rec_decoder=None, rec_generator=None,
mirror=False, ctc=False):
super().__init__(encoder, decoder, generator, rec_decoder, rec_generator, ctc=ctc)
self.model_size = self.decoder.model_size
self.tgt_vocab_size = self.decoder.word_lut.weight.size(0)
if self.encoder.input_type == 'text':
self.src_vocab_size = self.encoder.word_lut.weight.size(0)
else:
self.src_vocab_size = 0
if self.ctc:
self.ctc_linear = nn.Linear(encoder.model_size, self.tgt_vocab_size)
def reset_states(self):
return
def forward(self, batch, target_mask=None, streaming=False, zero_encoder=False,
mirror=False, streaming_state=None, nce=False):
src = batch.get('source')
tgt = batch.get('target_input')
src_pos = batch.get('source_pos')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
tgt_lengths = batch.tgt_lengths
org_src = src
org_tgt = tgt
src = src.transpose(0, 1) # transpose to have batch first
tgt = tgt.transpose(0, 1)
encoder_output = self.encoder(src, input_lang=src_lang)
encoder_output = defaultdict(lambda: None, encoder_output)
context = encoder_output['context']
if zero_encoder:
context.zero_()
src_mask = encoder_output['src_mask']
decoder_output = self.decoder(tgt, context, src,
tgt_lang=tgt_lang, input_pos=tgt_pos, streaming=streaming,
src_lengths=src_lengths, tgt_lengths=tgt_lengths,
streaming_state=streaming_state)
decoder_output = defaultdict(lambda: None, decoder_output)
output = decoder_output['hidden']
output_dict = defaultdict(lambda: None, decoder_output)
output_dict['hidden'] = output
output_dict['context'] = context
output_dict['src_mask'] = encoder_output['src_mask']
output_dict['src'] = src
output_dict['target_mask'] = target_mask
output_dict['reconstruct'] = None
output_dict['target'] = batch.get('target_output')
if self.training and nce:
output_dict = self.generator[0](output_dict)
else:
logprobs = self.generator[0](output_dict)['logits']
output_dict['logprobs'] = logprobs
# compute the logits for each encoder step
if self.ctc:
output_dict['encoder_logits'] = self.ctc_linear(output_dict['context'])
return output_dict
def step(self, input_t, decoder_state):
output_dict = self.decoder.step(input_t, decoder_state)
output_dict['src'] = decoder_state.src.transpose(0, 1)
# squeeze to remove the time step dimension
log_prob = self.generator[0](output_dict)['logits'].squeeze(0)
log_prob = F.log_softmax(log_prob, dim=-1, dtype=torch.float32)
coverage = output_dict['coverage']
last_coverage = coverage[:, -1, :].squeeze(1)
output_dict['log_prob'] = log_prob
output_dict['coverage'] = last_coverage
return output_dict
def create_decoder_state(self, batch, beam_size=1, type=1, buffering=True, **kwargs):
"""
Generate a new decoder state based on the batch input
:param buffering:
:param streaming:
:param type:
:param batch: Batch object (may not contain target during decoding)
:param beam_size: Size of beam used in beam search
:return:
"""
src = batch.get('source')
src_pos = batch.get('source_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
# TxB -> BxT
src_transposed = src.transpose(0, 1)
encoder_output = self.encoder(src_transposed, input_lang=src_lang)
decoder_state = LSTMDecodingState(src, tgt_lang, encoder_output['context'],
beam_size=beam_size, model_size=self.model_size,
type=type, buffering=buffering)
return decoder_state
def decode(self, batch):
src = batch.get('source')
src_pos = batch.get('source_pos')
tgt_input = batch.get('target_input')
tgt_output = batch.get('target_output')
tgt_pos = batch.get('target_pos')
# tgt_atb = batch.get('target_atb') # a dictionary of attributes
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src = src.transpose(0, 1)
tgt_input = tgt_input.transpose(0, 1)
batch_size = tgt_input.size(0)
context = self.encoder(src)['context']
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
decoder_output = self.decoder(tgt_input, context, src, tgt_lang=tgt_lang, src_lang=src_lang,
input_pos=tgt_pos)['hidden']
output = decoder_output
for dec_t, tgt_t in zip(output, tgt_output):
dec_out = defaultdict(lambda: None)
dec_out['hidden'] = dec_t.unsqueeze(0)
dec_out['src'] = src
dec_out['context'] = context
if isinstance(self.generator, nn.ModuleList):
gen_t = self.generator[0](dec_out)['logits']
else:
gen_t = self.generator(dec_out)['logits']
gen_t = F.log_softmax(gen_t, dim=-1, dtype=torch.float32)
gen_t = gen_t.squeeze(0)
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.constants.PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
gold_words += tgt_t.ne(onmt.constants.PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
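# --- Illustrative usage sketch (not part of the original file): decode() above returns the
# number of gold tokens, the per-sentence summed log-probabilities and the per-token scores.
# A caller can turn these into a corpus-level perplexity as below; the helper name is ours.
def _example_gold_perplexity(model, batch):
    import math
    gold_words, gold_scores, _ = model.decode(batch)
    avg_nll = -gold_scores.sum().item() / max(gold_words, 1)
    return math.exp(avg_nll)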
| 12,149
| 37.571429
| 126
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/wav2vec2.py
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformers import Transformer, TransformerDecodingState
from typing import List, Optional, Union
from collections import defaultdict
import onmt
from onmt.modules.optimized.linear import Linear
import math
from .fairseq_wav2vec2.file_io import PathManager
from omegaconf import DictConfig, open_dict, OmegaConf
from .fairseq_wav2vec2.utils import overwrite_args_by_name
#
# # maybe just need d / F.normalize(d, p=2, dim=2)
#
# def norm_vec_sentence_level(d, xp):
# # d : (max_len, batchsize, emb_dim)
# # trans_d : (batchsize, max_len, emb_dim)
# trans_d = xp.transpose(d, (1, 0, 2))
# norm_term = xp.linalg.norm(trans_d, axis=(1, 2), keepdims=True) + 1e-12
# trans_d = trans_d / norm_term
# d_sent_norm = xp.transpose(trans_d, (1, 0, 2))
# return d_sent_norm
def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):
"""Loads a checkpoint to CPU (with upgrading for backward compatibility).
If doing single-GPU training or if the checkpoint is only being loaded by at
most one process on each node (current default behavior is for only rank 0
to read the checkpoint from disk), load_on_all_ranks should be False to
avoid errors from torch.distributed not having been initialized or
torch.distributed.barrier() hanging.
If all processes on each node may be loading the checkpoint
simultaneously, load_on_all_ranks should be set to True to avoid I/O
conflicts.
There's currently no support for > 1 but < all processes loading the
checkpoint on each node.
"""
local_path = PathManager.get_local_path(path)
# The locally cached file returned by get_local_path() may be stale for
# remote files that are periodically updated/overwritten (ex:
# checkpoint_last.pt) - so we remove the local copy, sync across processes
# (if needed), and then download a fresh copy.
if local_path != path and PathManager.path_requires_pathmanager(path):
try:
os.remove(local_path)
except FileNotFoundError:
# With potentially multiple processes removing the same file, the
# file being missing is benign (missing_ok isn't available until
# Python 3.8).
pass
if load_on_all_ranks:
torch.distributed.barrier()
local_path = PathManager.get_local_path(path)
with open(local_path, "rb") as f:
state = torch.load(f, map_location=torch.device("cpu"))
if "args" in state and state["args"] is not None and arg_overrides is not None:
args = state["args"]
for arg_name, arg_val in arg_overrides.items():
setattr(args, arg_name, arg_val)
if "cfg" in state and state["cfg"] is not None:
# hack to be able to set Namespace in dict config. this should be removed when we update to newer
# omegaconf version that supports object flags, or when we migrate all existing models
state["cfg"] = OmegaConf.create(state["cfg"])
OmegaConf.set_struct(state["cfg"], True)
if arg_overrides is not None:
overwrite_args_by_name(state["cfg"], arg_overrides)
# state = _upgrade_state_dict(state)
return state
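# --- Illustrative usage sketch (not part of the original file): how load_checkpoint_to_cpu
# is typically called. The override key below ("dropout") is a hypothetical example; the
# overrides are applied to state["args"] and/or state["cfg"] as implemented above.
def _example_load_wav2vec_state(path="wav2vec_vox_new.pt"):
    overrides = {"dropout": 0.1}  # hypothetical override value
    state = load_checkpoint_to_cpu(path, arg_overrides=overrides)
    model_cfg = state["cfg"]["model"] if state.get("cfg") is not None else state.get("args")
    model_weights = state["model"]
    return model_cfg, model_weights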
# defining a Wav2vec2 encoder wrapping the HuggingFace model here
class FairseqWav2VecExtractor(nn.Module):
def __init__(self, model_path="wav2vec_vox_new.pt"):
self.model_path = model_path
import fairseq
# from fairseq.checkpoint_utils import load_model_ensemble_and_task, load_checkpoint_to_cpu
from .fairseq_wav2vec2.wav2vec2 import Wav2Vec2Model
super().__init__()
state = load_checkpoint_to_cpu(model_path)
self.cfg = state['cfg']['model']
self.wav2vec_encoder = Wav2Vec2Model(cfg=self.cfg)
self.wav2vec_encoder.load_state_dict(state['model'])
self.wav2vec_encoder.remove_pretraining_modules()
def forward(self, batch, **kwargs):
"""
:param batch: Batch object whose 'source' tensor has shape [seq_len, batch_size, 2];
    it is transposed to batch-first inside this method
:param kwargs:
:return: convolutional features and the corresponding padding mask
"""
input = batch.get('source').transpose(0, 1) # T x B x H -> B x T x H
# 0 for tokens that are not masked, 1 for tokens that are masked
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(0).long()
input = input.narrow(2, 1, input.size(2) - 1).squeeze(-1)
attn_mask = long_mask
# wav2vec_output = self.wav2vec_encoder.extract_features(input, attn_mask, mask=self.training)
features, padding_mask = self.wav2vec_encoder.extract_conv_features(input, attn_mask)
return features, padding_mask
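# --- Illustrative sketch (not part of the original file): how a [T x B x 2] 'source' tensor
# with the layout assumed above can be built from raw waveforms. Channel 0 is a padding
# indicator (0 at padded frames, non-zero otherwise) and channel 1 carries the samples;
# the helper name and the 1.0 flag value are our assumptions.
def _example_build_source(waveforms):
    import torch
    # waveforms: list of 1-D float tensors with different lengths
    max_len = max(w.size(0) for w in waveforms)
    source = torch.zeros(max_len, len(waveforms), 2)
    for b, w in enumerate(waveforms):
        source[:w.size(0), b, 0] = 1.0   # non-zero flag marks real (unpadded) frames
        source[:w.size(0), b, 1] = w     # raw waveform samples in the second channel
    return source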
class FairseqWav2VecQuantizer(nn.Module):
def __init__(self, model_path="wav2vec_vox_new.pt"):
self.model_path = model_path
# import fairseq
# from fairseq.checkpoint_utils import load_model_ensemble_and_task, load_checkpoint_to_cpu
from .fairseq_wav2vec2.wav2vec2 import Wav2Vec2Model
super().__init__()
state = load_checkpoint_to_cpu(model_path)
self.cfg = state['cfg']['model']
self.wav2vec_encoder = Wav2Vec2Model(cfg=self.cfg)
self.wav2vec_encoder.load_state_dict(state['model'])
def forward(self, batch, **kwargs):
"""
:param batch: Batch object whose 'source' tensor has shape [seq_len, batch_size, 2];
    it is transposed to batch-first inside this method
:param kwargs:
:return: quantized target codes and the corresponding padding mask
"""
input = batch.get('source').transpose(0, 1) # T x B x H -> B x T x H
# 0 for tokens that are not masked, 1 for tokens that are masked
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(0).long()
input = input.narrow(2, 1, input.size(2) - 1).squeeze(-1)
attn_mask = long_mask
wav2vec_output = self.wav2vec_encoder(input, attn_mask, mask=False,
quantize=True, quantize_only=True,
)
codes = wav2vec_output['quantized_target']
padding_mask = wav2vec_output['padding_mask']
return codes, padding_mask
class FairseqWav2Vec(nn.Module):
def __init__(self, opt, model_path="wav2vec_vox_new.pt",
stacked_encoder=None, **kwargs):
super().__init__()
# do we need opt for this?
self.opt = opt
self.model_path = model_path
# import fairseq
# from fairseq.checkpoint_utils import load_model_ensemble_and_task, load_checkpoint_to_cpu
# from fairseq.models.wav2vec.wav2vec2 import Wav2Vec2Model
from .fairseq_wav2vec2.wav2vec2 import Wav2Vec2Model
state = load_checkpoint_to_cpu(model_path)
self.cfg = state['cfg']['model']
# don't override the options for wav2vec yet (some of them can create NaN)
self.cfg.dropout = self.opt.enc_pretrain_emb_dropout
# self.cfg.activation_dropout = self.opt.ffn_dropout
self.cfg.attention_dropout = self.opt.enc_pretrain_hidden_dropout
self.cfg.encoder_layerdrop = self.opt.death_rate
# self.cfg.dropout_features = self.opt.emb_dropout
# self.cfg.mask_channel_before = True
self.cfg.mask_channel_prob = 0.2 if self.opt.wav2vec_spec_augment else 0.0
self.cfg.mask_channel_length = 64
self.cfg.mask_prob = 0.0
self.wav2vec_encoder = Wav2Vec2Model(cfg=self.cfg, favor=opt.favor_attention,
weight_drop=opt.weight_drop,
predict_language=opt.predict_language,
n_languages=opt.n_languages)
self.favor = opt.favor_attention
if self.favor:
from onmt.modules.performer import ProjectionUpdater
self.proj_updater = ProjectionUpdater(self.wav2vec_encoder.encoder,
feature_redraw_interval=1000)
self.auto_check_redraw = True
# load wav2vec weights
wav2vec_weights = state['model']
existed_weights = self.wav2vec_encoder.state_dict()
# if we add new weights/buffers to new model then put them into the state_dict
keys = existed_weights.keys()
for key in keys:
if key not in wav2vec_weights:
wav2vec_weights[key] = existed_weights[key]
self.wav2vec_encoder.load_state_dict(state['model'])
removing_quantizer = not opt.wav2vec2_quantize
# remove the quantization modules
# print("removing quantization modules", removing_quantizer)
self.wav2vec_encoder.remove_pretraining_modules(removing_quantizer=removing_quantizer)
cfg = self.wav2vec_encoder.cfg
assert self.opt.model_size == cfg.encoder_embed_dim, \
    "Expect self.opt.model_size (%d) and cfg.encoder_embed_dim (%d) to be equal" \
    % (self.opt.model_size, cfg.encoder_embed_dim)
self.input_type = self.opt.encoder_type
self.model_size = cfg.encoder_embed_dim
self.wav2vec_encoder.feature_grad_mult = 0.0
self.time = None
self.quantize = opt.wav2vec2_quantize
self.dual_output = opt.wav2vec2_dual_output and self.quantize
if stacked_encoder is not None:
self.wav2vec_encoder.add_stacked_encoder(stacked_encoder)
# freezing the parameters of the Convolutional feature extractors (by default)
for param in self.wav2vec_encoder.feature_extractor.parameters():
param.requires_grad = False
# TODO:
# add relative attention
if (hasattr(opt, 'wav2vec2_relative_attention') and opt.wav2vec2_relative_attention) or \
(hasattr(opt, 'add_relative_attention') and opt.add_relative_attention):
print("[INFO] Adding relative attention to wav2vec")
self.wav2vec_encoder.add_relative_attention()
self.rotary_position_encoding = opt.rotary_position_encoding
if self.rotary_position_encoding:
assert not (hasattr(opt, 'wav2vec2_relative_attention') and opt.wav2vec2_relative_attention)
self.wav2vec_encoder.add_rotary_attention()
# freeze the whole encoder. This needs to be done before adding customized parameters
if opt.freeze_encoder:
print("[INFO] Freezing encoder parameters")
for p in self.wav2vec_encoder.parameters():
p.requires_grad = False
if opt.freeze_encoder_ffn:
self.freeze_ffn_params()
# then add factorize
if opt.multilingual_factorized_weights:
print("[INFO] Factorizing Wav2vec model into %d languages and %d factors"
% (opt.n_languages, opt.n_attributes))
self.wav2vec_encoder.encoder.add_factorize(opt.n_languages, rank=opt.mfw_rank,
multiplicative=opt.mfw_multiplicative,
fast=opt.fast_factorize)
# or adapter
if opt.wav2vec_adapter > 0:
print("[INFO] Adding adapters for Wav2vec model with %d languages" % opt.n_languages)
self.wav2vec_encoder.encoder.add_adapters(opt.n_languages, adapter_location=opt.wav2vec_adapter)
# can receive an mbart or deltalm encoder
# self.stacked_encoder = stacked_encoder
# TODO: length conversion layer
# if stacked_encoder is not None:
# self.stacked_encoder = stacked_encoder
# self.conv_downsampler = nn.ModuleList()
#
# from .fairseq_wav2vec2.fairseq_modules import TransposeLast
# from onmt.modules.layer_norm import LayerNorm
# for i in range(3):
#
# def make_conv(n_in, n_out, k, stride=2, padding=1):
# conv = nn.Conv1d(n_in, n_out, k, stride=stride, padding=padding, bias=False)
# torch.nn.init.kaiming_normal_(conv.weight)
# return conv
#
# conv = nn.Sequential(
# make_conv(self.model_size, self.model_size, 4, stride=2, padding=1),
# nn.Sequential(
# TransposeLast(),
# LayerNorm(self.model_size),
# TransposeLast(),
# ),
# nn.GELU(),
# )
#
# self.conv_downsampler.append(conv)
else:
self.stacked_encoder = None
self.conv_downsampler = None
# discrete encoder that works on top of the wav quantized output
# self.discrete_encoder = None # discrete_encoder
# if self.quantize:
# var_dim = self.wav2vec_encoder.quantizer.vars.size(-1) * self.wav2vec_encoder.quantizer.groups
# model_dim = self.model_size
# self.discrete_encoder = nn.Linear(var_dim, model_dim)
# if discrete_encoder is not None:
# assert self.quantize is True
#
# codebook_size = self.wav2vec_encoder.quantizer.num_vars ** self.wav2vec_encoder.quantizer.groups
# embed_dim = self.discrete_encoder.embed_dim
# var_dim = self.wav2vec_encoder.quantizer.vars.size(-1) * self.wav2vec_encoder.quantizer.groups
# # new embedding layer
# # self.discrete_encoder.embed_tokens = nn.Linear(var_dim, embed_dim) #nn.Embedding(codebook_size, embed_dim)
# self.discrete_encoder.embed_tokens = nn.Embedding(codebook_size, embed_dim)
# nn.init.normal_(self.discrete_encoder.embed_tokens.weight, 0.0, 0.02)
#
# # freeze the quantizer
# for param in self.wav2vec_encoder.quantizer.parameters():
# param.requires_grad = False
#
# for param in self.wav2vec_encoder.layer_norm.parameters():
# param.requires_grad = False
def fix_projection_matrices_(self):
if self.favor:
self.proj_updater.fix_projections_()
def convert_fast_attention(self):
self.wav2vec_encoder.convert_fast_attention()
def freeze_ffn_params(self):
for layer in self.wav2vec_encoder.encoder.layers:
for p in layer.fc1.parameters():
p.requires_grad = False
for p in layer.fc2.parameters():
p.requires_grad = False
def test_run(self, input, mask):
# input should have size [B x T x H]
# H == 1: audio samples
# H > 1: precomputed samples
if input.size(-1) == 1:
precomputed_tdnn = False
input = input.squeeze(-1)
else:
precomputed_tdnn = True
wav2vec_output = self.wav2vec_encoder.extract_features(input, mask,
mask=False,
precomputed_tdnn=precomputed_tdnn,
lang=None, mixture=None)
context = wav2vec_output['x']
return context
def forward(self, input, batch_first_output=False, adv_ptb_grad=False, input_ptb=None,
lang=None, atb=None,
checkpointing_ffn=False, checkpointing_self_attn=False, **kwargs):
"""
:param checkpointing_self_attn:
:param checkpointing_ffn:
:param atb:
:param lang:
:param input_ptb: perturbation added to the input itself
:param adv_ptb_grad: adversarial perturbation step which we need the gradients w.r.t the input (wavs)
:param batch_first_output: [bsz, seq_len, hidden_size] as output size, else transpose(0, 1)
:param input: torch.Tensor [batch_size, sequence_length, 2]
:param kwargs:
:return:
"""
# The data has been constructed that the first dimension is padding mask
# 0 for tokens that are not masked, 1 for tokens that are masked
with torch.no_grad():
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(0).long()
input = input.narrow(2, 1, input.size(2) - 1)
if adv_ptb_grad:
input.requires_grad = True
if input_ptb is not None:
assert not adv_ptb_grad
with torch.no_grad():
# normalize and add to input / maybe scale over input length?
# do this under fp32
with torch.cuda.amp.autocast(enabled=False):
epsilon = 1.0
input_ptb = input_ptb.float()
input_ptb = input_ptb / F.normalize(input_ptb, p=2.0, dim=2)
input = input.float() + input_ptb * epsilon
if input.size(-1) == 1:
precomputed_tdnn = False
input = input.squeeze(-1)
else:
precomputed_tdnn = True
attn_mask = long_mask
if self.favor: # favor+ attention
if self.auto_check_redraw:
# print("Redraw projection ....")
self.proj_updater.redraw_projections()
quantize_only = False # self.quantize and not self.dual_output
# don't mask when precomputed tdnn is used, because spec augmentation is used in the dataset
wav2vec_output = self.wav2vec_encoder(input, attn_mask,
mask=self.training, features_only=True, layer=None,
precomputed_tdnn=precomputed_tdnn, quantize=self.quantize,
quantize_only=quantize_only,
lang=lang, atb=atb,
checkpointing_ffn=checkpointing_ffn,
checkpointing_self_attn=checkpointing_self_attn)
# if self.quantize:
# quantized_codebooks = wav2vec_output['quantized_target']
# encoder_input = quantized_codebooks.prod(dim=-1, keepdim=False) # .transpose(0, 1) # -> t x b x groups
# dec_attn_mask = wav2vec_output['padding_mask'] # b x t
#
# # 44204 = magic number
# additional_mask = encoder_input.eq(44204)
#
# if dec_attn_mask is not None:
# dec_attn_mask = torch.logical_or(dec_attn_mask.bool(), additional_mask)
# else:
# dec_attn_mask = additional_mask
#
# discrete_encoder_output = self.discrete_encoder(input_ids=encoder_input, attention_mask=dec_attn_mask)
# discrete_output = discrete_encoder_output[0]
# batch_size, time = discrete_output.size(1), discrete_output.size(0)
# if batch_first_output:
# discrete_output = discrete_output.transpose(0, 1).contiguous()
# batch_size, time = discrete_output.size(0), discrete_output.size(1)
# else:
# discrete_output = None
# output size is always T x B x C
continuous_output = wav2vec_output['x']
time, batch_size = continuous_output.size(0), continuous_output.size(1)
# mask size is B x T (1 for padded positions, 0 for unpadded)
dec_attn_mask = wav2vec_output['padding_mask']
if self.quantize:
quantized_output = wav2vec_output['quantized_x']
discrete_output = self.discrete_encoder(quantized_output)
discrete_output = discrete_output.transpose(0, 1).contiguous()
context = continuous_output + discrete_output
else:
context = continuous_output
if dec_attn_mask is None:
dec_attn_mask = context.new_zeros(batch_size, time).byte()
else:
dec_attn_mask = dec_attn_mask.byte()
wav2vec_context = context
wav2vec_padding_mask = dec_attn_mask
# # TODO: make the stacked encoder run here
# if self.stacked_encoder is not None:
# # assert self.conv_downsampler is not None
# #
# # # T x B x C -> B x C x T
# # context = context.transpose(0, 1).transpose(1, 2).contiguous()
# #
# # # apply convolutions to downsample the size
# # for conv in self.conv_downsampler:
# # context = conv(context)
# #
# # # B x C x T -> B x T x C
# # context = context.transpose(1, 2).contiguous()
# #
# # padding_mask = dec_attn_mask
# #
# # # TODO: recompute the padding_mask from length
# # with torch.no_grad():
# # input_lengths = (1 - padding_mask.long()).sum(-1)
# #
# # def _conv_out_length(input_length, conv):
# # kernel_size = conv.kernel_size[0]
# # stride = conv.kernel_size[0]
# # padding = conv.padding[0]
# #
# # return torch.floor((input_length - kernel_size + 2 * padding) / stride + 1)
# #
# # for conv_block in self.conv_downsampler:
# # input_lengths = _conv_out_length(
# # input_lengths, conv_block[0]
# # )
# #
# # input_lengths = input_lengths.to(torch.long)
# #
# # padding_mask = torch.zeros(
# # context.shape[:2], dtype=context.dtype, device=context.device
# # )
# #
# # padding_mask[
# # (
# # torch.arange(padding_mask.shape[0], device=padding_mask.device),
# # input_lengths - 1,
# # )
# # ] = 1
# #
# # padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
# #
# # dec_attn_mask = padding_mask
# context = context.transpose(0, 1).contiguous()
#
# # run the output through the stacked encoder
# stacked_encoder_output = self.stacked_encoder(inputs_embeds=context, attention_mask=dec_attn_mask,
# checkpointing_ffn=checkpointing_ffn)
# context = stacked_encoder_output[0]
# how to get the correct attention mask?
output_dict = defaultdict(lambda: None, {'source': input, 'context': context, 'src_mask': dec_attn_mask,
'src': dec_attn_mask, 'pos_emb': None,
'wav2vec_context': wav2vec_context,
'wav2vec_padding_mask': wav2vec_padding_mask,
'enc_pred_lang': wav2vec_output['pred_lang']})
return output_dict
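# --- Illustrative sketch (not part of the original file): consuming the dictionary returned
# by forward() above. 'context' is [T x B x C]; 'src' / 'src_mask' is a byte mask of shape
# [B x T] with 1 at padded positions. The mean-pooling helper below is our own example.
def _example_mean_pool(output_dict):
    context = output_dict['context']                 # T x B x C
    pad_mask = output_dict['src'].bool()             # B x T, True = padded
    valid = (~pad_mask).transpose(0, 1).unsqueeze(-1).to(context.dtype)  # T x B x 1
    pooled = (context * valid).sum(dim=0) / valid.sum(dim=0).clamp(min=1)
    return pooled                                    # B x C, averaged over unpadded frames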
class Wav2vecTransformer(Transformer):
"""Main model in 'Attention is all you need' """
def __init__(self, encoder, decoder, generator=None,
mirror=False, ctc=False, **kwargs):
super().__init__(encoder, decoder, generator, None, None, ctc=ctc)
self.model_size = self.decoder.model_size
self.switchout = self.decoder.switchout
if mirror:
self.mirror_decoder = copy.deepcopy(self.decoder)
self.mirror_g = nn.Linear(decoder.model_size, decoder.model_size)
self.mirror_generator = copy.deepcopy(self.generator)
self.mirror_generator[0].linear.weight = self.decoder.word_lut.weight
if self.ctc:
self.ctc_linear = Linear(encoder.model_size, self.tgt_vocab_size)
def reset_states(self):
return
def forward(self, batch, adv_ptb_grad=False, input_ptb=None, factorize=False,
mirror=False, target_mask=None, **kwargs):
"""
:param factorize:
:param mirror:
:param adv_ptb_grad: If we need to tell the model to set input.requires_grad=True (1st step)
:param input_ptb: 2nd step of adversarial: add the perturbation to input
:param batch: data object sent from the dataset
:return:
"""
if self.switchout > 0 and self.training:
batch.switchout(self.switchout, self.src_vocab_size, self.tgt_vocab_size)
src = batch.get('source')
tgt = batch.get('target_input')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
tgt_lengths = batch.tgt_lengths
org_src = src
org_tgt = tgt
src = src.transpose(0, 1) # transpose to have batch first
tgt = tgt.transpose(0, 1)
encoder_output = self.encoder(src, adv_ptb_grad=adv_ptb_grad, input_ptb=input_ptb)
encoder_output = defaultdict(lambda: None, encoder_output)
context = encoder_output['context']
src = encoder_output['src']
# pass the mask ('src') from the encoder output to the decoder as the attention mask
decoder_output = self.decoder(tgt, context, src,
src_lang=src_lang, tgt_lang=tgt_lang, input_pos=tgt_pos,
src_lengths=src_lengths, tgt_lengths=tgt_lengths,
factorize=factorize)
decoder_output = defaultdict(lambda: None, decoder_output)
output = decoder_output['hidden']
# build the output dict based on decoder output
output_dict = defaultdict(lambda: None, decoder_output)
output_dict['hidden'] = output
output_dict['context'] = context
output_dict['src_mask'] = encoder_output['src']
output_dict['src'] = src
output_dict['target_mask'] = target_mask
output_dict['target'] = batch.get('target_output')
output_dict['source'] = encoder_output['source']
# final layer: computing softmax
logprobs = self.generator[0](output_dict)['logits']
output_dict['logprobs'] = logprobs
# Mirror network: reverse the target sequence and perform backward language model
if mirror:
# tgt_reverse = torch.flip(batch.get('target_input'), (0, ))
tgt_pos = torch.flip(batch.get('target_pos'), (0,))
tgt_reverse = torch.flip(batch.get('target'), (0,))
tgt_reverse_input = tgt_reverse[:-1]
tgt_reverse_output = tgt_reverse[1:]
tgt_reverse_input = tgt_reverse_input.transpose(0, 1)
# perform an additional backward pass
reverse_decoder_output = self.mirror_decoder(tgt_reverse_input, context, src, src_lang=src_lang,
tgt_lang=tgt_lang, input_pos=tgt_pos)
reverse_decoder_output['src'] = src
reverse_decoder_output['context'] = context
reverse_decoder_output['target_mask'] = target_mask
reverse_logprobs = self.mirror_generator[0](reverse_decoder_output)['logits']
output_dict['reverse_target'] = tgt_reverse_output
output_dict['reverse_hidden'] = reverse_decoder_output['hidden']
output_dict['reverse_logprobs'] = reverse_logprobs
output_dict['target_input'] = batch.get('target_input')
output_dict['target_lengths'] = batch.tgt_lengths
# learn weights for mapping (g in the paper)
output_dict['hidden'] = self.mirror_g(output_dict['hidden'])
output_dict['reconstruct'] = False
# compute the logits for each encoder step
if self.ctc:
# raise NotImplementedError
output_dict['encoder_logits'] = self.ctc_linear(output_dict['context'])
return output_dict
# load pretrained wav2vec weights
def load_encoder_weights(self, checkpoint):
self.encoder.wav2vec_encoder.load_state_dict(checkpoint['model'])
def create_decoder_state(self, batch, beam_size=1, type=2, buffering=True,
pretrained_layer_states=None, **kwargs):
"""
Generate a new decoder state based on the batch input
:param pretrained_layer_states:
:param buffering:
:param type:
:param batch: Batch object (may not contain target during decoding)
:param beam_size: Size of beam used in beam search
:return:
"""
src = batch.get('source')
src_pos = batch.get('source_pos')
tgt_atb = batch.get('target_atbs')
src_atb = batch.get('source_atbs')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_transposed = src.transpose(0, 1) # transpose -> batch first
encoder_output = self.encoder(src_transposed)
src = encoder_output['src'].transpose(0, 1)
src_mask = encoder_output['src']
print("[INFO] Creating Transformer decoding state with buffering", buffering)
decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'], src_lang,
beam_size=beam_size, model_size=self.model_size,
type=type, buffering=buffering, src_mask=src_mask)
return decoder_state
def step(self, input_t, decoder_state, streaming=False):
"""
Decoding function:
generate new decoder output based on the current input and current decoder state
the decoder state is updated in the process
:param streaming:
:param input_t: the input word index at time t
:param decoder_state: object DecoderState containing the buffers required for decoding
:return: a dictionary containing: log-prob output and the attention coverage
"""
output_dict = self.decoder.step(input_t, decoder_state, streaming=streaming)
output_dict['src'] = decoder_state.src.transpose(0, 1)
log_prob = self.generator[0](output_dict)['logits'].squeeze(0)
log_prob = torch.nn.functional.log_softmax(log_prob, dim=-1, dtype=torch.float32)
coverage = output_dict['coverage']
last_coverage = coverage[:, -1, :].squeeze(1)
output_dict['log_prob'] = log_prob
output_dict['coverage'] = last_coverage
return output_dict
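# --- Illustrative sketch (not part of the original file): a minimal greedy decoding loop on
# top of create_decoder_state() and step(). The BOS/EOS constants come from onmt.constants;
# the exact tensor layout fed to step() (here 1 x beam) is an assumption, not a verified recipe.
def _example_greedy_decode(model, batch, max_len=100):
    import torch
    import onmt
    state = model.create_decoder_state(batch, beam_size=1)
    tokens = [onmt.constants.BOS]
    for _ in range(max_len):
        input_t = torch.LongTensor([[tokens[-1]]])   # assumed (1 x beam) layout
        out = model.step(input_t, state)
        next_token = out['log_prob'].argmax(dim=-1).item()
        tokens.append(next_token)
        if next_token == onmt.constants.EOS:
            break
    return tokens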
class Wav2vecBERT(Wav2vecTransformer):
def __init__(self, encoder, decoder, generator=None,
mirror=False, ctc=False, encoder_type='wav2vec2',
decoder_type='bart',
sub_encoder=None, mutual_modality_training=False, **kwargs):
super().__init__(encoder, decoder, generator, mirror=mirror, ctc=ctc)
self.src_vocab_size = 0
self.encoder_type = encoder_type
self.decoder_type = decoder_type
self.sub_encoder = sub_encoder
if hasattr(decoder, 'dec_pretrained_model') and decoder.dec_pretrained_model:
try:
self.model_size = self.decoder.config.bert_hidden_size
self.tgt_vocab_size = self.decoder.config.vocab_size
except AttributeError:
self.model_size = self.decoder.model_size
self.tgt_vocab_size = self.generator[0].linear.weight.size(0)
self.switchout = 0
else:
self.model_size = self.decoder.model_size
self.tgt_vocab_size = self.decoder.word_lut.weight.size(0)
self.switchout = self.decoder.switchout
if mirror:
self.mirror_decoder = copy.deepcopy(self.decoder)
self.mirror_g = nn.Linear(decoder.model_size, decoder.model_size)
self.mirror_generator = copy.deepcopy(self.generator)
self.mirror_generator[0].linear.weight = self.decoder.word_lut.weight
if self.ctc:
self.ctc_linear = nn.Linear(encoder.model_size, self.tgt_vocab_size)
def forward(self, batch, zero_encoder=False, factorize=False, target_mask=None, mirror=False,
checkpointing_ffn=False,
checkpointing_cross_attn=False,
checkpointing_self_attn=False,
**kwargs):
"""
:param checkpointing_self_attn:
:param checkpointing_cross_attn:
:param checkpointing_ffn:
:param batch:
:param zero_encoder:
:param factorize:
:param target_mask:
:param mirror:
:param kwargs:
:return:
"""
if self.switchout > 0 and self.training:
batch.switchout(self.switchout, self.src_vocab_size, self.tgt_vocab_size)
src = batch.get('source')
tgt = batch.get('target_input')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_atb = batch.get('source_atbs')
tgt_atb = batch.get('target_atbs')
src_lengths = batch.src_lengths
tgt_lengths = batch.tgt_lengths
org_src = src
org_tgt = tgt
src = src.transpose(0, 1) # transpose to have batch first
tgt = tgt.transpose(0, 1)
batch_first_output = False
if hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in ["bart"]:
batch_first_output = True
# print(src_lang, src_atb, tgt_lang, tgt_atb)
# during training mixture is always None
encoder_output = self.encoder(src, batch_first_output=batch_first_output,
lang=src_lang, atb=src_atb,
checkpointing_ffn=checkpointing_ffn,
checkpointing_self_attn=checkpointing_self_attn)
encoder_output = defaultdict(lambda: None, encoder_output)
context = encoder_output['context']
src_attention_mask = encoder_output['src']
contrastive_loss = 0
if hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in ["bert", "roberta"]:
# src: [b, src_l] context: [b, src_l, de_model]
tgt_token_type = tgt.ne(onmt.constants.TGT_PAD).long() # [bsz, len]
tgt_attention_mask = tgt.new(*tgt.size()).fill_(1) # [bsz, len]
decoder_output = self.decoder(input_ids=tgt,
attention_mask=tgt_attention_mask,
token_type_ids=tgt_token_type,
encoder_hidden_states=context,
encoder_attention_mask=src_attention_mask,
no_offset=True,
)
decoder_output = decoder_output[0]
output = decoder_output.transpose(0, 1) # [bsz, tgt_len, d] => [tgt_len, bsz, d]
output_dict = defaultdict(lambda: None)
context = context.transpose(0, 1) # to [src_l, b, de_model]
elif hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in ["bart"]:
tgt_token_type = tgt.ne(onmt.constants.TGT_PAD).long() # [bsz, len]
tgt_attention_mask = tgt.new(*tgt.size()).fill_(1) # [bsz, len]
# the wav2vec returned mask is 1 for masked and 0 for un-masked, which is opposite to huggingface
src_attention_mask = 1 - (src_attention_mask.long())
decoder_output = self.decoder(input_ids=tgt,
attention_mask=tgt_attention_mask,
encoder_hidden_states=context,
encoder_attention_mask=src_attention_mask)
decoder_output = decoder_output[0]
output = decoder_output.transpose(0, 1) # [bsz, tgt_len, d] => [tgt_len, bsz, d]
context = context.transpose(0, 1)
output_dict = defaultdict(lambda: None)
elif hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model \
in ["deltalm", "mbart", "mbart50"]:
if self.sub_encoder is not None:
src_text_input = batch.get('target')
sub_context_mask = batch.get('tgt_selfattn_mask')
with torch.no_grad():
sub_encoder_output = self.sub_encoder(input_ids=src_text_input,
attention_mask=sub_context_mask)
sub_context = sub_encoder_output[0]
# print(torch.isnan(sub_context).float().sum())
else:
sub_context = None
sub_context_mask = None
src_attention_mask = src_attention_mask # new version
# tgt_attention_mask = tgt.ne(onmt.constants.TGT_PAD).long() # [bsz, len]
# tgt_attention_mask = tgt.new(*tgt.size()).fill_(1)
tgt_attention_mask = batch.get('target_input_selfattn_mask')
if encoder_output['enc_pred_lang'] is not None:
_src_lang = torch.nn.functional.softmax(encoder_output['enc_pred_lang'], dim=-1, dtype=torch.float32)
else:
_src_lang = src_lang
decoder_outputs = self.decoder(input_ids=tgt,
attention_mask=tgt_attention_mask,
encoder_hidden_states=context,
encoder_attention_mask=src_attention_mask,
sub_encoder_hidden_states=sub_context,
sub_encoder_attention_mask=sub_context_mask,
lang=tgt_lang, atb=tgt_atb,
src_lang=_src_lang,
checkpointing_ffn=checkpointing_ffn,
checkpointing_cross_attn=checkpointing_cross_attn,
checkpointing_self_attn=checkpointing_self_attn)
decoder_output = decoder_outputs[0]
# contrastive_loss = decoder_outputs[-1]
output = decoder_output
output_dict = defaultdict(lambda: None)
else:
# pass the mask ('src') from the encoder output to the decoder as the attention mask
decoder_output = self.decoder(tgt, context, src,
src_lang=src_lang, tgt_lang=tgt_lang, input_pos=tgt_pos,
src_lengths=src_lengths, tgt_lengths=tgt_lengths,
factorize=factorize)
decoder_output = defaultdict(lambda: None, decoder_output)
output = decoder_output['hidden']
output_dict['hidden'] = output
output_dict['context'] = context
output_dict['src_mask'] = encoder_output['src']
output_dict['src'] = src
output_dict['target_mask'] = target_mask
output_dict['target'] = batch.get('target_output')
output_dict['wav2vec_context'] = encoder_output['wav2vec_context']
output_dict['wav2vec_padding_mask'] = encoder_output['wav2vec_padding_mask']
output_dict['enc_pred_lang'] = encoder_output['enc_pred_lang']
if output_dict['enc_pred_lang'] is not None:
output_dict['dec_pred_lang'] = decoder_outputs[-1]
# final layer: computing softmax
logprobs = self.generator[0](output_dict)['logits']
output_dict['logprobs'] = logprobs
# Mirror network: reverse the target sequence and perform backward language model
if mirror:
# tgt_reverse = torch.flip(batch.get('target_input'), (0, ))
tgt_pos = torch.flip(batch.get('target_pos'), (0,))
tgt_reverse = torch.flip(batch.get('target'), (0,))
tgt_reverse_input = tgt_reverse[:-1]
tgt_reverse_output = tgt_reverse[1:]
tgt_reverse_input = tgt_reverse_input.transpose(0, 1)
# perform an additional backward pass
reverse_decoder_output = self.mirror_decoder(tgt_reverse_input, context, src, src_lang=src_lang,
tgt_lang=tgt_lang, input_pos=tgt_pos)
reverse_decoder_output['src'] = src
reverse_decoder_output['context'] = context
reverse_decoder_output['target_mask'] = target_mask
reverse_logprobs = self.mirror_generator[0](reverse_decoder_output)['logits']
output_dict['reverse_target'] = tgt_reverse_output
output_dict['reverse_hidden'] = reverse_decoder_output['hidden']
output_dict['reverse_logprobs'] = reverse_logprobs
output_dict['target_input'] = batch.get('target_input')
output_dict['target_lengths'] = batch.tgt_lengths
# learn weights for mapping (g in the paper)
output_dict['hidden'] = self.mirror_g(output_dict['hidden'])
output_dict['reconstruct'] = False
# compute the logits for each encoder step
if self.ctc:
# run the CTC output via the wav2vec context (not the possibly post-processed 'context')
output_dict['encoder_logits'] = self.ctc_linear(output_dict['wav2vec_context'])
if self.sub_encoder is not None:
# contrastive loss has size: t x b x h
# stacked sum from multiple layers
contrastive_loss = contrastive_loss.transpose(0, 1).contiguous()
# the input is the target full without the final token so
# remove the last time step from the mask
mask = sub_context_mask[:, :-1].unsqueeze(-1) # b x t x 1
contrastive_loss.masked_fill_(mask, 0) # masked values = zero
output_dict['contrastive_loss'] = contrastive_loss.sum()
return output_dict
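# --- Illustrative sketch (not part of the original file): the two mask conventions reconciled
# in forward() above. The wav2vec encoder marks padded frames with 1, while huggingface-style
# decoders (the "bart" branch) expect 1 for attendable positions, hence the `1 - mask` flip.
def _example_to_hf_attention_mask(wav2vec_padding_mask):
    # wav2vec_padding_mask: [B x T], 1 = padded, 0 = real frame
    return 1 - wav2vec_padding_mask.long()           # [B x T], 1 = attend, 0 = padded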
def create_decoder_state(self, batch, beam_size=1, type=1, buffering=True, **kwargs):
"""
Generate a new decoder state based on the batch input
:param buffering:
:param streaming:
:param type:
:param batch: Batch object (may not contain target during decoding)
:param beam_size: Size of beam used in beam search
:return:
"""
src = batch.get('source')
src_pos = batch.get('source_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_atb = batch.get('source_atbs')
tgt_atb = batch.get('target_atbs')
encoder_output = self.encoder(src.transpose(0, 1), batch_first_output=False,
lang=src_lang, atb=src_atb)
src_attention_mask = encoder_output['src']
dec_pretrained_model = self.decoder.dec_pretrained_model
if not dec_pretrained_model:
mask_src = None
elif dec_pretrained_model in ["bert", "roberta"]:
mask_src = src_attention_mask.unsqueeze(1) # batch_size x 1 x len_src for broadcasting
elif dec_pretrained_model in ["bart"]:
mask_src = 1 - (src_attention_mask.long())
elif dec_pretrained_model in ["deltalm", "mbart", "mbart50"]:
mask_src = src_attention_mask
else:
print("Warning: unknown dec_pretrained_model")
raise NotImplementedError
decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'], src_lang,
beam_size=beam_size, model_size=self.model_size,
type=type, buffering=buffering, src_mask=mask_src,
dec_pretrained_model=self.decoder.dec_pretrained_model,
tgt_atb=tgt_atb)
return decoder_state
def tie_weights(self):
assert self.generator is not None, "The generator needs to be created before sharing weights"
if hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in ["bert", "roberta"]:
self.generator[0].linear.weight = self.decoder.embeddings.word_embeddings.weight
elif hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model \
in ["mbart", "mbart50", "deltalm"]:
self.generator[0].linear.weight = self.decoder.embed_tokens.weight
else:
self.generator[0].linear.weight = self.decoder.word_lut.weight
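# --- Illustrative sketch (not part of the original file): tie_weights() above makes the output
# projection share storage with the decoder embedding. A quick sanity check (covering the
# mbart/deltalm and plain-decoder cases handled above) can compare the underlying pointers.
def _example_check_tied_weights(model):
    gen_weight = model.generator[0].linear.weight
    if hasattr(model.decoder, 'embed_tokens'):
        emb_weight = model.decoder.embed_tokens.weight
    else:
        emb_weight = model.decoder.word_lut.weight
    return gen_weight.data_ptr() == emb_weight.data_ptr()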
def decode(self, batch):
raise NotImplementedError
# """
# :param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
# :return: gold_scores (torch.Tensor) log probs for each sentence
# gold_words (Int) the total number of non-padded tokens
# allgold_scores (list of Tensors) log probs for each word in the sentence
# """
#
# src = batch.get('source')
# src_pos = batch.get('source_pos')
# tgt_input = batch.get('target_input')
# tgt_output = batch.get('target_output')
# tgt_pos = batch.get('target_pos')
# # tgt_atb = batch.get('target_atb') # a dictionary of attributes
# src_lang = batch.get('source_lang')
# tgt_lang = batch.get('target_lang')
#
# # transpose to have batch first
# src = src.transpose(0, 1)
# tgt_input = tgt_input.transpose(0, 1)
# batch_size = tgt_input.size(0)
#
# context = self.encoder(src, input_pos=src_pos, input_lang=src_lang)['context']
#
# if hasattr(self, 'autoencoder') and self.autoencoder \
# and self.autoencoder.representation == "EncoderHiddenState":
# context = self.autoencoder.autocode(context)
#
# gold_scores = context.new(batch_size).zero_()
# gold_words = 0
# allgold_scores = list()
# decoder_output = self.decoder(tgt_input, context, src, tgt_lang=tgt_lang, src_lang=src_lang,
# input_pos=tgt_pos)['hidden']
#
# output = decoder_output
#
# if hasattr(self, 'autoencoder') and self.autoencoder and \
# self.autoencoder.representation == "DecoderHiddenState":
# output = self.autoencoder.autocode(output)
#
# for dec_t, tgt_t in zip(output, tgt_output):
#
# dec_out = defaultdict(lambda: None)
# dec_out['hidden'] = dec_t.unsqueeze(0)
# dec_out['src'] = src
# dec_out['context'] = context
#
# if isinstance(self.generator, nn.ModuleList):
# gen_t = self.generator[0](dec_out)['logits']
# else:
# gen_t = self.generator(dec_out)['logits']
# gen_t = F.log_softmax(gen_t, dim=-1, dtype=torch.float32)
# gen_t = gen_t.squeeze(0)
# tgt_t = tgt_t.unsqueeze(1)
# scores = gen_t.gather(1, tgt_t)
# scores.masked_fill_(tgt_t.eq(onmt.constants.TGT_PAD), 0)
# gold_scores += scores.squeeze(1).type_as(gold_scores)
# gold_words += tgt_t.ne(onmt.constants.TGT_PAD).sum().item()
# allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
#
# return gold_words, gold_scores, allgold_scores
| 47,708
| 43.839286
| 123
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/wavlm.py
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformers import Transformer, TransformerDecodingState
from typing import List, Optional, Union
from collections import defaultdict
import onmt
from onmt.modules.optimized.linear import Linear
import math
from .fairseq_wav2vec2.file_io import PathManager
from omegaconf import DictConfig, open_dict, OmegaConf
from .fairseq_wav2vec2.utils import overwrite_args_by_name
def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):
"""Loads a checkpoint to CPU (with upgrading for backward compatibility).
If doing single-GPU training or if the checkpoint is only being loaded by at
most one process on each node (current default behavior is for only rank 0
to read the checkpoint from disk), load_on_all_ranks should be False to
avoid errors from torch.distributed not having been initialized or
torch.distributed.barrier() hanging.
If all processes on each node may be loading the checkpoint
simultaneously, load_on_all_ranks should be set to True to avoid I/O
conflicts.
There's currently no support for > 1 but < all processes loading the
checkpoint on each node.
"""
local_path = PathManager.get_local_path(path)
# The locally cached file returned by get_local_path() may be stale for
# remote files that are periodically updated/overwritten (ex:
# checkpoint_last.pt) - so we remove the local copy, sync across processes
# (if needed), and then download a fresh copy.
if local_path != path and PathManager.path_requires_pathmanager(path):
try:
os.remove(local_path)
except FileNotFoundError:
# With potentially multiple processes removing the same file, the
# file being missing is benign (missing_ok isn't available until
# Python 3.8).
pass
if load_on_all_ranks:
torch.distributed.barrier()
local_path = PathManager.get_local_path(path)
with open(local_path, "rb") as f:
state = torch.load(f, map_location=torch.device("cpu"))
if "args" in state and state["args"] is not None and arg_overrides is not None:
args = state["args"]
for arg_name, arg_val in arg_overrides.items():
setattr(args, arg_name, arg_val)
return state
class WavLMEncoder(nn.Module):
def __init__(self, opt, model_path="wav2vec_vox_new.pt",
**kwargs):
super().__init__()
# do we need opt for this?
self.opt = opt
self.model_path = model_path
# import fairseq
# from fairseq.models.wav2vec.wav2vec2 import Wav2Vec2Model
from .fairseq_wav2vec2.wavlm import WavLM, WavLMConfig
state = load_checkpoint_to_cpu(model_path)
self.cfg = WavLMConfig(state['cfg'])
print("Overriding WavLM dropout ....")
self.cfg.dropout = self.opt.enc_pretrain_emb_dropout
self.cfg.activation_dropout = self.opt.ffn_dropout
self.cfg.encoder_layerdrop = self.opt.death_rate
# self.cfg.dropout_features = self.opt.emb_dropout
# self.cfg.mask_channel_before = True
self.cfg.mask_channel_prob = 0.2 if self.opt.wav2vec_spec_augment else 0.0
self.cfg.mask_channel_length = 64
self.cfg.mask_prob = 0.0
self.wav2vec_encoder = WavLM(self.cfg)
# load wav2vec weights
wav2vec_weights = state['model']
existed_weights = self.wav2vec_encoder.state_dict()
# if we add new weights/buffers to new model then put them into the state_dict
keys = existed_weights.keys()
for key in keys:
if key not in wav2vec_weights:
wav2vec_weights[key] = existed_weights[key]
self.wav2vec_encoder.load_state_dict(wav2vec_weights)
cfg = self.cfg
assert self.opt.model_size == cfg.encoder_embed_dim, \
    "Expect self.opt.model_size (%d) and cfg.encoder_embed_dim (%d) to be equal" \
    % (self.opt.model_size, cfg.encoder_embed_dim)
self.input_type = self.opt.encoder_type
self.model_size = cfg.encoder_embed_dim
self.time = None
# freezing the parameters of the Convolutional feature extractors (by default)
for param in self.wav2vec_encoder.feature_extractor.parameters():
param.requires_grad = False
# freeze the whole encoder. This needs to be done before adding customized parameters
if opt.freeze_encoder:
print("[INFO] Freezing encoder parameters")
for p in self.wav2vec_encoder.parameters():
p.requires_grad = False
if opt.freeze_encoder_ffn:
self.freeze_ffn_params()
def freeze_ffn_params(self):
for layer in self.wav2vec_encoder.encoder.layers:
for p in layer.fc1.parameters():
p.requires_grad = False
for p in layer.fc2.parameters():
p.requires_grad = False
def forward(self, input, batch_first_output=False, adv_ptb_grad=False, input_ptb=None,
lang=None, atb=None,
checkpointing_ffn=False, checkpointing_self_attn=False, **kwargs):
"""
:param checkpointing_self_attn:
:param checkpointing_ffn:
:param atb:
:param lang:
:param input_ptb: perturbation added to the input itself
:param adv_ptb_grad: adversarial perturbation step which we need the gradients w.r.t the input (wavs)
:param batch_first_output: [bsz, seq_len, hidden_size] as output size, else transpose(0, 1)
:param input: torch.Tensor [batch_size, sequence_length, 2]
:param kwargs:
:return:
"""
# 0 for tokens that are not masked, 1 for tokens that are masked
with torch.no_grad():
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(0).long()
input = input.narrow(2, 1, input.size(2) - 1)
if adv_ptb_grad:
input.requires_grad = True
if input_ptb is not None:
assert not adv_ptb_grad
with torch.no_grad():
# normalize and add to input / maybe scale over input length?
# do this under fp32
with torch.cuda.amp.autocast(enabled=False):
epsilon = 1.0
input_ptb = input_ptb.float()
input_ptb = input_ptb / F.normalize(input_ptb, p=2.0, dim=2)
input = input.float() + input_ptb * epsilon
if input.size(-1) == 1:
precomputed_tdnn = False
input = input.squeeze(-1)
else:
precomputed_tdnn = True
attn_mask = long_mask
quantize_only = False # self.quantize and not self.dual_output
# don't mask when precomputed tdnn is used, because spec augmentation is used in the dataset
wav2vec_output = self.wav2vec_encoder(input, attn_mask,
mask=self.training, features_only=True, layer=None,
precomputed_tdnn=precomputed_tdnn,
quantize_only=quantize_only,
lang=lang, atb=atb,
checkpointing_ffn=checkpointing_ffn,
checkpointing_self_attn=checkpointing_self_attn)
# output size is always T x B x C
continuous_output = wav2vec_output['x']
time, batch_size = continuous_output.size(0), continuous_output.size(1)
# mask size is B x T (1 for padded positions, 0 for unpadded)
dec_attn_mask = wav2vec_output['padding_mask']
context = continuous_output
if dec_attn_mask is None:
dec_attn_mask = context.new_zeros(batch_size, time).byte()
else:
dec_attn_mask = dec_attn_mask.byte()
wav2vec_context = context
wav2vec_padding_mask = dec_attn_mask
output_dict = defaultdict(lambda: None, {'source': input, 'context': context, 'src_mask': wav2vec_padding_mask,
'src': wav2vec_padding_mask, 'pos_emb': None,
'wav2vec_context': wav2vec_context,
'wav2vec_padding_mask': wav2vec_padding_mask})
return output_dict
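# --- Illustrative sketch (not part of the original file): the two-phase adversarial
# perturbation flow suggested by adv_ptb_grad / input_ptb above. Phase 1 runs the encoder with
# adv_ptb_grad=True to get gradients w.r.t. the waveform, phase 2 feeds that gradient back as
# input_ptb. The loss callable is a placeholder and the unsqueeze restores the channel
# dimension that forward() squeezes away; both are our assumptions, not a verified recipe.
def _example_adversarial_pass(encoder, source, compute_loss):
    import torch
    out = encoder(source, adv_ptb_grad=True)
    loss = compute_loss(out)                                    # placeholder scalar loss
    grad = torch.autograd.grad(loss, out['source'])[0]          # gradient w.r.t. the waveform
    ptb = grad.detach().unsqueeze(-1)                           # back to [B x T x 1]
    return encoder(source, input_ptb=ptb)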
| 8,507
| 40.300971
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/conformer_layers.py
|
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import math
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.dropout import variational_dropout
from onmt.modules.convolution import ConformerConvBlock
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.multilingual_factorized.linear import MFWPositionWiseFeedForward
from onmt.modules.multilingual_factorized.encdec_attention import MFWEncdecMultiheadAttn
from onmt.modules.multilingual_factorized.relative_attention import MFWRelativeSelfMultiheadAttn
class ConformerEncoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0):
super(ConformerEncoderLayer, self).__init__()
# FFN -> SelfAttention -> Conv -> FFN
# PreNorm
self.opt = opt
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.dropout = opt.dropout
self.ffn_scale = 0.5
self.mfw = opt.multilingual_factorized_weights
self.weight_drop = opt.weight_drop
self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if self.mfw:
self.attn = MFWRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
weight_drop=self.weight_drop,
mfw_activation=opt.mfw_activation)
else:
self.attn = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
self.preprocess_mcr_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
if self.mfw:
self.mcr_feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
weight_drop=self.weight_drop,
mfw_activation=opt.mfw_activation)
else:
self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational)
self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
if self.mfw:
self.feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
                                              variational=self.variational,
                                              n_languages=opt.n_languages, rank=opt.mfw_rank,
                                              use_multiplicative=opt.mfw_multiplicative,
                                              weight_drop=self.weight_drop,
                                              mfw_activation=opt.mfw_activation)
else:
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
                                           variational=self.variational)
# there is batch norm inside convolution already
# so no need for layer norm?
self.preprocess_conv = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_conv = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
self.conv = ConformerConvBlock(opt.model_size, opt.conv_kernel)
def forward(self, input, pos_emb, attn_mask, incremental=False, incremental_cache=None, mems=None,
src_lang=None):
assert incremental is False
assert incremental_cache is None
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
ffn_scale = self.ffn_scale / (1 - self.death_rate)
else:
ffn_scale = self.ffn_scale
if coin:
out = self.mcr_feedforward(self.preprocess_mcr_ffn(input), src_lang)
out = out * ffn_scale
if not self.variational:
out = F.dropout(out, p=self.dropout, training=self.training)
else:
out = variational_dropout(out, p=self.dropout, training=self.training)
input = input + out
# attention
attn_input = self.preprocess_attn(input)
if self.mfw:
out, _ = self.attn(attn_input, pos_emb, src_lang, attn_mask, None)
else:
out, _ = self.attn(attn_input, pos_emb, attn_mask, None)
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
# convolution
conv_input = self.preprocess_conv(input)
out = self.conv(conv_input)
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_conv(out, input)
# last ffn
out = self.feedforward(self.preprocess_ffn(input), src_lang)
out = out * ffn_scale
if not self.variational:
out = F.dropout(out, p=self.dropout, training=self.training)
else:
out = variational_dropout(out, p=self.dropout, training=self.training)
input = input + out
return input
return input
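# --- Illustrative sketch (not part of the original file): the stochastic-depth rescaling used
# in the layer above. During training the whole block is skipped with probability death_rate;
# when it survives, its residual branch is divided by (1 - death_rate) so that the expected
# residual matches the evaluation-time behaviour.
def _example_stochastic_depth(residual, death_rate, training):
    import torch
    if training and death_rate > 0:
        if torch.rand(1).item() < death_rate:
            return torch.zeros_like(residual)        # block dropped for this mini-batch
        return residual / (1 - death_rate)           # rescale the surviving branch
    return residual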
| 6,311
| 43.450704
| 109
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/relative_transformer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding
from onmt.modules.pre_post_processing import PrePostProcessing
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
from onmt.modules.sinusoidal_positional_encoding import SinusoidalPositionalEmbedding
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from .relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
from onmt.modules.checkpoint import checkpoint
# from torch.utils.checkpoint import checkpoint
from onmt.modules.identity import Identity
torch.set_printoptions(threshold=500000)
def create_forward_function(module):
def forward_pass(*inputs):
return module(*inputs)
return forward_pass
class SpeechTransformerEncoder(TransformerEncoder):
def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
self.death_rate = opt.death_rate
self.learnable_position_encoding = opt.learnable_position_encoding
self.layer_modules = list()
self.asynchronous = opt.asynchronous
self.max_memory_size = opt.max_memory_size
self.extra_context_size = opt.extra_context_size
self.experimental = opt.experimental
self.unidirectional = opt.unidirectional
self.reversible = opt.src_reversible
self.n_heads = opt.n_heads
self.fast_self_attn = opt.fast_self_attention
self.checkpointing = opt.checkpointing
self.mpw = opt.multilingual_partitioned_weights
self.multilingual_linear_projection = opt.multilingual_linear_projection
self.mln = opt.multilingual_layer_norm
self.no_input_scale = opt.no_input_scale
self.learnable_position_encoding = opt.learnable_position_encoding
self.rotary_position_encoding = opt.rotary_position_encoding
self.max_pos_length = opt.max_pos_length
# TODO: multilingual linear transformation
# build_modules will be called from the inherited constructor
super().__init__(opt, dicts, positional_encoder, encoder_type, language_embeddings)
# learnable position encoding
if self.learnable_position_encoding:
assert not self.rotary_position_encoding
self.positional_encoder = None
elif self.rotary_position_encoding:
from onmt.modules.rotary_postional_encodings import SinusoidalEmbeddings
self.positional_encoder = SinusoidalEmbeddings(opt.model_size // opt.n_heads)
else:
# or using pre-set sinusoidal
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
if self.multilingual_linear_projection:
self.linear_proj = nn.Parameter(torch.Tensor(opt.n_languages, self.model_size, self.model_size))
std_ = math.sqrt(2.0 / (self.model_size + self.model_size))
torch.nn.init.normal_(self.linear_proj, 0.0, std_)
self.mln = opt.multilingual_layer_norm
if not opt.rezero:
self.postprocess_layer = PrePostProcessing(opt.model_size, opt.dropout, sequence='n', multilingual=self.mln,
n_languages=opt.n_languages)
else:
self.postprocess_layer = Identity()
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Transformer Encoder with Relative Attention with %.2f expected layers" % e_length)
if self.unidirectional:
print("* Running a unidirectional Encoder.")
self.layer_modules = nn.ModuleList()
for _l in range(self.layers):
# linearly decay the death rate
death_r = (_l + 1.0) / self.layers * self.death_rate
block = RelativeTransformerEncoderLayer(self.opt, death_rate=death_r)
self.layer_modules.append(block)
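# --- Illustrative sketch (not part of the original file): the linearly decayed per-layer death
# rates assigned in build_modules() above; deeper layers are dropped with higher probability,
# reaching the configured death_rate at the last layer.
def _example_layer_death_rates(num_layers, death_rate):
    return [(_l + 1.0) / num_layers * death_rate for _l in range(num_layers)]
# e.g. _example_layer_death_rates(4, 0.2) ≈ [0.05, 0.1, 0.15, 0.2]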
def forward(self, input, input_pos=None, input_lang=None, streaming=False, factorize=True,
return_states=False, pretrained_layer_states=None, **kwargs):
"""
:param pretrained_layer_states:
:param return_states: also return the (unnormalized) outputs of each state
:param factorize:
:param input: [B x T x Input_Size]
:param input_pos: [B x T] positions
:param input_lang: [B] language ids of each sample
:param streaming: connect different segments in transformer-xl style
:param kwargs:
:return:
"""
with torch.no_grad():
nan_mask = torch.isnan(input)
if nan_mask.any():
input.masked_fill_(nan_mask, 0)
if not self.cnn_downsampling:
mask_src = input.narrow(2, 0, 1).squeeze(2).transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
dec_attn_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
input = input.narrow(2, 1, input.size(2) - 1)
emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
input.size(1), -1)
emb = emb.type_as(input)
else:
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
# first resizing to fit the CNN format
# note that this is actually conv2d so channel=1, f=40
input = input.view(input.size(0), input.size(1), -1, self.channels)
input = input.permute(0, 3, 1, 2) # [bsz, channels, time, f]
# apply CNN
input = self.audio_trans(input)
input = input.permute(0, 2, 1, 3).contiguous()
input = input.view(input.size(0), input.size(1), -1)
input = self.linear_trans(input)
dec_attn_mask = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
mask_src = long_mask[:, 0:input.size(1) * 4:4].transpose(0, 1).unsqueeze(0)
# the size seems to be B x T ?
emb = input
emb = emb.transpose(0, 1)
input = input.transpose(0, 1)
abs_pos = None
mem_len = 0
mems = None
if self.unidirectional:
qlen = input.size(0)
klen = qlen + mem_len
attn_mask_src = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
pad_mask = mask_src
mask_src = pad_mask + attn_mask_src
# dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
mask_src = mask_src.gt(0)
mask_src = mask_src.bool()
""" Scale the emb by sqrt(d_model) """
if not self.no_input_scale:
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
qlen = input.size(0)
klen = qlen + mem_len
if not self.learnable_position_encoding:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
# pos_emb has size 2T+1 x 1 x H
pos_emb = self.positional_encoder(pos, bsz=input.size(1))
pos_emb = self.preprocess_layer(pos_emb)
else:
range_vec = torch.arange(klen, device=emb.device)
range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
distance_mat = range_vec - range_mat.transpose(0, 1)
distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
pos_emb = distance_mat
# B x T x H -> T x B x H
context = emb
if streaming:
hids = [context]
# Apply dropout to both context and pos_emb
context = self.preprocess_layer(context)
# maybe try multiplicative ...
# adding the speech representation to the first input
if pretrained_layer_states is not None:
context = context + pretrained_layer_states
layer_states = dict()
if self.mpw:
input_lang = self.factor_embeddings(input_lang).squeeze(0)
assert input_lang.ndim == 1
if self.reversible:
context = torch.cat([context, context], dim=-1)
assert streaming is not True, "Streaming and Reversible is not usable yet."
context = ReversibleEncoderFunction.apply(context, pos_emb, self.layer_modules, mask_src)
else:
for i, layer in enumerate(self.layer_modules):
# src_len x batch_size x d_model
mems_i = mems[i] if mems is not None and streaming and self.max_memory_size > 0 else None
context = layer(context, pos_emb, mask_src, mems=mems_i, src_lang=input_lang, factorize=factorize)
# Summing the context
# if pretrained_layer_states is not None and i == (self.layers - 1):
# context = context + pretrained_layer_states[i]
if streaming:
hids.append(context)
if return_states:
layer_states = context
# final layer norm
context = self.postprocess_layer(context, factor=input_lang)
if self.multilingual_linear_projection:
language_linear_weight_ = torch.index_select(self.linear_proj, 0, input_lang).squeeze(0)
# context = F.linear(context, language_linear_weight_)
t, b = context.size(0), context.size(1)
context = torch.mm(context.view(-1, context.size(-1)), language_linear_weight_)
context = context.view(t, b, context.size(-1))
output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask,
'src': input, 'pos_emb': pos_emb})
if return_states:
output_dict['layer_states'] = layer_states
if streaming:
# streaming_state.prev_src_mem_size += sum(input_length.tolist())
# streaming_state.prune_source_memory(self.max_memory_size)
streaming_state.update_src_mems(hids, qlen)
output_dict['streaming_state'] = streaming_state
return output_dict
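# --- Illustrative sketch (added for clarity; not part of the original model) ---
# The learnable-position branch in forward() above indexes an embedding table with
# clamped relative distances. A minimal, self-contained reproduction of that index
# matrix; the helper name `_demo_relative_position_indices` is ours.
def _demo_relative_position_indices(klen=5, max_pos=3):
    import torch
    range_vec = torch.arange(klen)
    range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
    distance_mat = range_vec - range_mat.transpose(0, 1)  # entry [i, j] = j - i
    distance_mat.clamp_(-max_pos, max_pos).add_(max_pos)  # shift into [0, 2 * max_pos]
    return distance_mat
# With klen=5 and max_pos=3 the first row is [3, 4, 5, 6, 6]: keys to the right of
# the query get larger indices, saturating at 2 * max_pos.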
class SpeechTransformerDecoder(TransformerDecoder):
def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):
self.death_rate = opt.death_rate
self.max_memory_size = opt.max_memory_size
self.stream_context = opt.stream_context
self.extra_context_size = opt.extra_context_size
self.n_heads = opt.n_heads
self.fast_self_attn = opt.fast_self_attention
self.mpw = opt.multilingual_partitioned_weights
self.learnable_position_encoding = opt.learnable_position_encoding
self.max_pos_length = opt.max_pos_length
# build_modules will be called from the inherited constructor
super().__init__(opt, dicts, positional_encoder, language_embeddings,
ignore_source,
allocate_positions=False)
if self.learnable_position_encoding:
self.positional_encoder = None
else:
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
# Parameters for the position biases - deprecated. kept for backward compatibility
# self.r_w_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
# self.r_r_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
self.mln = opt.multilingual_layer_norm
if not opt.rezero:
self.postprocess_layer = PrePostProcessing(opt.model_size, opt.dropout, sequence='n', multilingual=self.mln,
n_languages=opt.n_languages)
else:
self.postprocess_layer = Identity()
def renew_buffer(self, new_len):
return
def build_modules(self):
self.death_rate = 0.0
e_length = expected_length(self.layers, self.death_rate)
self.opt.ignore_source = self.ignore_source
opt = self.opt
print("* Speech Transformer Decoder with Relative Attention with %.2f layers" % e_length)
self.layer_modules = nn.ModuleList()
for _l in range(self.layers):
# linearly decay the death rate
death_r = (_l + 1.0) / self.layers * self.death_rate
block = RelativeTransformerDecoderLayer(self.opt, death_rate=death_r)
self.layer_modules.append(block)
def process_embedding(self, input, input_lang=None):
return input
# TODO: merging forward_stream and forward
# TODO: write a step function for encoder
def forward(self, input, context, src, input_pos=None,
src_lang=None, tgt_lang=None, streaming=False, factorize=True, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
""" Embedding: batch_size x len_tgt x d_model """
input = input.transpose(0, 1) # T x B
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
emb = emb * math.sqrt(self.model_size)
mem_len = 0
mems = None
extra_context = None
if self.use_language_embedding:
lang_emb = self.language_embeddings(tgt_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
elif self.encoder_type in ["wav2vec2", "wav2vec2_scp"]:
mask_src = src
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
qlen = input.size(0)
klen = qlen + mem_len
# preparing self-attention mask. The input must be left-aligned
dec_attn_mask = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
dec_attn_mask = dec_attn_mask.bool()
if not self.learnable_position_encoding:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos, bsz=input.size(1))
pos_emb = self.preprocess_layer(pos_emb)
else:
range_vec = torch.arange(klen, device=emb.device)
range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
distance_mat = range_vec - range_mat.transpose(0, 1)
distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
pos_emb = distance_mat
# pos_emb = self.positional_encoder(pos, bsz=input.size(1))
output = self.preprocess_layer(emb.contiguous())
# pos_emb = self.preprocess_layer(pos_emb)
if self.mpw:
src_lang = self.factor_embeddings(src_lang).squeeze(0)
tgt_lang = self.factor_embeddings(tgt_lang).squeeze(0)
assert src_lang.ndim == 1 and tgt_lang.ndim == 1
for i, layer in enumerate(self.layer_modules):
output, coverage, _ = layer(output, context, pos_emb, dec_attn_mask, mask_src,
src_lang=src_lang, tgt_lang=tgt_lang, factorize=factorize)
output = self.postprocess_layer(output, factor=tgt_lang)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
output_dict = defaultdict(lambda: None, output_dict)
return output_dict
def step(self, input, decoder_state, streaming=False):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
context = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
src_lang = decoder_state.src_lang
buffering = decoder_state.buffering
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1) # B x T
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
src_mask = decoder_state.src_mask
if buffering:
# use the last value of input to continue decoding
if input.size(1) > 1:
input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
else:
input_ = input.transpose(0, 1)
else:
input_ = input.transpose(0, 1) # from B x T to T x B
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_) * math.sqrt(self.model_size)
input = input.transpose(0, 1)
klen = input.size(0)
# emb = self.word_lut(input) * math.sqrt(self.model_size)
if self.use_language_embedding:
lang_emb = self.language_embeddings(lang) # B x H
if self.language_embedding_type in ['sum', 'all_sum']:
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
if input.size(0) == 1:
emb[0] = lang_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
# prepare position encoding
qlen = emb.size(0)
mlen = klen - qlen
# pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
# pos_emb = self.positional_encoder(pos)
if self.learnable_position_encoding:
if buffering:
distance_mat = torch.arange(-klen + 1, 1, 1, device=emb.device).unsqueeze(0)
else:
range_vec = torch.arange(klen, device=emb.device)
range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
distance_mat = range_vec - range_mat.transpose(0, 1)
distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
pos_emb = distance_mat
else:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
dec_attn_mask = torch.triu(
emb.new_ones(klen, klen), diagonal=1 + mlen).byte()[:, :, None] # [:, :, None]
if buffering:
dec_attn_mask = dec_attn_mask[-1].unsqueeze(0)
dec_attn_mask = dec_attn_mask.bool()
if context is not None:
if self.encoder_type == "audio":
# The "slow" version of translator only keeps the source mask of audio as src
# That's why we need to check whether the src has already been narrowed
if src.dim() == 3:
if not self.encoder_cnn_downsampling:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
elif self.encoder_cnn_downsampling:
long_mask = src.eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
elif self.encoder_type == "wav2vec2":
# mask_src = src
mask_src = src_mask
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
output = emb.contiguous()
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
if buffering:
output, coverage, buffer = layer(output, context, pos_emb, dec_attn_mask, mask_src,
tgt_lang=lang, src_lang=src_lang,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
else:
output, coverage, _ = layer(output, context, pos_emb, dec_attn_mask, mask_src,
tgt_lang=lang, src_lang=src_lang)
# normalize and take the last time step
output = self.postprocess_layer(output, factor=lang)
output = output[-1].unsqueeze(0)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = context
return output_dict
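# --- Illustrative sketch (added for clarity; not part of the original model) ---
# Both forward() and step() above build the causal self-attention mask with
# torch.triu(ones(qlen, klen), diagonal=1 + mem_len), so position i can only attend
# to positions j <= i + mem_len. A tiny check; `_demo_causal_mask` is our name.
def _demo_causal_mask():
    import torch
    qlen, mem_len = 4, 0
    klen = qlen + mem_len
    mask = torch.triu(torch.ones(qlen, klen), diagonal=1 + mem_len).bool()
    # True marks a masked (future) position
    assert mask[0].tolist() == [False, True, True, True]
    return mask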
class RelativeTransformer(Transformer):
def create_decoder_state(self, batch, beam_size=1, type=1, streaming=False, previous_decoding_state=None,
factorize=True,
pretrained_layer_states=None, **kwargs):
"""
Generate a new decoder state based on the batch input
:param factorize:
:param pretrained_layer_states:
:param previous_decoding_state:
:param streaming:
:param type:
:param batch: Batch object (may not contain target during decoding)
:param beam_size: Size of beam used in beam search
:return:
"""
# in this case batch size should be 1
src = batch.get('source')
src_pos = batch.get('source_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
src_transposed = src.transpose(0, 1)
if previous_decoding_state is None:
# if the previous stream is None (the first segment in the stream)
# then proceed normally like normal translation
# init a new stream state
streaming_state = self.init_stream()
encoder_output = self.encoder(src_transposed, input_pos=src_pos,
input_lang=src_lang, src_lengths=src_lengths,
streaming=streaming, streaming_state=streaming_state,
factorize=factorize, pretrained_layer_states=pretrained_layer_states)
if streaming:
decoder_state = StreamDecodingState(src, tgt_lang, encoder_output['context'],
encoder_output['src_mask'],
beam_size=beam_size, model_size=self.model_size, type=type,
cloning=True, streaming_state=streaming_state)
else:
decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'],
encoder_output['src_mask'],
beam_size=beam_size, model_size=self.model_size, type=type)
else:
streaming_state = previous_decoding_state.streaming_state
# to have the same batch/beam size with the previous memory ..
src_transposed = src_transposed.repeat(beam_size, 1)
src = src.repeat(1, beam_size)
encoder_output = self.encoder(src_transposed, input_pos=src_pos,
input_lang=src_lang, src_lengths=src_lengths,
streaming=True, streaming_state=streaming_state)
context = encoder_output['context']
if self.decoder.extra_context_size > 0:
# print("Using extra context with extra %d states" % self.decoder.extra_context_size)
# print("")
prev_context = previous_decoding_state.context
extra_context = prev_context[-self.decoder.extra_context_size:].detach()
context = torch.cat([extra_context, context], dim=0)
prev_src = previous_decoding_state.src[-self.decoder.extra_context_size:].detach()
src = torch.cat([prev_src, src], dim=0)
decoder_state = StreamDecodingState(src, tgt_lang, context,
encoder_output['src_mask'],
beam_size=beam_size, model_size=self.model_size, type=type,
cloning=False, streaming_state=streaming_state)
return decoder_state
def init_stream(self):
param = next(self.parameters())
layers = self.decoder.layers
streaming_state = StreamState(layers, self.decoder.max_memory_size, param.device, param.dtype)
return streaming_state
def step(self, input_t, decoder_state, streaming=False):
"""
Decoding function:
generate new decoder output based on the current input and current decoder state
the decoder state is updated in the process
:param streaming:
:param input_t: the input word index at time t
:param decoder_state: object DecoderState containing the buffers required for decoding
:return: a dictionary containing: log-prob output and the attention coverage
"""
output_dict = self.decoder.step(input_t, decoder_state, streaming=streaming)
output_dict['src'] = decoder_state.src.transpose(0, 1)
log_prob = self.generator[0](output_dict).squeeze(0)
log_prob = F.log_softmax(log_prob, dim=-1, dtype=torch.float32)
coverage = output_dict['coverage']
last_coverage = coverage[:, -1, :].squeeze(1)
output_dict['log_prob'] = log_prob
output_dict['coverage'] = last_coverage
return output_dict
def set_memory_size(self, src_memory_size, tgt_memory_size):
self.encoder.max_memory_size = src_memory_size
self.decoder.max_memory_size = tgt_memory_size
| 28,183
| 42.293395
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/relative_transformer_layers.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
from onmt.modules.pre_post_processing import PrePostProcessing
from onmt.modules.linear import FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.multilingual_factorized.linear import MFWPositionWiseFeedForward
from onmt.modules.multilingual_factorized.encdec_attention import MFWEncdecMultiheadAttn
from onmt.modules.multilingual_factorized.relative_attention import MFWRelativeSelfMultiheadAttn
from onmt.modules.multilingual_partitioned.linear import MPPositionWiseFeedForward
from onmt.modules.multilingual_partitioned.encdec_attention import MPEncdecMultiheadAttn
from onmt.modules.multilingual_partitioned.relative_attention import MPRelativeSelfMultiheadAttn
from onmt.modules.convolution import ConformerConvBlock
from onmt.modules.identity import Identity
class LIDFeedForward(nn.Module):
def __init__(self, *args):
super().__init__()
def preprocessing(rezero, *args, **kwargs):
if rezero:
return Identity()
else:
return PrePostProcessing(*args, **kwargs)
class RelativeTransformerEncoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0, **kwargs):
super(RelativeTransformerEncoderLayer, self).__init__()
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.fast_self_attention = opt.fast_self_attention
self.depthwise_conv = opt.depthwise_conv
self.mfw = opt.multilingual_factorized_weights
self.mpw = opt.multilingual_partitioned_weights
self.mln = opt.multilingual_layer_norm
self.no_ffn = opt.no_ffn
self.weight_drop = opt.weight_drop
self.multilingual_adapter = opt.multilingual_adapter
self.adapter_bottleneck_size = opt.adapter_bottleneck_size
self.macaron = opt.macaron
self.ffn_scale = 0.5 if self.macaron else 1
self.rezero = opt.rezero
self.learnable_pos = opt.learnable_position_encoding
self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
self.ffn_dropout = opt.ffn_dropout if opt.ffn_dropout >= 0 else opt.dropout
if self.macaron:
self.preprocess_mcr_ffn = preprocessing(self.rezero, opt.model_size, 0.0,
multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
self.postprocess_mcr_ffn = PrePostProcessing(opt.model_size, self.residual_dropout,
sequence='dz' if self.rezero else 'da',
variational=self.variational)
if self.mfw:
self.mcr_feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
else:
self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
if self.mfw:
assert not self.mpw, "[ERROR] factorized and partitioned weights cannot be used at the same time."
self.preprocess_attn = preprocessing(self.rezero, opt.model_size, 0.0,
multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
self.postprocess_attn = PrePostProcessing(opt.model_size, self.residual_dropout,
sequence='dz' if self.rezero else 'da',
variational=self.variational)
if not self.no_ffn:
self.preprocess_ffn = preprocessing(self.rezero, opt.model_size, 0.0,
multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
self.postprocess_ffn = PrePostProcessing(opt.model_size, self.residual_dropout,
sequence='dz' if self.rezero else 'da',
variational=self.variational)
d_head = opt.model_size // opt.n_heads
if self.mfw:
if not self.no_ffn:
self.feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
weight_drop=self.weight_drop,
mfw_activation=opt.mfw_activation,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
self.multihead = MFWRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
learnable_pos=self.learnable_pos,
max_pos=opt.max_pos_length,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
weight_drop=self.weight_drop,
mfw_activation=opt.mfw_activation)
elif self.mpw:
if not self.no_ffn:
self.feedforward = MPPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
factor_size=opt.mpw_factor_size)
self.multihead = MPRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
factor_size=opt.mpw_factor_size)
else:
if not self.no_ffn:
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
self.multihead = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
learnable_pos=self.learnable_pos,
max_pos=opt.max_pos_length)
if self.depthwise_conv:
self.preprocess_conv = preprocessing(self.rezero, opt.model_size, 0.0,
multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
self.postprocess_conv = PrePostProcessing(opt.model_size, self.residual_dropout,
sequence='dz' if self.rezero else 'da',
variational=self.variational)
self.depthwise_conv = ConformerConvBlock(opt.model_size, opt.conv_kernel, bias=True)
else:
self.depthwise_conv = None
if self.multilingual_adapter:
from onmt.modules.multilingual_factorized.multilingual_adapters import MultilingualAdapter
self.adapters = MultilingualAdapter(opt.model_size, opt.adapter_bottleneck_size,
n_languages=opt.n_languages,
dropout=opt.dropout)
def forward(self, input, pos_emb, attn_mask, src_lang=None, factorize=False,
incremental=False, incremental_cache=None, mems=None):
"""
:param factorize:
:param input: tensor [T x B x H]
:param pos_emb: tensor [T x 1 x H]
:param attn_mask: tensor [1 x T x B]
:param src_lang: tensor [B] or None
:param incremental: None
:param incremental_cache:
:param mems: None
:return:
"""
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
if self.macaron:
out = self.mcr_feedforward(self.preprocess_mcr_ffn(input), src_lang, factorize=factorize)
if self.training and self.death_rate > 0:
ffn_scale = self.ffn_scale / (1 - self.death_rate)
else:
ffn_scale = self.ffn_scale
input = self.postprocess_mcr_ffn(out * ffn_scale, input)
"""
Self-attention block
"""
query = self.preprocess_attn(input, factor=src_lang)
if self.mfw or self.mpw:
out, _ = self.multihead(query, pos_emb, src_lang, attn_mask, None, factorize=factorize,
incremental=incremental, incremental_cache=incremental_cache, )
else:
out, _ = self.multihead(query, pos_emb, attn_mask, None, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
"""
Convolution block
"""
if self.depthwise_conv:
if attn_mask is not None and attn_mask.any():
conv_mask = attn_mask.squeeze(0).unsqueeze(-1)
else:
conv_mask = None
out = self.depthwise_conv(self.preprocess_conv(input, factor=src_lang), pad_mask=conv_mask)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_conv(out, input)
"""
Feed forward layer
"""
if not self.no_ffn:
out = self.feedforward(self.preprocess_ffn(input, factor=src_lang), src_lang, factorize=factorize)
# rescaling before residual
if self.training and self.death_rate > 0:
ffn_scale = self.ffn_scale / (1 - self.death_rate)
else:
ffn_scale = self.ffn_scale
input = self.postprocess_ffn(out * ffn_scale, input)
if self.multilingual_adapter:
input = self.adapters(input, src_lang)
if incremental:
return input, incremental_cache
return input
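# --- Illustrative sketch (added for clarity; not part of the layer code) ---
# During training the layer above is skipped with probability `death_rate` and, when
# kept, its residual branch is divided by (1 - death_rate), so the expectation over
# coin flips matches the always-on behaviour used at inference. A Monte-Carlo sanity
# check of that rescaling; `_demo_stochastic_depth_expectation` is our name.
def _demo_stochastic_depth_expectation(death_rate=0.3, trials=200000):
    import torch
    branch_out = torch.tensor(1.0)  # stand-in for a sub-layer output
    keep = (torch.rand(trials) >= death_rate).float()
    rescaled = keep * branch_out / (1 - death_rate)
    return rescaled.mean().item()  # ~1.0 for any death_rate in [0, 1)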
class RelativeTransformerDecoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0, lid_net=None):
super(RelativeTransformerDecoderLayer, self).__init__()
self.ignore_source = opt.ignore_source
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.mfw = opt.multilingual_factorized_weights
self.mpw = opt.multilingual_partitioned_weights
self.mln = opt.multilingual_layer_norm
self.weight_drop = opt.weight_drop
self.multilingual_adapter = opt.multilingual_adapter
self.adapter_bottleneck_size = opt.adapter_bottleneck_size
self.macaron = opt.macaron
self.ffn_scale = 0.5 if self.macaron else 1
self.rezero = opt.rezero
self.learnable_pos = opt.learnable_position_encoding
self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
self.ffn_dropout = opt.ffn_dropout if opt.ffn_dropout >= 0 else opt.dropout
self.preprocess_attn = preprocessing(self.rezero, opt.model_size, 0.0, sequence='n',
multilingual=self.mln, n_languages=opt.n_languages)
self.postprocess_attn = PrePostProcessing(opt.model_size, self.residual_dropout,
sequence='dz' if self.rezero else 'da',
variational=self.variational)
if self.macaron:
self.preprocess_mcr_ffn = preprocessing(self.rezero, opt.model_size, 0.0, sequence='n',
multilingual=self.mln, n_languages=opt.n_languages)
self.postprocess_mcr_ffn = PrePostProcessing(opt.model_size, self.residual_dropout,
sequence='dz' if self.rezero else 'da',
variational=self.variational)
if self.mfw:
self.mcr_feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
else:
self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
if not self.ignore_source:
self.preprocess_src_attn = preprocessing(self.rezero, opt.model_size, 0.0, sequence='n',
multilingual=self.mln, n_languages=opt.n_languages)
self.postprocess_src_attn = PrePostProcessing(opt.model_size, self.residual_dropout,
sequence='dz' if self.rezero else 'da',
variational=self.variational)
if self.mfw:
self.multihead_src = MFWEncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
weight_drop=self.weight_drop,
mfw_activation=opt.mfw_activation)
elif self.mpw:
self.multihead_src = MPEncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout,
factor_size=opt.mpw_factor_size)
else:
self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)
self.preprocess_ffn = preprocessing(self.rezero, opt.model_size, 0.0, sequence='n',
multilingual=self.mln, n_languages=opt.n_languages)
self.postprocess_ffn = PrePostProcessing(opt.model_size, self.residual_dropout, sequence='dz' if self.rezero else 'da',
variational=self.variational)
d_head = opt.model_size // opt.n_heads
if self.mfw:
self.feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
weight_drop=self.weight_drop,
mfw_activation=opt.mfw_activation,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
self.multihead_tgt = MFWRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
learnable_pos=self.learnable_pos,
max_pos=opt.max_pos_length,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
weight_drop=self.weight_drop,
mfw_activation=opt.mfw_activation)
elif self.mpw:
self.feedforward = MPPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
factor_size=opt.mpw_factor_size)
self.multihead_tgt = MPRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
factor_size=opt.mpw_factor_size)
else:
self.multihead_tgt = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
learnable_pos=self.learnable_pos,
max_pos=opt.max_pos_length)
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
# self.lfv_multilingual = opt.lfv_multilingual
#
# if opt.lfv_multilingual:
# self.lid_net = lid_net
# self.lfv_mapper = nn.Linear(opt.bottleneck_size, opt.model_size)
# else:
# self.lid_net = None
# self.lfv_mapper = None
if self.multilingual_adapter:
from onmt.modules.multilingual_factorized.multilingual_adapters import MultilingualAdapter
self.adapters = MultilingualAdapter(opt.model_size, opt.adapter_bottleneck_size,
n_languages=opt.n_languages,
dropout=opt.dropout)
def forward(self, input, context, pos_emb, mask_tgt=None, mask_src=None,
src_lang=None, tgt_lang=None,
incremental=False, incremental_cache=None, reuse_source=True, mems=None, factorize=False):
""" Self attention layer
layernorm > attn > dropout > residual
"""
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
# input and context should be time first ?
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems, factor=tgt_lang)
else:
mems = None
if self.macaron:
out = self.mcr_feedforward(self.preprocess_mcr_ffn(input), src_lang, factorize=factorize)
if self.training and self.death_rate > 0:
ffn_scale = self.ffn_scale / (1 - self.death_rate)
else:
ffn_scale = self.ffn_scale
input = self.postprocess_mcr_ffn(out * ffn_scale, input)
query = self.preprocess_attn(input, factor=tgt_lang)
if self.mfw or self.mpw:
out, _ = self.multihead_tgt(query, pos_emb, tgt_lang, None, mask_tgt, factorize=factorize,
incremental=incremental, incremental_cache=incremental_cache)
else:
out, _ = self.multihead_tgt(query, pos_emb, None, mask_tgt, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
if not self.ignore_source:
query = self.preprocess_src_attn(input, factor=tgt_lang)
incremental_source = incremental and reuse_source
if self.mfw or self.mpw:
out, coverage = self.multihead_src(query, context, context, tgt_lang, tgt_lang, mask_src,
factorize=factorize,
incremental=incremental_source,
incremental_cache=incremental_cache)
else:
out, coverage = self.multihead_src(query, context, context, mask_src,
incremental=incremental_source,
incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_src_attn(out, input)
else:
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input, factor=tgt_lang), tgt_lang, factorize=factorize)
# rescaling before residual
if self.training and self.death_rate > 0:
ffn_scale = self.ffn_scale / (1 - self.death_rate)
else:
ffn_scale = self.ffn_scale
input = self.postprocess_ffn(out * ffn_scale, input)
if self.multilingual_adapter:
input = self.adapters(input, tgt_lang)
else:
coverage = None
return input, coverage, incremental_cache
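# --- Illustrative sketch (added for clarity; not part of this file's API) ---
# Every block in the layers above follows the pre-norm pattern described in the
# docstrings: layernorm > sub-layer > dropout > residual. A stripped-down version
# using the `nn` imported at the top of this file; `_DemoPreNormBlock` is our name
# and it ignores the rezero / multilingual options that PrePostProcessing handles.
class _DemoPreNormBlock(nn.Module):
    def __init__(self, d_model, sublayer, dropout=0.1):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.sublayer = sublayer          # e.g. an attention or feed-forward module
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # pre-norm residual: x + dropout(sublayer(layernorm(x)))
        return x + self.dropout(self.sublayer(self.norm(x)))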
| 24,391
| 52.026087
| 127
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/mssm/mhs4.py
|
#!/usr/bin/env python3
from typing import Optional, List, Tuple, Union
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
import torch.nn.functional as F
from torch import Tensor
# import pykeops
# import pykeops.torch
# from pykeops.torch import LazyTensor
from einops import rearrange, repeat
from opt_einsum import contract
from torch.cuda.amp import autocast
try:
from .ssm_kernel.ssm_kernel_coefficient import compute_kernel_coefficient
except ImportError:
from ssm_kernel.ssm_kernel_coefficient import compute_kernel_coefficient
@torch.no_grad()
def bilinear_discretization(
A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, D: torch.Tensor, T: torch.Tensor
):
"""
Performs a bilinear transformation of the (diagonal + lowrank) transition A and input matrix B.
For a given tensor of per-channel time steps T, this function returns a discretized A and B for every head and input channel.
Parameters:
A: shape (Q, N, N)
B: shape (Q, N)
C: shape (Q, C, H, N)
D: shape (Q, C, H)
T: shape (Q, H)
Returns:
dA: shape (Q, H, N, N)
dB: shape (Q, H, N)
dC: shape (Q, C, H, N)
dD: shape (Q, C, H)
"""
# Factor term reused for A and B
factor = 0.50 * contract("qh,qnm->qhnm", T, A)
# Get identity (1, N, N)
identity = torch.eye(A.size(-1)).to(A).unsqueeze(0).unsqueeze(0)
# Correction term
correction = torch.linalg.inv(identity - factor)
# Get bilinear A and B
dA = contract("qhnm,qhmk->qhnk", correction, identity + factor)
dB = contract("qhnm,qh,qm->qhn", correction, T, B)
return dA, dB, C, D
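# --- Illustrative sketch (added for clarity) ---
# bilinear_discretization() above applies the standard bilinear (Tustin) rule
#   dA = (I - T/2 * A)^{-1} (I + T/2 * A),   dB = (I - T/2 * A)^{-1} (T * B),
# batched over heads and channels. A minimal single-system version of the same rule;
# `_demo_bilinear_step` is our name and it drops the head/channel batching.
def _demo_bilinear_step(A, B, timestep):
    identity = torch.eye(A.size(-1), dtype=A.dtype, device=A.device)
    factor = 0.5 * timestep * A
    dA = torch.linalg.solve(identity - factor, identity + factor)
    dB = torch.linalg.solve(identity - factor, timestep * B.unsqueeze(-1)).squeeze(-1)
    return dA, dB
# For a stable continuous-time A (eigenvalues with negative real part) the discrete
# dA returned here has spectral radius < 1, so the recurrence stays bounded.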
def get_activation(act: str = "gelu"):
if act == "relu":
return nn.ReLU()
if act == "gelu":
return nn.GELU()
if act == "swish":
return nn.SiLU()
if act == "glu":
return nn.GLU()
return nn.Identity()
def gen_noisy_linear_weights(parameter_noise, weight):
"""Get Gaussian noisy linear weights based on given noise level ....
and the weights themselves. The noise are normalized per channel (dim=1).
InputArgs:
parameter_noise: float, noise level, [0.0, 1.0]
weight: Tensor, a weight tensor of a matrix
Return:
noisy_weight: Tensor, same dimension as weight, but with noise added.
"""
noise = torch.randn_like(weight).to(device=weight.device)
normalized_noise = noise / torch.norm(noise, dim=1, keepdim=True)
w_norm = torch.norm(weight, dim=1, keepdim=True).detach()
scale = parameter_noise * w_norm
noisy_weight = weight + scale * normalized_noise
return noisy_weight
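# --- Illustrative sketch (added for clarity) ---
# Because the noise is normalised per output row (dim=1) and rescaled by
# parameter_noise * ||w_row||, the perturbation added by gen_noisy_linear_weights
# has exactly that norm in every row. A quick numeric check; `_demo_noise_magnitude`
# is our name.
def _demo_noise_magnitude(parameter_noise=0.1):
    weight = torch.randn(8, 16)
    noisy = gen_noisy_linear_weights(parameter_noise, weight)
    delta_norm = torch.norm(noisy - weight, dim=1)
    expected = parameter_noise * torch.norm(weight, dim=1)
    assert torch.allclose(delta_norm, expected, atol=1e-5)
    return delta_norm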
class Linear(torch.nn.Linear):
def __init__(
self,
input_dim,
output_dim,
bias=True,
parameter_noise: float = 0.0,
device=None,
dtype=None,
):
super(Linear, self).__init__(
in_features=input_dim,
out_features=output_dim,
bias=bias,
device=device,
dtype=dtype,
)
# mirror torch.nn.Linear to set device and dtype
self.parameter_noise = parameter_noise
def get_noisy_weight(self, weight):
if self.parameter_noise > 0.0 and self.training:
return gen_noisy_linear_weights(self.parameter_noise, weight)
return weight
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.linear(input, self.get_noisy_weight(self.weight), self.bias)
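# --- Illustrative sketch (added for clarity) ---
# TiedStateSpaceModel.get_kernel below evaluates C (zI - (diag(d) - p p^T))^{-1} B
# through a rank-1 Woodbury (Sherman-Morrison) correction:
#   c R b - (c R p)(p R b) / (1 + p R p),   with R = (zI - diag(d))^{-1} diagonal.
# A numeric check of that identity for one real frequency node; `_demo_woodbury_rank1`
# is our name and it ignores the head/channel batching and the complex FFT nodes.
def _demo_woodbury_rank1():
    torch.manual_seed(0)
    n = 6
    d = -torch.rand(n)                 # diagonal part of the transition
    p = torch.randn(n)                 # rank-1 correction vector
    b = torch.randn(n)                 # input vector
    c = torch.randn(n)                 # output vector
    z = 2.0                            # one (real) frequency node
    # direct evaluation of c^T (zI - (diag(d) - p p^T))^{-1} b
    A = torch.diag(d) - torch.outer(p, p)
    M = z * torch.eye(n) - A
    direct = c @ torch.linalg.solve(M, b.unsqueeze(-1)).squeeze(-1)
    # Woodbury form with the elementwise resolvent r = 1 / (z - d)
    r = 1.0 / (z - d)
    woodbury = (c * r) @ b - ((c * r) @ p) * ((p * r) @ b) / (1 + (p * r) @ p)
    assert torch.allclose(direct, woodbury, atol=1e-5)
    return direct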
class TiedStateSpaceModel(nn.Module):
def __init__(
self,
input_dim: int,
hidden_dim: int = 64,
num_heads: int = 1,
channels: int = 1,
scale: float = 0.50,
maxlen: int = 256,
timestep_min: float = 0.010,
timestep_max: float = 0.160,
parameter_noise: float = 0.00,
use_fast_kernel: bool = True,
create_on_gpu=True
):
super().__init__()
# Store inputs
self.input_dim = self.H = input_dim # input dimensions
self.hidden_dim = self.N = hidden_dim  # N = SSM state dimension
self.num_heads = self.Q = num_heads # Q = number of heads
self.channels = self.C = channels
self.parameter_noise = parameter_noise
# Create diagonal transition matrix
self.diagonal = nn.Parameter(
math.log(scale) + torch.randn(num_heads, hidden_dim)
)
if create_on_gpu:
self.diagonal.data = self.diagonal.data.cuda()
# print(self.diagonal.device)
device = self.diagonal.device
# Create lowrank correction
self.lowrank = nn.Parameter(torch.randn(num_heads, hidden_dim)).to(device)
# Create discretization step per channel
self.timestep = nn.Parameter(
torch.rand(num_heads, input_dim)
* (math.log(timestep_max) - math.log(timestep_min))
+ math.log(timestep_min)
).to(device)
# Initialise remaining parameters
self.register("input_matrix", (num_heads, hidden_dim), dim=1, device=device)
self.register(
"output_matrix",
(num_heads, self.channels, input_dim, hidden_dim),
dim=hidden_dim,
device=device
)
self.register("skip_matrix", (num_heads, channels, input_dim), dim=1, device=device)
# Register omega parameter
self.setup(maxlen, dtype=torch.cfloat, device=self.diagonal.device)
self.use_fast_kernel = use_fast_kernel
def register(self, name, size, dim, lr=None, device=None):
# Random uniform initialization
weight = torch.rand(*size).to(device)
weight = (2 * weight - 1) / math.sqrt(dim)
# Register trainable parameter
self.register_parameter(name, nn.Parameter(weight))
# Add learning rate
optim = {}
if lr is not None:
optim["lr"] = lr
if len(optim) > 0:
setattr(getattr(self, name), "_optim", optim) # noqa
@torch.no_grad()
def get_correction_factor(self, double=False):
# Get the parameters which are transformed (do not use noisy training on these params)
d = self.get_diagonal() # (Q, N)
p = self.get_lowrank() # (Q, N)
t = self.get_timestep() # (Q, H)
identity = torch.eye(self.hidden_dim).to(d).unsqueeze(0).unsqueeze(0)
# Get the continuous-time matrix (Q, H, N, N)
A = 0.50 * contract("qh,qnm->qhnm", t, self.get_transition(d, p))
# Get discretized A naively
# print("Solving dA = solve(identity - A, identity + A)", A.size(), A.type(), A.device)
dA = torch.linalg.solve(identity - A, identity + A)
# Correction factor
# Get identity (1, N, N)
if double:
return identity + torch.matrix_power(dA, self.maxlen)
return identity - torch.matrix_power(dA, self.maxlen)
@torch.no_grad()
def setup(self, maxlen, dtype, device, double=False):
"""
Calculate (and cache) the FFT nodes and their "unprocessed" version under the bilinear transform.
This should be called every time the internal length changes.
"""
# Update internal length
self.maxlen = maxlen
# Get the correction matrix (H, N, N)
correction = self.get_correction_factor(double)
# Now correct for the length by modifying the output matrix using every input channel
# Do not call the get_output_matrix to avoid noise injection
weight = self.output_matrix.data
weight = contract("qchn,qhnk->qchk", weight, correction).contiguous()
self.output_matrix.data = weight
# Double length if a sequence has been encountered with longer than supported length
if double:
self.maxlen *= 2
self.setup_omega_z(dtype, device)
@torch.no_grad()
def setup_omega_z(self, dtype, device):
self.L = self.maxlen
# Create array on the unit circle
omega = torch.tensor(
np.exp(-2j * np.pi / self.maxlen), dtype=dtype, device=device
)
omega = omega ** torch.arange(self.maxlen, device=device)
# Create the bilinear transformation
z = 2 * (1 - omega) / (1 + omega)
# Store these for faster computation
self.register_buffer("omega", torch.view_as_real(omega))
# define self.z here
self.register_buffer("z", torch.view_as_real(z))
@torch.no_grad()
def setup_linear(self):
"""
This computes the factors necessary to run the recurrent form efficiently.
"""
# Update the output matrix for correction
correction = self.get_correction_factor()
correction = torch.linalg.inv(correction)
# Now correct for the length by modifying the output matrix using every head
# Do not call the get_output_matrix to avoid noise injection
weight = self.output_matrix.data # (..., HN) -> (H, ..., N)
weight = contract("qchn,qhnk->qchk", weight, correction).contiguous()
self.output_matrix.data = weight
# Get all quantities
d = self.get_diagonal() # (Q, N)
p = self.get_lowrank() # (Q, N)
t = self.get_timestep() # (Q, H)
# For the A0 matrix
d0 = 2 / t.unsqueeze(-1) + d.unsqueeze(-2)
f0 = repeat(p, "q n -> q h n", h=self.input_dim)
s0 = 1.0
# For the A1 matrix
d1 = 1 / (2 / t.unsqueeze(-1) - d.unsqueeze(-2))
f1 = d1 * p.unsqueeze(-2)
s1 = 1 / (1 + contract("qhn,qhn,qhn->qh", f0, d1, f0)).unsqueeze(-1)
# Compute the discretized states
dA, dB, dC, dD = bilinear_discretization(
self.get_transition(),
self.input_matrix,
self.output_matrix,
self.skip_matrix,
self.get_timestep(),
)
self.linear_params = {
"d0": d0, # (Q, H, N)
"d1": d1, # (Q, H, N)
"f0": f0, # (Q, H, N)
"f1": f1, # (Q, H, N)
"s0": s0, # (1)
"s1": s1, # (Q, H, 1)
"dA": dA, # (Q, H, N, N)
"dB": dB, # (Q, H, N)
"dC": dC, # (Q, C, H, N)
"dD": dD, # (Q, C, H)
}
def get_noisy_weight(self, weight):
if self.parameter_noise > 0.0 and self.training:
return gen_noisy_linear_weights(self.parameter_noise, weight)
return weight
def get_diagonal(self):
return -torch.exp(self.diagonal)
def get_lowrank(self):
return self.lowrank
def get_transition(self, d=None, p=None):
d = d if d is not None else self.get_diagonal()
p = p if p is not None else self.get_lowrank()
return torch.diag_embed(d) - contract("qm,qn->qmn", p, p)
def get_timestep(self):
return torch.exp(self.timestep)
def get_input_matrix(self):
return self.get_noisy_weight(self.input_matrix) # (Q, H)
def get_output_matrix(self):
return self.get_noisy_weight(self.output_matrix) # (Q, C, H, N)
def get_skip_matrix(self):
return self.get_noisy_weight(self.skip_matrix) # (Q, C, H)
def get_dwoodbury(self, z, d, invt):
# Get the bilinear transformation
z = contract("l,qh->qlh", torch.view_as_complex(z), invt)
# Compute the term and reuse computations (Q, L, H, N)
return 1 / (z.unsqueeze(-1) - d.unsqueeze(-2).unsqueeze(-2))
def compute_slow(self, z, d, t, b, c):
# Get the diagonal component in the woodbury computation
# which will be reused in computing the kernel
# z is forced to be fp32
# the following prevents fp16 underflow, particularly on t
if t.dtype == torch.float16:
t = t.to(z.dtype)
b = b.to(z.dtype)
c = c.to(z.dtype)
d = d.to(z.dtype)
# Get the memory heavy denominator
r = self.get_dwoodbury(z, d, 1 / t) # (Q, L, H, N)
# Compute kernel coeffs
kernelcc = contract("qihn,qlhn,qchn->qiclh", b.to(r.dtype), r, c)
return kernelcc
def get_kernel(self):
# Get the parameters which are transformed
d = self.get_diagonal() # (Q, N)
t = self.get_timestep() # (Q, H)
# Get the lowrank contribution and input matrix
p = self.get_lowrank() # (Q, N)
b = self.get_input_matrix() # (Q, H)
c = self.get_output_matrix() # (Q, C, H, N)
# Since we have tied states
b = repeat(b, "q n -> q 1 h n", h=self.input_dim) # (Q, 1, H, N)
p = repeat(p, "q n -> q 1 h n", h=self.input_dim) # (Q, 1, H, N)
# For batched operations
b = torch.cat([b, p], dim=1) # (Q, 2, H, N)
c = torch.cat([c, p], dim=1) # (Q, C + 1, H, N)
# Get the diagonal component in the woodbury computation
# which will be reused in computing the kernel
# r = self.get_dwoodbury(d, 1 / t) # (Q, L, H, N)
# Compute kernel coeffs
# kernelcc = contract("qihn,qlhn,qchn->qiclh", b.to(r.dtype), r, c)
# Compute kernel coeffs
# kernelcc = self.compute_slow(self.z, d, t, b, c)
# print(self.z.type(), d.type(), t.type(), b.type(), c.type())
kernelcc = compute_kernel_coefficient(self.z, d, t, b, c, fast=self.use_fast_kernel)
# Compute kernel assuming low rank of 1 (Q, 2, C, L, H) -> (Q, 1, C, L, H)
unit = 2 / (1 + torch.view_as_complex(self.omega))
kernel = kernelcc[:, :-1, :-1] - kernelcc[:, -1:, :-1] * kernelcc[
:, :-1, -1:
] / (1 + kernelcc[:, -1:, -1:])
kernel = kernel.squeeze(1) # (Q, C, L, H)
kernel = contract("l,qclh->lqch", unit, kernel)
kernel = torch.fft.irfft(kernel, n=kernel.size(0), dim=0)
return kernel.float()
"""
def get_kernel_lazy(self):
# Get the parameters which are transformed
d = self.get_diagonal() # (Q, N)
t = self.get_timestep() # (Q, H)
# Get the input and output matrix
b = self.get_input_matrix() # (Q, N)
c = self.get_output_matrix() # (Q, C, H, N)
# Force values to be fp32
if t.dtype == torch.float16:
t = t.to(self.z.dtype)
b = b.to(self.z.dtype)
c = c.to(self.z.dtype)
d = d.to(self.z.dtype)
# Map to lazy vectors for memory efficient computation
d = LazyTensor(d.view(self.Q, 1, self.N, 1, 1))
t = LazyTensor(t.view(self.Q, 1, 1, 1, self.H))
b = LazyTensor(b.view(self.Q, 1, self.N, 1, 1))
c = LazyTensor(
c.view(self.Q, self.C, self.H, 1, self.N).transpose(2, 4).contiguous()
)
# Complex Lazy Tensors
z = torch.view_as_complex(self.z)
z = LazyTensor(z.view(1, 1, 1, self.L, 1))
o = 2 / (1 + torch.view_as_complex(self.omega))
o = LazyTensor(o.view(1, 1, 1, self.L, 1))
# Compute the kernel (Q, C, N, L, H)
kernel = o * b * c / (z / t - d)
kernel = kernel.sum(dim=2)
kernel = torch.fft.irfft(kernel, n=kernel.size(-2), dim=-2)
return kernel.permute(2, 0, 1, 3).contiguous().float()
"""
# do we need masking?
def forward(self, u: torch.Tensor):
# Get sequence length (L, B, Q, H)
length = u.size(0)
# Double length if needed
while length > self.maxlen:
self.setup(
self.maxlen,
dtype=torch.cfloat,
device=self.diagonal.device,
double=True,
)
# print(self.z.dtype)
# This would be called only once, at the beginning of fp16 training
if self.z.dtype == torch.float16:
self.setup_omega_z(dtype=torch.cfloat, device=self.diagonal.device)
# For FP16 conversion
fp16 = u.dtype == torch.float16
# Perform state space modelling (L, Q, C, H)
k = self.get_kernel()[:length] # get kernel always in fp32?
# print("kernel type", k.type())
# k = self.get_kernel_lazy()[:length]
# Now compute the fourier transform
# breakpoint()
# k = k.type_as(u)
k_f = torch.fft.rfft(k.float(), n=2 * length, dim=0)
uu = u.to(torch.float32) if fp16 else u
u_f = torch.fft.rfft(uu, n=2 * length, dim=0)
x_f = contract("lqch,lbqh->lbqch", k_f, u_f)
# print("fourier dtype", k_f.type(), u_f.type())
# Get the output without transformation or skip connection
x = torch.fft.irfft(x_f, n=2 * length, dim=0)[:length]
x = x.to(torch.float16) if fp16 else x
# Get the full output
return x + contract("qch,lbqh->lbqch", self.get_skip_matrix(), u)
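# --- Illustrative sketch (added for clarity) ---
# forward() above realises the causal convolution y[t] = sum_{s<=t} k[s] * u[t-s]
# (plus the skip term) by zero-padding kernel and input to length 2L and multiplying
# their rffts. A minimal check against the direct convolution, ignoring the batch,
# head and channel dimensions; `_demo_fft_causal_conv` is our name.
def _demo_fft_causal_conv(length=16):
    torch.manual_seed(0)
    k = torch.randn(length)
    u = torch.randn(length)
    # FFT route: the 2L-point circular convolution of the zero-padded signals
    # contains the linear (causal) convolution in its first `length` entries.
    k_f = torch.fft.rfft(k, n=2 * length)
    u_f = torch.fft.rfft(u, n=2 * length)
    y_fft = torch.fft.irfft(k_f * u_f, n=2 * length)[:length]
    # direct causal convolution for reference
    y_direct = torch.stack(
        [sum(k[s] * u[t - s] for s in range(t + 1)) for t in range(length)]
    )
    assert torch.allclose(y_fft, y_direct, atol=1e-4)
    return y_fft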
class MHS4(nn.Module):
def __init__(
self,
input_dim: int,
output_dim: Optional[int] = None,
projection_dim: Optional[int] = None,
hidden_dim: int = 64,
num_heads: int = 1,
activation: Optional[str] = "gelu",
channels: int = 1,
rank: int = 1,
scale: float = 0.50,
maxlen: int = 256,
timestep_min: float = 0.010,
timestep_max: float = 0.160,
dropout: float = 0.00,
use_final_linear: bool = True,
parameter_noise: float = 0.00,
use_fast_kernel: bool = True,
create_on_gpu: bool = True
):
super().__init__()
# Only a rank of 1 is supported
assert rank == 1
# Store inputs
self.input_dim = input_dim
self.output_dim = output_dim or input_dim
self.projection_dim = projection_dim or input_dim // num_heads
self.hidden_dim = hidden_dim
self.num_heads = num_heads
self.channels = channels
self.parameter_noise = parameter_noise
# GLU activation requires double the channels
glu = activation == "glu"
# Increase number of channels for glu
self.channels *= 2 if glu else 1
# Input is divisible by number of heads
assert self.input_dim % self.num_heads == 0
# Projection layer
self.projweight, self.projbias = (
self.init_linear(
sizew=(self.num_heads, self.projection_dim, input_dim),
sizeb=(self.num_heads, self.projection_dim),
)
if self.num_heads > 1
else (None, None)
)
# SSM Layer
self.ssm = TiedStateSpaceModel(
input_dim=self.projection_dim,
hidden_dim=hidden_dim,
num_heads=num_heads,
channels=self.channels,
scale=scale,
maxlen=maxlen,
timestep_min=timestep_min,
timestep_max=timestep_max,
parameter_noise=parameter_noise,
use_fast_kernel=use_fast_kernel,
create_on_gpu=create_on_gpu
)
# Dropout and activation following ssm
self.activation = get_activation(activation)
self.dropout = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
# Final linear layer weight
self.out = (
Linear(
input_dim=self.projection_dim * self.num_heads,
output_dim=self.output_dim,
parameter_noise=parameter_noise,
)
if use_final_linear
else nn.Identity()
)
def init_linear(self, sizew, sizeb):
# Weight matrix
weight = nn.Parameter(torch.empty(sizew))
init.kaiming_uniform_(weight, a=math.sqrt(5))
# Bias vector
bias = nn.Parameter(torch.empty(sizeb))
fan_in, _ = init._calculate_fan_in_and_fan_out(weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(bias, -bound, bound)
return weight, bias
def get_noisy_weight(self, weight):
if self.parameter_noise > 0.0:
return gen_noisy_linear_weights(self.parameter_noise, weight)
return weight
@torch.no_grad()
def setup(self, maxlen, dtype, device, double=False):
self.ssm.setup(maxlen, dtype, device, double=double)
@torch.no_grad()
def setup_linear(self):
self.ssm.setup_linear()
def projection_linear(self, x):
# Input of shape (L, B, H) -> (L, B, Q, H)
if self.projweight is None:
return x.unsqueeze(-2)
# Noisy training
projweight = self.get_noisy_weight(self.projweight)
projbias = self.get_noisy_weight(self.projbias)
l, b, n = x.size(0), x.size(1), x.size(2)
q, k = projweight.size(0), projweight.size(1)
# this op is cast to fp16
out1 = torch.mm(x.view(l * b, n), projweight.view(q * k, n).transpose(0, 1).contiguous())
# this op always outputs float32
out = out1.view(l, b, q, k).add_(projbias.type_as(out1))
return out
# return contract("qkn,lbn->lbqk", projweight, x) + projbias
def forward(self, u: torch.Tensor):
# Assumes the input is of shape (L, B, H)
u = self.projection_linear(u)
u = self.ssm(u)
u = rearrange(u, "l b q c h -> l b (q c h)")
u = self.dropout(self.activation(u))
u = self.out(u)
return u
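# --- Illustrative sketch (added for clarity) ---
# projection_linear() above implements the per-head projection with a single mm plus
# reshape instead of the einsum kept in its trailing comment. A small equivalence
# check (relies on the `contract` imported at the top of this file);
# `_demo_projection_equivalence` is our name.
def _demo_projection_equivalence(l=3, b=2, n=8, q=2, k=4):
    torch.manual_seed(0)
    x = torch.randn(l, b, n)
    weight = torch.randn(q, k, n)
    bias = torch.randn(q, k)
    # mm + reshape route, as in projection_linear
    out_mm = torch.mm(x.view(l * b, n), weight.view(q * k, n).t()).view(l, b, q, k) + bias
    # einsum route, as in the commented-out contract(...) expression
    out_einsum = contract("qkn,lbn->lbqk", weight, x) + bias
    assert torch.allclose(out_mm, out_einsum, atol=1e-5)
    return out_mm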
def build_stacked_mh_s4(
num_layers: int = 1,
only_activate_last: bool = False,
input_dim: int = 512,
intermediate_dim: int = 512,
output_dim: Optional[int] = None,
hidden_dim: int = 32,
num_heads: int = 1,
activation: str = "gelu",
channels: int = 1,
rank: int = 1,
scale: float = 0.50,
maxlen: int = 256,
timestep_min: float = 0.010,
timestep_max: float = 0.160,
dropout: float = 0.10,
remove_final_linear: bool = False,
parameter_noise: float = 0.00,
use_fast_kernel: bool = True,
create_on_gpu = True
):
# Build all layers sequentially
layers = []
# Decide on output dimension
output_dim = output_dim or input_dim
# Starting first layer build with activation if single layer or activated when stacked
use_activation = num_layers == 1 or not only_activate_last
# Do not use final linear layer if we have multiple heads in stacked mode since there's a following projection
# This is also to reduce the number of parameters
use_final_linear = (num_heads == 1) or (num_layers == 1)
layers.append(
MHS4(
input_dim=input_dim,
output_dim=intermediate_dim if num_layers > 1 else output_dim,
hidden_dim=hidden_dim,
num_heads=num_heads,
activation=activation if use_activation else None,
channels=channels,
rank=rank,
scale=scale,
maxlen=maxlen,
timestep_min=timestep_min,
timestep_max=timestep_max,
dropout=dropout,
use_final_linear=use_final_linear,
parameter_noise=parameter_noise,
use_fast_kernel=use_fast_kernel,
create_on_gpu=create_on_gpu
)
)
# Intermediate layers
# Ensure each head dimension is consistent
assert intermediate_dim % num_heads == 0
for i in range(num_layers - 2):
layers.append(
MHS4(
input_dim=input_dim
if (not use_final_linear and i == 0)
else intermediate_dim,
output_dim=intermediate_dim,
projection_dim=intermediate_dim // num_heads,
hidden_dim=hidden_dim,
num_heads=num_heads,
activation=activation if use_activation else None,
channels=channels,
rank=rank,
scale=scale,
maxlen=maxlen,
timestep_min=timestep_min,
timestep_max=timestep_max,
dropout=dropout,
use_final_linear=use_final_linear,
parameter_noise=parameter_noise,
use_fast_kernel=use_fast_kernel,
)
)
# Final layer, requires larger projection layers for higher intermediate projections
# Ensure that the output is divisible
assert output_dim % num_heads == 0
if num_layers > 1:
layers.append(
MHS4(
input_dim=input_dim
if (not use_final_linear and num_layers == 2)
else intermediate_dim,
output_dim=output_dim,
projection_dim=intermediate_dim // num_heads,
hidden_dim=hidden_dim,
num_heads=num_heads,
activation=activation,
channels=channels,
rank=rank,
scale=scale,
maxlen=maxlen,
timestep_min=timestep_min,
timestep_max=timestep_max,
dropout=dropout,
use_final_linear=True,
parameter_noise=parameter_noise,
use_fast_kernel=use_fast_kernel,
)
)
# Get the final layer and remove its linear layer if needed
if remove_final_linear:
assert (
intermediate_dim == input_dim
), "Removing the final linear layer is only allowed when the intermediate dimension matches the input"
layers[-1].out = nn.Identity()
return nn.Sequential(*layers)
class BasicBlock(nn.Module):
def __init__(
self,
input_dim,
main_module,
dropout=0.0,
):
super().__init__()
self.ln = nn.LayerNorm(input_dim)
self.dp = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
self.main_module = main_module
def forward(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor]]:
# Assume the input takes shape (T, B, D)
# This makes input -> LayerNorm -> main_module -> dropout -> Residual(+input)
output = self.ln(input)
output = self.main_module(output)
output = self.dp(output)
output = output + input
return output, lengths, []
class BidirectionalBasicBlock(nn.Module):
def __init__(
self,
input_dim,
forward_module,
backward_module,
dropout=0.0,
parameter_noise=0.0,
residual_norm=True,
):
super().__init__()
if residual_norm:
self.ln = nn.LayerNorm(input_dim)
self.dp = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
else:
self.ln = self.dp = None
self.forward_module = forward_module
self.backward_module = backward_module
self.linear = Linear(
input_dim=input_dim * 2,
output_dim=input_dim,
parameter_noise=parameter_noise,
)
def reverse_padded_sequence(self, input, lengths):
# return input.flip(dims=[0])
# Assuming input is of shape (T, B, D)
output = torch.zeros_like(input)
for i, length in enumerate(lengths):
output[:length, i] = input[:length, i].flip(0)
return output
def forward(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor]]:
# Assume the input takes shape (T, B, D)
if self.ln is not None:
output = self.ln(input)
else:
output = input
output_flip = self.reverse_padded_sequence(output, lengths)
# Forward/backward module
f_output = self.forward_module(output)
b_output = self.backward_module(output_flip)
b_output_flip = self.reverse_padded_sequence(b_output, lengths)
# Concatenation and reduction to correct dim (B, T, D)
output = torch.cat([f_output, b_output_flip], dim=-1)
if self.ln is not None:
output = self.dp(self.linear(output))
output = output + input
else:
output = self.linear(output)
return output, lengths, []
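# --- Illustrative sketch (added for clarity) ---
# reverse_padded_sequence() above flips each sequence only over its valid prefix, so
# padded tail positions stay zero instead of leaking to the front as a plain flip(0)
# would. A standalone miniature of the same logic; `_demo_reverse_padded` is our name.
def _demo_reverse_padded():
    # (T, B, D) = (4, 2, 1); the second sequence has length 2
    x = torch.arange(1.0, 9.0).view(4, 2, 1)
    lengths = [4, 2]
    out = torch.zeros_like(x)
    for i, length in enumerate(lengths):
        out[:length, i] = x[:length, i].flip(0)
    # sequence 0 is fully reversed; sequence 1 keeps zeros at its padded tail
    assert out[:, 1, 0].tolist() == [4.0, 2.0, 0.0, 0.0]
    return out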
# For backward compatibility
class mySequentialv2(nn.ModuleList):
def forward(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor]]:
for module in self._modules.values():
input, lengths, state = module(input, lengths, state)
return input, lengths, state
class MHBiS4Layer(nn.Module):
def __init__(
self,
input_dim: int = 512,
mssm_num_modules: int = 1,  # number of bidirectional SSM blocks stacked in this layer
mssm_num_stacks: int = 2,
mssm_only_activate_last: bool = False,
mssm_intermediate_dim: int = 512,
mssm_hidden_dim: int = 32,
mssm_num_heads: int = 1,
mssm_activation: str = "gelu",
mssm_rank: int = 1,
mssm_scale: float = 0.50,
mssm_maxlen: int = 256,
mssm_timestep_min: float = 0.010,
mssm_timestep_max: float = 0.160,
mssm_dropout: float = 0.10,
mssm_remove_final_linear: bool = False,
ffn_activation: str = "gelu",
ffn_dim: int = 2048,
ffn_dropout: float = 0.10,
parameter_noise: float = 0.00,
use_fast_kernel: bool = True,
s4_only=False,
create_on_gpu=True
):
super().__init__()
forward_ssm_modules = [
build_stacked_mh_s4(
num_layers=mssm_num_stacks,
only_activate_last=mssm_only_activate_last,
input_dim=input_dim,
intermediate_dim=mssm_intermediate_dim,
output_dim=input_dim,
hidden_dim=mssm_hidden_dim,
num_heads=mssm_num_heads,
activation=mssm_activation,
rank=mssm_rank,
scale=mssm_scale,
maxlen=mssm_maxlen,
timestep_min=mssm_timestep_min,
timestep_max=mssm_timestep_max,
dropout=mssm_dropout,
remove_final_linear=mssm_remove_final_linear,
parameter_noise=parameter_noise,
use_fast_kernel=use_fast_kernel,
create_on_gpu=create_on_gpu
)
for _ in range(mssm_num_modules)
]
backward_ssm_modules = [
build_stacked_mh_s4(
num_layers=mssm_num_stacks,
only_activate_last=mssm_only_activate_last,
input_dim=input_dim,
intermediate_dim=mssm_intermediate_dim,
output_dim=input_dim,
hidden_dim=mssm_hidden_dim,
num_heads=mssm_num_heads,
activation=mssm_activation,
rank=mssm_rank,
scale=mssm_scale,
maxlen=mssm_maxlen,
timestep_min=mssm_timestep_min,
timestep_max=mssm_timestep_max,
dropout=mssm_dropout,
remove_final_linear=mssm_remove_final_linear,
parameter_noise=parameter_noise,
use_fast_kernel=use_fast_kernel,
create_on_gpu=create_on_gpu
)
for _ in range(mssm_num_modules)
]
self.ssm_block = mySequentialv2(
[
BidirectionalBasicBlock(
input_dim=input_dim,
forward_module=fmodule,
backward_module=bmodule,
dropout=mssm_dropout,
parameter_noise=parameter_noise,
residual_norm=not s4_only
)
for fmodule, bmodule in zip(forward_ssm_modules, backward_ssm_modules)
]
)
if not s4_only:
ffn_module = nn.Sequential(
Linear(
input_dim=input_dim,
output_dim=ffn_dim * (2 if ffn_activation == "glu" else 1),
parameter_noise=parameter_noise,
),
get_activation(ffn_activation),
nn.Dropout(ffn_dropout) if ffn_dropout > 0.0 else nn.Identity(),
Linear(
input_dim=ffn_dim,
output_dim=input_dim,
parameter_noise=parameter_noise,
),
)
self.ffn_block = BasicBlock(
input_dim=input_dim, main_module=ffn_module, dropout=ffn_dropout
)
else:
self.ffn_block = None
def forward(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor]]:
output = input
output, _, _ = self.ssm_block(output, lengths, state)
if self.ffn_block is not None:
output, _, _ = self.ffn_block(output, lengths, state)
return output, lengths, []
class MHBiS4EncoderLayer(nn.Module):
def __init__(self, cfg, s4_only=False, create_on_gpu=True):
super().__init__()
self.module = self.build_module(cfg, s4_only=s4_only, create_on_gpu=create_on_gpu)
def build_module(self, cfg, s4_only=False, create_on_gpu=True):
return MHBiS4Layer(
input_dim=cfg.encoder_embed_dim,
mssm_num_modules=1,
mssm_num_stacks=cfg.encoder_mssm_num_stacks,
mssm_only_activate_last=False,
mssm_intermediate_dim=cfg.encoder_embed_dim,
mssm_hidden_dim=cfg.encoder_mssm_hidden_dim,
mssm_num_heads=cfg.encoder_mssm_num_heads,
mssm_activation=cfg.encoder_mssm_activation,
mssm_rank=1,
mssm_scale=cfg.encoder_mssm_scale,
mssm_maxlen=cfg.encoder_mssm_maxlen,
mssm_timestep_min=cfg.encoder_mssm_timestep_min,
mssm_timestep_max=cfg.encoder_mssm_timestep_max,
mssm_dropout=cfg.dropout,
mssm_remove_final_linear=True,
ffn_activation=cfg.activation_fn,
ffn_dim=cfg.encoder_ffn_embed_dim,
ffn_dropout=cfg.relu_dropout or 0,
parameter_noise=0.00,
use_fast_kernel=True,  # Why?
s4_only=s4_only,
create_on_gpu=create_on_gpu
)
@torch.no_grad()
def infer_lengths(self, batch, maxlen, encoder_padding_mask: Optional[Tensor]):
        # Assume non-padding elements are part of the sequence
lengths = (encoder_padding_mask.ne(1)).sum(-1)
return lengths.to(int)
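    # Sketch of infer_lengths (hedged example): for a padding mask of shape (B, T)
    # such as
    #     [[0, 0, 0, 1, 1],
    #      [0, 0, 0, 0, 0]]
    # the returned lengths are [3, 5], i.e. the number of non-padding (mask == 0)
    # positions in each row.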
def forward(
self,
x,
encoder_padding_mask: Optional[Tensor],
attn_mask: Optional[Tensor] = None,
attn_bias: Optional[Tensor] = None # relative position encoding
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, seq_len)` where padding elements are indicated by ``1``.
attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,
where `tgt_len` is the length of output and `src_len` is the
length of input, though here both are equal to `seq_len`.
`attn_mask[tgt_i, src_j] = 1` means that when calculating the
embedding for `tgt_i`, we exclude (mask out) `src_j`. This is
useful for strided self-attention.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
bsz, seq_len = x.size(1), x.size(0)
if encoder_padding_mask is None:
encoder_padding_mask = x.new_zeros(bsz, seq_len)
lengths = self.infer_lengths(
batch = x.size(1),
maxlen = x.size(0),
encoder_padding_mask=encoder_padding_mask,
)
x, _, _ = self.module(x, lengths)
return x
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
pass
if __name__ == "__main__":
import json
from types import SimpleNamespace
from random import randint
def json_to_namespace(json_file):
with open(json_file) as f:
x = json.load(f, object_hook=lambda d: SimpleNamespace(**d))
for name in x.__dict__:
if x.__dict__[name] in ['False', 'True']:
x.__dict__[name] = (x.__dict__[name] == 'True')
return x
cfg = json_to_namespace("mssm_config.json")
s4_layer = MHBiS4EncoderLayer(cfg, s4_only=True, create_on_gpu=True)
print(s4_layer)
s4_layer = s4_layer.cuda()
t = 512
b = 16
h = 1024
x = torch.randn(*(t, b, h)).cuda()
mask = torch.ones(*(b, t), dtype=torch.bool)
for i in range(b):
l = randint(t//2, t)
mask[i][0:l].fill_(0)
x = x.half()
print(x.size(), x.type())
with autocast(enabled=True, dtype=torch.float16):
output = s4_layer(x, mask)
print(output.size())
print(output.sum())
n_params = 0
for param in s4_layer.parameters():
n_params += param.numel()
print(n_params)
    print(n_params * 24)
| 37,369
| 32.515695
| 114
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/mssm/fft_convolution.py
|
import torch
| 14
| 4
| 12
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/mssm/ssm_kernel/ssm_kernel_coefficient.py
|
#!/usr/bin/env python3
import torch
from opt_einsum import contract
import os
import pathlib
import ssm_kernel_coefficient_cuda
# from torch.utils.cpp_extension import load
# ssm_kernel_coefficient_binding = load(
# name="ssm_kernel_coefficient_binding",
# sources=[
# os.path.join(
# pathlib.Path(__file__).parent.resolve(),
# "ssm_kernel_coefficient_binding_cuda.cu"
# )
# ],
# verbose = True
# )
# pyre-ignore
# from ssm_kernel_coefficient_binding import (
# kernel_coefficient_backward_double,
# kernel_coefficient_backward_float,
# kernel_coefficient_forward_double,
# kernel_coefficient_forward_float,
# )
def compute_kernel_coefficient(z, d, t, b, c, fast=False):
if not fast or not z.is_cuda:
return compute_slow(z, d, t, b, c)
return compute_fast(z, d, t, b, c)
def get_dwoodbury(z, d, invt):
# Get the bilinear transformation
z = contract("l,qh->qlh", torch.view_as_complex(z), invt)
# Compute the term and reuse computations (Q, L, H, N)
return 1 / (z.unsqueeze(-1) - d.unsqueeze(-2).unsqueeze(-2))
def compute_slow(z, d, t, b, c):
# Get the diagonal component in the woodbury computation
# which will be reused in computing the kernel
# z is forced to be fp32
# the following prevents fp16 underflow, particularly on t
if t.dtype == torch.float16:
t = t.to(z.dtype)
b = b.to(z.dtype)
c = c.to(z.dtype)
d = d.to(z.dtype)
r = get_dwoodbury(z, d, 1 / t) # (Q, L, H, N)
# Compute kernel coeffs
kernelcc = contract("qihn,qlhn,qchn->qiclh", b.to(r.dtype), r, c)
return kernelcc
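# Shape sketch (hedged, matching the test in __main__ below): with Q = num_heads,
# H = input_dim, N = hid_dim and L = seq_len, the inputs are
#   z: (L, 2) real view of the complex frequencies, d: (Q, N), t: (Q, H),
#   b: (Q, I, H, N), c: (Q, C, H, N)   (I = C = 2 in the test),
# and compute_slow returns kernel coefficients of shape (Q, I, C, L, H).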
def compute_fast(z, d, t, b, c):
# z is forced to be fp32
# the following prevents fp16 underflow, particularly on t
fp16 = (t.dtype == torch.float16)
if t.dtype == torch.float16:
t = t.to(z.dtype)
b = b.to(z.dtype)
c = c.to(z.dtype)
zz = contract("l,qh->qlh", torch.view_as_complex(z), 1 / t) # (Q, L, H)
bc = contract("qihn,qchn->icqhn", b, c).to(zz.dtype) # (I, C, Q, H, N)
I, C, Q, H, N = bc.shape
bc = bc.view(-1, Q, H, N)
L = zz.shape[1]
d = d.to(zz.dtype) # (Q, N)
coeff = KernelCoefficientFast.apply(bc, zz, d) # (IC, Q, L, H)
coeff = coeff.view(I, C, Q, L, H).permute(2, 0, 1, 3, 4)
if fp16:
coeff = coeff.to(torch.float16)
return coeff
# return coeff.view(I, C, Q, L, H).permute(2, 0, 1, 3, 4) # (Q, I, C, L, H)
class KernelCoefficientFast(torch.autograd.Function):
# Compute sum{n} { a[n] / (b[l] - c[n]) }
@staticmethod
def forward(ctx, a_n, b_l, c_n):
        if not (a_n.is_cuda and b_l.is_cuda and c_n.is_cuda):
raise NotImplementedError("Only support CUDA tensors")
ctx.save_for_backward(a_n, b_l, c_n)
is_float = 1
if b_l.dtype == torch.complex128:
is_float = 0
return ssm_kernel_coefficient_cuda.forward(a_n, b_l, c_n, is_float)
@staticmethod
def backward(ctx, dout):
a_n, b_l, c_n = ctx.saved_tensors
is_float = 1
if b_l.dtype == torch.complex128:
is_float = 0
da_n, db_l, dc_n = ssm_kernel_coefficient_cuda.backward(a_n, b_l, c_n, dout, is_float)
return da_n, db_l, dc_n
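# Reference semantics (hedged sketch, not the CUDA kernel itself): for a_n of shape
# (M, Q, H, N), b_l of shape (Q, L, H) and c_n of shape (Q, N), the forward pass
# computes out[m, q, l, h] = sum_n a_n[m, q, h, n] / (b_l[q, l, h] - c_n[q, n]),
# which in plain PyTorch/opt_einsum would be
#   recip = 1.0 / (b_l.unsqueeze(-1) - c_n[:, None, None, :])   # (Q, L, H, N)
#   out = contract("mqhn,qlhn->mqlh", a_n, recip)               # (M, Q, L, H)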
if __name__ == "__main__":
# Test
num_heads = 4
input_dim = 64
hid_dim = 32
seq_len = 256
dtype=torch.float32
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.manual_seed(0)
b = torch.randn(num_heads, 2, input_dim, hid_dim, device=device, dtype=dtype).requires_grad_(True)
c = torch.randn(num_heads, 2, input_dim, hid_dim, device=device, dtype=dtype).requires_grad_(True)
z = torch.randn(seq_len, 2, device=device, dtype=dtype)
t = torch.randn(num_heads, input_dim, device=device, dtype=dtype).requires_grad_(True)
d = torch.randn(num_heads, hid_dim, device=device, dtype=dtype).requires_grad_(True)
zz = z.to(torch.float64)
dd = d.to(torch.float64)
tt = t.to(torch.float64)
bb = b.to(torch.float64)
cc = c.to(torch.float64)
ans64 = compute_slow(zz, dd, tt, bb, cc)
ans = compute_slow(z, d, t, b, c)
out64 = compute_fast(zz, dd, tt, bb, cc)
out = compute_fast(z, d, t, b, c)
err = torch.rand_like(out)
ans64_dd, ans64_dt, ans64_db, ans64_dc = torch.autograd.grad(
ans64, (dd, tt, bb, cc), err, retain_graph=True
)
ans_dd, ans_dt, ans_db, ans_dc = torch.autograd.grad(
ans, (d, t, b, c), err, retain_graph=True
)
out64_dd, out64_dt, out64_db, out64_dc = torch.autograd.grad(
out64, (dd, tt, bb, cc), err, retain_graph=True
)
out_dd, out_dt, out_db, out_dc = torch.autograd.grad(
out, (d, t, b, c), err, retain_graph=True
)
print()
print("out: max abs error (ans64, out64)", torch.max(torch.abs(out64 - ans64)))
print("dd: max abs error (ans64, out64)", torch.max(torch.abs(ans64_dd - out64_dd)))
print("dt: max abs error (ans64, out64)", torch.max(torch.abs(ans64_dt - out64_dt)))
print("db: max abs error (ans64, out64)", torch.max(torch.abs(ans64_db - out64_db)))
print("dc: max abs error (ans64, out64)", torch.max(torch.abs(ans64_dc - out64_dc)))
print()
print("out: max abs error (ans64, out)", torch.max(torch.abs(out - ans64)))
print("dd: max abs error (ans64, out)", torch.max(torch.abs(ans64_dd - out_dd)))
print("dt: max abs error (ans64, out)", torch.max(torch.abs(ans64_dt - out_dt)))
print("db: max abs error (ans64, out)", torch.max(torch.abs(ans64_db - out_db)))
print("dc: max abs error (ans64, out)", torch.max(torch.abs(ans64_dc - out_dc)))
print()
print("out: max abs error (ans, out64)", torch.max(torch.abs(out64 - ans)))
print("dd: max abs error (ans, out64)", torch.max(torch.abs(ans_dd - out64_dd)))
print("dt: max abs error (ans, out64)", torch.max(torch.abs(ans_dt - out64_dt)))
print("db: max abs error (ans, out64)", torch.max(torch.abs(ans_db - out64_db)))
print("dc: max abs error (ans, out64)", torch.max(torch.abs(ans_dc - out64_dc)))
print()
print("out: max abs error (ans, out)", torch.max(torch.abs(out - ans64)))
print("dd: max abs error (ans, out)", torch.max(torch.abs(ans_dd - out_dd)))
print("dt: max abs error (ans, out)", torch.max(torch.abs(ans_dt - out_dt)))
print("db: max abs error (ans, out)", torch.max(torch.abs(ans_db - out_db)))
print("dc: max abs error (ans, out)", torch.max(torch.abs(ans_dc - out_dc)))
| 6,529
| 35.077348
| 102
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/mssm/ssm_kernel/setup.py
|
import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
from pathlib import Path
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
# print(bare_metal_minor, bare_metal_major)
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
print("Cuda extensions are being compiled with a version of Cuda that does " +
"not match the version used to compile Pytorch binaries. " +
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) +
"In some cases, a minor-version mismatch will not cause later errors: " +
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk).")
return int(bare_metal_minor), int(bare_metal_major)
# Check, if ATen/CUDAGenerator.h is found, otherwise use the new
# ATen/CUDAGeneratorImpl.h, due to breaking change in https://github.com/pytorch/pytorch/pull/36026
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
cmdclass = {}
ext_modules = []
cmdclass['build_ext'] = BuildExtension.with_options(use_ninja=False)
cc_flag = []
_, bare_metal_major, _ = get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
cc_flag.append('-gencode')
cc_flag.append('arch=compute_75,code=sm_75')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_80,code=sm_80')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_86,code=sm_86')
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
# subprocess.run(["git", "submodule", "update", "--init", "cutlass"])
# subprocess.run(["git", "clone", "https://github.com/NVIDIA/cutlass.git", "multihead_attn/cutlass"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "ed2ed4d667ce95e1371bd62db32b6a114e774336"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "fe3438a3c1ccbdd03dc1aca3bb68099a9e2a58bd"])
bare_metal_minor, bare_metal_major = check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)
print("GENERATOR FLAG:", generator_flag)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
ext_modules.append(
CUDAExtension(
name="ssm_kernel_coefficient_cuda",
sources=[
"ssm_kernel_coefficient_binding.cpp",
"ssm_kernel_coefficient_binding_cuda.cu"
],
extra_compile_args={
"cxx": ["-O3", "-std=c++17"] + generator_flag,
"nvcc": append_nvcc_threads(
[
"-O3",
"-std=c++17",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_HALF2_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
"--ptxas-options=-v",
"-lineinfo"
]
+ generator_flag
+ cc_flag
),
},
include_dirs=[
Path(this_dir)
],
)
)
setup(
name='ssm_cuda_bindings',
    version='0.1',
    description='CUDA/C++ PyTorch extension for the SSM kernel coefficient computation',
ext_modules=ext_modules,
cmdclass=cmdclass,
)
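# Usage sketch (hedged; assumes a CUDA toolchain matching the installed PyTorch):
#   cd onmt/models/speech_recognizer/mssm/ssm_kernel
#   python setup.py install
# This builds the `ssm_kernel_coefficient_cuda` extension imported by
# ssm_kernel_coefficient.py above.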
| 5,691
| 34.798742
| 101
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/wavlm_modules.py
|
# --------------------------------------------------------
# WavLM: Large-Scale Self-Supervised Pre-training for Full Stack Speech Processing (https://arxiv.org/abs/2110.13900.pdf)
# Github source: https://github.com/microsoft/unilm/tree/master/wavlm
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq code bases
# https://github.com/pytorch/fairseq
# --------------------------------------------------------
import math
import warnings
from typing import Dict, Optional, Tuple
import torch
from torch import Tensor, nn
from torch.nn import Parameter
import torch.nn.functional as F
from onmt.modules.optimized.linear import Linear
from onmt.modules.optimized.self_attention_attnbias_func import self_attn_bias_func
class WavLMMultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
has_relative_attention_bias=False,
num_buckets=32,
max_distance=128,
gru_rel_pos=False,
rescale_init=False,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = nn.Dropout(dropout)
self.has_relative_attention_bias = has_relative_attention_bias
self.num_buckets = num_buckets
self.max_distance = max_distance
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(num_buckets, num_heads)
self.head_dim = embed_dim // num_heads
self.q_head_dim = self.head_dim
self.k_head_dim = self.head_dim
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
k_bias = True
if rescale_init:
k_bias = False
k_embed_dim = embed_dim
q_embed_dim = embed_dim
self.k_proj = Linear(self.kdim, k_embed_dim, bias=k_bias)
self.v_proj = Linear(self.vdim, embed_dim, bias=bias)
self.q_proj = Linear(embed_dim, q_embed_dim, bias=bias)
self.out_proj = Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.gru_rel_pos = gru_rel_pos
if self.gru_rel_pos:
self.grep_linear = Linear(self.q_head_dim, 8)
self.grep_a = nn.Parameter(torch.ones(1, num_heads, 1, 1))
self.reset_parameters()
self.fast_attention = False
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
if self.has_relative_attention_bias:
nn.init.xavier_normal_(self.relative_attention_bias.weight)
def _relative_positions_bucket(self, relative_positions, bidirectional=True):
num_buckets = self.num_buckets
max_distance = self.max_distance
relative_buckets = 0
if bidirectional:
num_buckets = num_buckets // 2
relative_buckets += (relative_positions > 0).to(torch.long) * num_buckets
relative_positions = torch.abs(relative_positions)
else:
relative_positions = -torch.min(relative_positions, torch.zeros_like(relative_positions))
max_exact = num_buckets // 2
is_small = relative_positions < max_exact
relative_postion_if_large = max_exact + (
torch.log(relative_positions.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_postion_if_large = torch.min(
relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_positions, relative_postion_if_large)
return relative_buckets
def compute_bias(self, query_length, key_length):
context_position = torch.arange(query_length, dtype=torch.long)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long)[None, :]
relative_position = memory_position - context_position
relative_position_bucket = self._relative_positions_bucket(
relative_position,
bidirectional=True
)
relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device)
values = self.relative_attention_bias(relative_position_bucket)
values = values.permute([2, 0, 1])
return values
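    # Shape sketch (hedged): compute_bias(q_len, k_len) returns a relative-position
    # bias of shape (num_heads, q_len, k_len); in forward() it is expanded to
    # (bsz * num_heads, tgt_len, src_len) and used as an additive attention mask.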
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = False,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
position_bias: Optional[Tensor] = None
) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
is_tpu = query.device.type == "xla"
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
            assert (src_len, bsz) == value.shape[:2]
if self.has_relative_attention_bias and position_bias is None:
position_bias = self.compute_bias(tgt_len, src_len)
position_bias = position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, src_len)
if (
not is_tpu # don't use PyTorch version on TPUs
and incremental_state is None
and not static_kv
# A workaround for quantization to work. Otherwise JIT compilation
# treats bias in linear module as method.
and not torch.jit.is_scripting()
and self.q_head_dim == self.head_dim
):
assert key is not None and value is not None
assert attn_mask is None
attn_mask_rel_pos = None
if position_bias is not None:
attn_mask_rel_pos = position_bias
if self.gru_rel_pos:
# from [T x B x H] to [B x T x H]
query_layer = query.transpose(0, 1)
# [B x T x head x -1]
new_x_shape = query_layer.size()[:-1] + (self.num_heads, -1)
# [B x T x head x head_size]
query_layer = query_layer.view(*new_x_shape)
# [B x H x T x head_size]
query_layer = query_layer.permute(0, 2, 1, 3)
_B, _H, _L, __ = query_layer.size()
gate_input = self.grep_linear(query_layer).view(
_B, _H, _L, 2, 4).sum(-1, keepdim=False)
# inplace sigmoid
gate_a, gate_b = gate_input.sigmoid_().chunk(2, dim=-1)
gate_a_1 = gate_a * (gate_b * self.grep_a - 1.0) + 2.0
attn_mask_rel_pos = gate_a_1.view(bsz * self.num_heads, -1, 1) * position_bias
attn_mask_rel_pos = attn_mask_rel_pos.view((-1, tgt_len, tgt_len))
else:
attn_mask_rel_pos = query.new_zeros(*(bsz * self.num_heads, tgt_len, tgt_len))
# k_proj_bias = self.k_proj.bias
# if k_proj_bias is None:
# k_proj_bias = torch.zeros_like(self.q_proj.bias)
# if self.fast_attention:
# is_training = self.training
# low_precision = True
# in_proj_weight = self.proj_weight
# out_proj_weight = self.out_proj.weight
# recompute = False
# rotary = False
# positions = None
#
# x, attn = self_attn_bias_func(False, is_training, self.num_heads, query, attn_mask_rel_pos,
# in_proj_weight, out_proj_weight,
# self.proj_bias, self.out_proj.bias,
# key_padding_mask, self.dropout_module.p,
# rotary, positions,
# False, None, # incremental and state and double precision
# low_precision, True, recompute) # learnable_pos + return-coverage
# else:
x, attn = F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training,
# self.training or self.dropout_module.apply_during_inference,
key_padding_mask,
need_weights,
attn_mask_rel_pos,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
return x, attn, position_bias
else:
# this code path is never reached with wavlm
raise NotImplementedError
def convert_fast_attention(self):
pass
# if self.fast_attention:
# return
# self.fast_attention = True
# assert self.qkv_same_dim, "Only works with QKV same dim."
# w_q = self.q_proj.weight.clone()
# w_k = self.k_proj.weight.clone()
# w_v = self.v_proj.weight.clone()
# weights = [w_q, w_k, w_v]
# weight_ = torch.cat(weights, dim=0).contiguous()
#
# b_q = self.q_proj.bias.clone()
# b_k = self.k_proj.bias.clone()
# b_v = self.v_proj.bias.clone()
# biases = [b_q, b_k, b_v]
# bias_ = torch.cat(biases, dim=0).contiguous()
#
# head_dim = self.head_dim
# heads = self.num_heads
# input_dim = self.embed_dim
#
# # when we concatenate the weights, the output has the size 3 * D (3 -> heads -> head_dim)
# # the fast attention module requires (heads -> 3 -> head_dim)
# weight_ = weight_.reshape(3 * head_dim * heads, input_dim).view(3, heads, head_dim, input_dim).transpose(0, 1). \
# reshape(-1, input_dim)
#
# bias_ = bias_.reshape(3 * head_dim * heads).view(3, heads, head_dim).transpose(0, 1).reshape(-1)
#
# weight_t = torch.Tensor(3 * input_dim, input_dim)
# bias_t = torch.Tensor(3 * input_dim)
# weight_t.copy_(weight_)
# bias_t.copy_(bias_)
# self.proj_weight = Parameter(weight_t)
# self.proj_bias = Parameter(bias_t)
#
# self.proj_weight.requires_grad = self.q_proj.weight.requires_grad
# self.proj_bias.requires_grad = self.q_proj.bias.requires_grad
# del self.q_proj, self.k_proj, self.v_proj
def get_activation_fn(activation: str):
"""Returns the activation function corresponding to `activation`"""
if activation == "relu":
return F.relu
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
warnings.warn(
"--activation-fn=gelu_fast has been renamed to gelu_accurate"
)
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
elif activation == "glu":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def gelu_accurate(x):
if not hasattr(gelu_accurate, "_a"):
gelu_accurate._a = math.sqrt(2 / math.pi)
return (
0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
)
def gelu(x: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.gelu(x.float()).type_as(x)
class GLU_Linear(nn.Module):
def __init__(self, input_dim, output_dim, glu_type="sigmoid", bias_in_glu=True):
super(GLU_Linear, self).__init__()
self.glu_type = glu_type
self.output_dim = output_dim
if glu_type == "sigmoid":
self.glu_act = torch.nn.Sigmoid()
elif glu_type == "swish":
self.glu_act = torch.nn.SiLU()
elif glu_type == "relu":
self.glu_act = torch.nn.ReLU()
elif glu_type == "gelu":
self.glu_act = torch.nn.GELU()
if bias_in_glu:
self.linear = nn.Linear(input_dim, output_dim * 2, True)
else:
self.linear = nn.Linear(input_dim, output_dim * 2, False)
def forward(self, x):
        # the input is assumed to have the channel (feature) dimension last
x = self.linear(x)
if self.glu_type == "bilinear":
x = (x[:, :, 0:self.output_dim] * x[:, :, self.output_dim:self.output_dim * 2])
else:
x = (x[:, :, 0:self.output_dim] * self.glu_act(x[:, :, self.output_dim:self.output_dim * 2]))
return x
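# Usage sketch for GLU_Linear (hedged): for x of shape (B, T, input_dim), the inner
# nn.Linear maps to (B, T, 2 * output_dim); the first half is the value and the
# second half, passed through glu_act, is the gate, so the output is
#   value * gate   with shape (B, T, output_dim).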
def init_bert_params(module):
"""
Initialize the weights specific to the BERT Model.
This overrides the default initializations depending on the specified arguments.
1. If normal_init_linear_weights is set then weights of linear
layer will be initialized using the normal distribution and
        bias will be set to the specified value.
2. If normal_init_embed_weights is set then weights of embedding
layer will be initialized using the normal distribution.
3. If normal_init_proj_weights is set then weights of
in_project_weight for MultiHeadAttention initialized using
the normal distribution (to be validated).
"""
def normal_(data):
# with FSDP, module params will be on CUDA, so we cast them back to CPU
# so that the RNG is consistent with and without FSDP
data.copy_(
data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
)
if isinstance(module, nn.Linear):
normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, WavLMMultiheadAttention):
normal_(module.q_proj.weight.data)
normal_(module.k_proj.weight.data)
normal_(module.v_proj.weight.data)
| 18,345
| 39.320879
| 186
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/base_fairseq.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/enum.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum, EnumMeta
from typing import List
class StrEnumMeta(EnumMeta):
    # this is a workaround for submitit pickling leading to instance checks failing in hydra for StrEnum, see
# https://github.com/facebookresearch/hydra/issues/1156
@classmethod
def __instancecheck__(cls, other):
return "enum" in str(type(other))
class StrEnum(Enum, metaclass=StrEnumMeta):
def __str__(self):
return self.value
def __eq__(self, other: str):
return self.value == other
def __repr__(self):
return self.value
def __hash__(self):
return hash(str(self))
def ChoiceEnum(choices: List[str]):
"""return the Enum class used to enforce list of choices"""
return StrEnum("Choices", {k: k for k in choices})
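# Example (hedged sketch): ChoiceEnum builds a string-valued Enum whose members
# compare equal to their names, e.g.
#   Colors = ChoiceEnum(["red", "blue"])
#   Colors.red == "red"      # True, via StrEnum.__eq__
#   str(Colors.red)          # "red"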
LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none", "simple", "tqdm"])
DDP_BACKEND_CHOICES = ChoiceEnum([
"c10d", # alias for pytorch_ddp
"fully_sharded", # FullyShardedDataParallel from fairscale
"legacy_ddp",
"no_c10d", # alias for legacy_ddp
"pytorch_ddp",
"slow_mo",
])
DDP_COMM_HOOK_CHOICES = ChoiceEnum(["none", "fp16"])
DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta", "huffman"])
GENERATION_CONSTRAINTS_CHOICES = ChoiceEnum(["ordered", "unordered"])
GENERATION_DECODING_FORMAT_CHOICES = ChoiceEnum(
["unigram", "ensemble", "vote", "dp", "bs"]
)
ZERO_SHARDING_CHOICES = ChoiceEnum(["none", "os"])
PIPELINE_CHECKPOINT_CHOICES = ChoiceEnum(["always", "never", "except_last"])
PRINT_ALIGNMENT_CHOICES = ChoiceEnum(["hard", "soft"])
| 1,753
| 31.481481
| 107
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/adapter.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from onmt.modules.layer_norm import LayerNorm
class Adapter(torch.nn.Module):
def __init__(self, input_dim, downsample_factor=2):
self.input_dim = input_dim
self.middle_dim = input_dim // downsample_factor
super(Adapter, self).__init__()
self.linear_in = nn.Linear(input_dim, self.middle_dim)
self.linear_out = nn.Linear(self.middle_dim, input_dim)
self.norm = LayerNorm(input_dim)
self.fused = False
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
self.reset_parameters()
def reset_parameters(self):
def normal_(data):
# with FSDP, module params will be on CUDA, so we cast them back to CPU
# so that the RNG is consistent with and without FSDP
data.copy_(
data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
)
with torch.no_grad():
normal_(self.linear_in.weight.data)
normal_(self.linear_out.weight.data)
self.linear_in.bias.data.zero_()
self.linear_out.bias.data.zero_()
def forward(self, input):
if self.fused:
weights = [self.linear_in.weight, self.linear_out.weight]
biases = [self.linear_in.bias, self.linear_out.bias]
# seq_len, bsz, hidden_size = input.size(0), input.size(1), input.size(2)
input_norm = self.norm(input)
input = self.fused_function(0.0, False, input_norm,
*weights, *biases)
return input
else:
return self.linear_out(F.relu(self.linear_in(self.norm(input))))
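# Usage sketch (hedged): the adapter is a LayerNorm -> down-projection -> ReLU ->
# up-projection bottleneck applied on the last (feature) dimension, e.g.
#   adapter = Adapter(input_dim=512, downsample_factor=2)   # bottleneck of 256
#   x = torch.randn(10, 4, 512)                             # (T, B, H)
#   y = adapter(x)                                          # (T, B, H)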
class MultilingualAdapter(torch.nn.Module):
def __init__(self, n_languages, input_size, downsample_factor=4):
self.n_languages = n_languages
self.input_size = input_size
super(MultilingualAdapter, self).__init__()
self.adapters = nn.ModuleList([Adapter(input_size, downsample_factor) for _ in range(self.n_languages)])
def forward(self, input, lang=None, mixture=None):
"""
:param input: tensor TxBxH
:param lang: tensor size 1 (language for the batch)
:param mixture: tensor size B x n_language (mixture for the minibatch)
:return:
"""
if lang is not None:
assert mixture is None
if lang.numel() != 1:
                print("Expected a single-element tensor, but got", lang.size())
assert lang.numel() == 1
adapter = self.adapters[lang.item()]
return adapter(input)
if mixture is not None:
assert mixture.size(0) == input.size(1) and mixture.size(1) == self.n_languages
outputs = list()
for i in range(self.n_languages):
                # mixture size is [B x n_languages]; reshape the per-sample weight
                # to (1, B, 1) so it broadcasts over the (T, B, H) adapter output
                mixture_weight = mixture[:, i].unsqueeze(0).unsqueeze(-1)
                outputs.append(self.adapters[i](input) * mixture_weight)
            outputs = torch.stack(outputs)  # n_languages x T x B x H
            outputs = torch.sum(outputs, 0, keepdim=False)  # -> T x B x H
return outputs
| 3,345
| 34.595745
| 112
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/utils.py
|
try:
    from collections.abc import Iterable
except ImportError:
    from collections import Iterable
import contextlib
import itertools
import logging
import os
import re
import warnings
from argparse import ArgumentError, ArgumentParser, Namespace
from typing import Optional, Tuple, Callable, Dict, List, TYPE_CHECKING

import numpy as np
import torch
import torch.nn.functional as F
from omegaconf import DictConfig, OmegaConf, open_dict, _utils

logger = logging.getLogger(__name__)

# NOTE: convert_namespace_to_omegaconf below also relies on hydra (initialize,
# compose, GlobalHydra) and fairseq helpers (override_module_args,
# _set_legacy_defaults); those names are assumed to be provided by the hydra and
# fairseq packages and are left unresolved here, as in the original file.
def overwrite_args_by_name(cfg: DictConfig, overrides: Dict[str, any]):
# this will be deprecated when we get rid of argparse and model_overrides logic
REGISTRIES = {}
with open_dict(cfg):
for k in cfg.keys():
# "k in cfg" will return false if its a "mandatory value (e.g. ???)"
if k in cfg and isinstance(cfg[k], DictConfig):
if k in overrides and isinstance(overrides[k], dict):
for ok, ov in overrides[k].items():
if isinstance(ov, dict) and cfg[k][ok] is not None:
overwrite_args_by_name(cfg[k][ok], ov)
else:
cfg[k][ok] = ov
else:
overwrite_args_by_name(cfg[k], overrides)
elif k in cfg and isinstance(cfg[k], Namespace):
for override_key, val in overrides.items():
setattr(cfg[k], override_key, val)
elif k in overrides:
if (
k in REGISTRIES
and overrides[k] in REGISTRIES[k]["dataclass_registry"]
):
cfg[k] = DictConfig(
REGISTRIES[k]["dataclass_registry"][overrides[k]]
)
overwrite_args_by_name(cfg[k], overrides)
cfg[k]._name = overrides[k]
else:
cfg[k] = overrides[k]
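# Usage sketch (hedged): overrides are matched by name at every nesting level, e.g.
#   cfg = OmegaConf.create({"model": {"dropout": 0.1}})
#   overwrite_args_by_name(cfg, {"dropout": 0.3})
# recurses into cfg.model and sets cfg.model.dropout = 0.3.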
class omegaconf_no_object_check:
def __init__(self):
self.old_is_primitive = _utils.is_primitive_type
def __enter__(self):
_utils.is_primitive_type = lambda _: True
def __exit__(self, type, value, traceback):
_utils.is_primitive_type = self.old_is_primitive
def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig:
"""Convert a flat argparse.Namespace to a structured DictConfig."""
# Here we are using field values provided in args to override counterparts inside config object
overrides, deletes = override_module_args(args)
# configs will be in fairseq/config after installation
config_path = os.path.join("..", "config")
GlobalHydra.instance().clear()
with initialize(config_path=config_path):
try:
composed_cfg = compose("config", overrides=overrides, strict=False)
except:
logger.error("Error when composing. Overrides: " + str(overrides))
raise
for k in deletes:
composed_cfg[k] = None
cfg = OmegaConf.create(
OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True)
)
# hack to be able to set Namespace in dict config. this should be removed when we update to newer
# omegaconf version that supports object flags, or when we migrate all existing models
from omegaconf import _utils
with omegaconf_no_object_check():
if cfg.task is None and getattr(args, "task", None):
cfg.task = Namespace(**vars(args))
from fairseq.tasks import TASK_REGISTRY
_set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task])
cfg.task._name = args.task
if cfg.model is None and getattr(args, "arch", None):
cfg.model = Namespace(**vars(args))
from fairseq.models import ARCH_MODEL_REGISTRY
_set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch])
cfg.model._name = args.arch
if cfg.optimizer is None and getattr(args, "optimizer", None):
cfg.optimizer = Namespace(**vars(args))
from fairseq.optim import OPTIMIZER_REGISTRY
_set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer])
cfg.optimizer._name = args.optimizer
if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None):
cfg.lr_scheduler = Namespace(**vars(args))
from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY
_set_legacy_defaults(
cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler]
)
cfg.lr_scheduler._name = args.lr_scheduler
if cfg.criterion is None and getattr(args, "criterion", None):
cfg.criterion = Namespace(**vars(args))
from fairseq.criterions import CRITERION_REGISTRY
_set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion])
cfg.criterion._name = args.criterion
OmegaConf.set_struct(cfg, True)
return cfg
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks; should be of size 2, where
            the first element is the batch size and the second is the number of timesteps
        padding_mask: optional padding mask of the same size as shape, which prevents masking padded elements
        mask_prob: probability for each token to be chosen as the start of a span to be masked. This is multiplied
            by the number of timesteps divided by the mask span length, so that approximately this percentage of
            all elements is masked; due to overlaps, the actual number will be smaller (unless no_overlap is True)
        mask_length: base length of each masked span (interpreted according to mask_type)
        mask_type: how to compute mask lengths
            static = fixed size
            uniform = sample from uniform distribution [mask_other, mask_length*2]
            normal = sample from normal distribution with mean mask_length and stdev mask_other; mask is min 1 element
            poisson = sample from Poisson distribution with lambda = mask_length
        min_masks: minimum number of masked spans
        no_overlap: if True, switches to a recursive algorithm that prevents spans from overlapping
        min_space: only used if no_overlap is True; how many elements to keep unmasked between spans
"""
bsz, all_sz = shape
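    # Usage sketch (hedged): for shape (2, 100), mask_prob 0.65, mask_length 10 and
    # mask_type "static", this returns a boolean np.ndarray of shape (2, 100) with
    # roughly 65% of the timesteps covered by 10-step spans (less where spans overlap).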
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
                    int,  # np.int was removed in NumPy >= 1.24
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
return mask
def buffered_arange(max):
if not hasattr(buffered_arange, "buf"):
buffered_arange.buf = torch.LongTensor()
if max > buffered_arange.buf.numel():
buffered_arange.buf.resize_(max)
torch.arange(max, out=buffered_arange.buf)
return buffered_arange.buf[:max]
def is_xla_tensor(tensor):
return torch.is_tensor(tensor) and tensor.device.type == "xla"
def index_put(tensor, indices, value):
if is_xla_tensor(tensor):
for _ in range(indices.dim(), tensor.dim()):
indices = indices.unsqueeze(-1)
if indices.size(-1) < tensor.size(-1):
indices = indices.expand_as(tensor)
tensor = torch.mul(tensor, ~indices) + torch.mul(value, indices)
else:
tensor[indices] = value
return tensor
def get_activation_fn(activation: str) -> Callable:
"""Returns the activation function corresponding to `activation`"""
from .fairseq_modules import gelu, gelu_accurate
if activation == "relu":
return F.relu
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
return [
"relu",
"gelu",
"gelu_fast", # deprecated
"gelu_accurate",
"tanh",
"linear",
]
| 11,673
| 36.536977
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/wav2vec2.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List, Tuple
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import compute_mask_indices, get_activation_fn, get_available_activation_fns
from .enum import ChoiceEnum
from torch.cuda.amp import autocast
from .fairseq_modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GradMultiply,
GumbelVectorQuantizer,
MultiheadAttention,
SamePad,
TransposeLast,
index_copy
)
from onmt.modules.layer_norm import LayerNorm
from onmt.modules.optimized.dropout_add import fused_dropout_add
from onmt.modules.optimized.linear import factorize_linear
from .utils import buffered_arange, index_put, is_xla_tensor
# from fairseq.dataclass import FairseqDataclass
# from fairseq.models.wav2vec import Wav2Vec2Config
from .dataclass import Wav2Vec2Config
from onmt.modules.sinusoidal_positional_encoding import SinusoidalPositionalEmbedding
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
def dropout_residual_connection(x, residual, dropout_module, is_training):
if fused_dropout_add is not None and dropout_module.p > 0 and is_training:
return fused_dropout_add(x, residual, dropout_module.p, is_training)
return dropout_module(x) + residual
def init_bert_params(module):
"""
Initialize the weights specific to the BERT Model.
This overrides the default initializations depending on the specified arguments.
1. If normal_init_linear_weights is set then weights of linear
layer will be initialized using the normal distribution and
        bias will be set to the specified value.
2. If normal_init_embed_weights is set then weights of embedding
layer will be initialized using the normal distribution.
3. If normal_init_proj_weights is set then weights of
in_project_weight for MultiHeadAttention initialized using
the normal distribution (to be validated).
"""
def normal_(data):
# with FSDP, module params will be on CUDA, so we cast them back to CPU
# so that the RNG is consistent with and without FSDP
data.copy_(
data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
)
if isinstance(module, nn.Linear):
normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, MultiheadAttention):
if not module.fast_attention:
normal_(module.q_proj.weight.data)
normal_(module.k_proj.weight.data)
normal_(module.v_proj.weight.data)
else:
normal_(module.proj_weight.data)
#
# @dataclass
# class Wav2Vec2Config(FairseqDataclass):
# extractor_mode: EXTRACTOR_MODE_CHOICES = field(
# default="default",
# metadata={
# "help": "mode for feature extractor. default has a single group norm with d "
# "groups in the first conv block, whereas layer_norm has layer norms in "
# "every block (meant to use with normalize=True)"
# },
# )
# encoder_layers: int = field(
# default=12, metadata={"help": "num encoder layers in the transformer"}
# )
# encoder_embed_dim: int = field(
# default=768, metadata={"help": "encoder embedding dimension"}
# )
# encoder_ffn_embed_dim: int = field(
# default=3072, metadata={"help": "encoder embedding dimension for FFN"}
# )
# encoder_attention_heads: int = field(
# default=12, metadata={"help": "num encoder attention heads"}
# )
# activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
# default="gelu", metadata={"help": "activation function to use"}
# )
#
# # dropouts
# dropout: float = field(
# default=0.1, metadata={"help": "dropout probability for the transformer"}
# )
# attention_dropout: float = field(
# default=0.1, metadata={"help": "dropout probability for attention weights"}
# )
# activation_dropout: float = field(
# default=0.0, metadata={"help": "dropout probability after activation in FFN"}
# )
# encoder_layerdrop: float = field(
# default=0.0, metadata={"help": "probability of dropping a tarnsformer layer"}
# )
# dropout_input: float = field(
# default=0.0,
# metadata={"help": "dropout to apply to the input (after feat extr)"},
# )
# dropout_features: float = field(
# default=0.0,
# metadata={"help": "dropout to apply to the features (after feat extr)"},
# )
#
# final_dim: int = field(
# default=0,
# metadata={
# "help": "project final representations and targets to this many dimensions."
# "set to encoder_embed_dim is <= 0"
# },
# )
# layer_norm_first: bool = field(
# default=False, metadata={"help": "apply layernorm first in the transformer"}
# )
# conv_feature_layers: str = field(
# default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]",
# metadata={
# "help": "string describing convolutional feature extraction layers in form of a python list that contains "
# "[(dim, kernel_size, stride), ...]"
# },
# )
# conv_bias: bool = field(
# default=False, metadata={"help": "include bias in conv encoder"}
# )
# logit_temp: float = field(
# default=0.1, metadata={"help": "temperature to divide logits by"}
# )
# quantize_targets: bool = field(
# default=False, metadata={"help": "use quantized targets"}
# )
# quantize_input: bool = field(
# default=False, metadata={"help": "use quantized inputs"}
# )
# same_quantizer: bool = field(
# default=False, metadata={"help": "use same quantizer for inputs and targets"}
# )
# target_glu: bool = field(
# default=False, metadata={"help": "adds projection + glu to targets"}
# )
# feature_grad_mult: float = field(
# default=1.0, metadata={"help": "multiply feature extractor var grads by this"}
# )
# quantizer_depth: int = field(
# default=1,
# metadata={"help": "number of quantizer layers"},
# )
# quantizer_factor: int = field(
# default=3,
# metadata={
# "help": "dimensionality increase for inner quantizer layers (if depth > 1)"
# },
# )
# latent_vars: int = field(
# default=320,
# metadata={"help": "number of latent variables V in each group of the codebook"},
# )
# latent_groups: int = field(
# default=2,
# metadata={"help": "number of groups G of latent variables in the codebook"},
# )
# latent_dim: int = field(
# default=0,
# metadata={
# "help": "if > 0, uses this dimensionality for latent variables. "
# "otherwise uses final_dim / latent_groups"
# },
# )
#
# # masking
# mask_length: int = field(default=10, metadata={"help": "mask length"})
# mask_prob: float = field(
# default=0.65, metadata={"help": "probability of replacing a token with mask"}
# )
# mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
# default="static", metadata={"help": "how to choose mask length"}
# )
# mask_other: float = field(
# default=0,
# metadata={
# "help": "secondary mask argument (used for more complex distributions), "
# "see help in compute_mask_indices"
# },
# )
# no_mask_overlap: bool = field(
# default=False, metadata={"help": "whether to allow masks to overlap"}
# )
# mask_min_space: int = field(
# default=1,
# metadata={"help": "min space between spans (if no overlap is enabled)"},
# )
#
# # channel masking
# mask_channel_length: int = field(
# default=10, metadata={"help": "length of the mask for features (channels)"}
# )
# mask_channel_prob: float = field(
# default=0.0, metadata={"help": "probability of replacing a feature with 0"}
# )
# mask_channel_before: bool = False
# mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
# default="static",
# metadata={"help": "how to choose mask length for channel masking"},
# )
# mask_channel_other: float = field(
# default=0,
# metadata={
# "help": "secondary mask argument (used for more complex distributions), "
# "see help in compute_mask_indicesh"
# },
# )
# no_mask_channel_overlap: bool = field(
# default=False, metadata={"help": "whether to allow channel masks to overlap"}
# )
# mask_channel_min_space: int = field(
# default=1,
# metadata={"help": "min space between spans (if no overlap is enabled)"},
# )
#
# # negative selection
# num_negatives: int = field(
# default=100,
# metadata={"help": "number of negative examples from the same sample"},
# )
# negatives_from_everywhere: bool = field(
# default=False,
# metadata={"help": "sample negatives from everywhere, not just masked states"},
# )
# cross_sample_negatives: int = field(
# default=0, metadata={"help": "number of negative examples from the any sample"}
# )
# codebook_negatives: int = field(
# default=0, metadata={"help": "number of negative examples codebook"}
# )
#
# # positional embeddings
# conv_pos: int = field(
# default=128,
# metadata={"help": "number of filters for convolutional positional embeddings"},
# )
# conv_pos_groups: int = field(
# default=16,
# metadata={"help": "number of groups for convolutional positional embedding"},
# )
#
# latent_temp: Tuple[float, float, float] = field(
# default=(2, 0.5, 0.999995),
# metadata={
# "help": "temperature for latent variable sampling. "
# "can be tuple of 3 values (start, end, decay)"
# },
# )
# @register_model("wav2vec2", dataclass=Wav2Vec2Config)
class Wav2Vec2Model(torch.nn.Module):
def __init__(self, cfg: Wav2Vec2Config,
favor=False, feature_redraw_interval=1000, auto_check_redraw=True,
weight_drop=0.0, predict_language=False, n_languages=1):
super().__init__()
self.rotary_attention = False
self.relative_attention = False
self.cfg = cfg
feature_enc_layers = eval(cfg.conv_feature_layers)
self.embed = feature_enc_layers[-1][0]
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
)
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim and not cfg.quantize_input
else None
)
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_before = cfg.mask_channel_before if hasattr(cfg, 'mask_channel_before') else True
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.quantizer = None
self.input_quantizer = None
self.n_negatives = cfg.num_negatives
self.cross_sample_negatives = cfg.cross_sample_negatives
self.codebook_negatives = cfg.codebook_negatives
self.negatives_from_everywhere = cfg.negatives_from_everywhere
self.logit_temp = cfg.logit_temp
final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
if cfg.quantize_targets:
vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else final_dim
self.quantizer = GumbelVectorQuantizer(
dim=self.embed,
num_vars=cfg.latent_vars,
temp=cfg.latent_temp,
groups=cfg.latent_groups,
combine_groups=False,
vq_dim=vq_dim,
time_first=True,
weight_proj_depth=cfg.quantizer_depth if hasattr(cfg, 'quantizer_depth') else 1,
weight_proj_factor=cfg.quantizer_factor if hasattr(cfg, 'quantizer_factor') else 3,
)
self.project_q = nn.Linear(vq_dim, final_dim)
else:
self.project_q = nn.Linear(self.embed, final_dim)
if cfg.quantize_input:
if cfg.same_quantizer and self.quantizer is not None:
vq_dim = final_dim
self.input_quantizer = self.quantizer
else:
vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else cfg.encoder_embed_dim
self.input_quantizer = GumbelVectorQuantizer(
dim=self.embed,
num_vars=cfg.latent_vars,
temp=cfg.latent_temp,
groups=cfg.latent_groups,
combine_groups=False,
vq_dim=vq_dim,
time_first=True,
weight_proj_depth=cfg.quantizer_depth,
weight_proj_factor=cfg.quantizer_factor,
)
self.project_inp = nn.Linear(vq_dim, cfg.encoder_embed_dim)
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg, favor=favor, weight_drop=weight_drop,
predict_language=predict_language, n_languages=n_languages)
self.layer_norm = LayerNorm(self.embed)
self.target_glu = None
if cfg.target_glu:
self.target_glu = nn.Sequential(
nn.Linear(final_dim, final_dim * 2), nn.GLU()
)
self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
self.favor = favor
def replace_attn_with_s4(self, cfg):
self.encoder.replace_attn_with_s4(cfg)
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        # NOTE: the parent class here is torch.nn.Module, which does not define
        # upgrade_state_dict_named, so no super() call is made.
        return state_dict
def clean_unused_weights(self):
self.input_quantizer = None
self.quantizer = None
self.target_glu = None
self.final_proj = None
self.project_q = None
return
@classmethod
def build_model(cls, cfg: Wav2Vec2Config, task=None):
"""Build a new model instance."""
return cls(cfg)
def apply_mask(
self,
x,
padding_mask,
mask_indices=None,
mask_channel_indices=None,
):
B, T, C = x.shape
if self.mask_channel_prob > 0 and self.mask_channel_before:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
if self.mask_prob > 0:
if mask_indices is None:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x = index_put(x, mask_indices, self.mask_emb.type_as(x))
else:
mask_indices = None
if self.mask_channel_prob > 0 and not self.mask_channel_before:
if mask_channel_indices is None:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x = index_put(x, mask_channel_indices, 0)
return x, mask_indices
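    # Note (illustrative, not from the original source): apply_mask returns the
    # possibly-masked features together with mask_indices, a (B, T) bool tensor that
    # marks the timesteps whose content was replaced by the learned mask_emb; those
    # positions later become the prediction targets of the contrastive task.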
def sample_negatives(self, y, num, padding_count=None):
if self.n_negatives == 0 and self.cross_sample_negatives == 0:
return y.new(0)
bsz, tsz, fsz = y.shape
y = y.view(-1, fsz) # BTC => (BxT)C
# FIXME: what happens if padding_count is specified?
cross_high = tsz * bsz
high = tsz - (padding_count or 0)
with torch.no_grad():
assert high > 1, f"{bsz, tsz, fsz}"
if self.n_negatives > 0:
tszs = (
buffered_arange(num)
.unsqueeze(-1)
.expand(-1, self.n_negatives)
.flatten()
)
neg_idxs = torch.randint(
low=0, high=high - 1, size=(bsz, self.n_negatives * num)
)
neg_idxs[neg_idxs >= tszs] += 1
if self.cross_sample_negatives > 0:
tszs = (
buffered_arange(num)
.unsqueeze(-1)
.expand(-1, self.cross_sample_negatives)
.flatten()
)
cross_neg_idxs = torch.randint(
low=0,
high=cross_high - 1,
size=(bsz, self.cross_sample_negatives * num),
)
cross_neg_idxs[cross_neg_idxs >= tszs] += 1
if self.n_negatives > 0:
for i in range(1, bsz):
neg_idxs[i] += i * high
else:
neg_idxs = cross_neg_idxs
if self.cross_sample_negatives > 0 and self.n_negatives > 0:
neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
negs = y[neg_idxs.view(-1)]
negs = negs.view(
bsz, num, self.n_negatives + self.cross_sample_negatives, fsz
).permute(
2, 0, 1, 3
) # to NxBxTxC
return negs, neg_idxs
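    # Shape sketch (assumed example, not from the original source): with y of shape
    # (B=4, T=30, C=256), n_negatives=100 and cross_sample_negatives=0,
    # sample_negatives returns negs of shape (100, 4, 30, 256) (N x B x T x C) and
    # neg_idxs of shape (4, 100 * 30) holding the flattened source indices.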
def compute_preds(self, x, y, negatives):
neg_is_pos = (y == negatives).all(-1)
y = y.unsqueeze(0)
targets = torch.cat([y, negatives], dim=0)
logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x)
logits = logits / self.logit_temp
if is_xla_tensor(logits) or neg_is_pos.any():
fillval = -float(2 ** 30)
if not hasattr(self, "_inftensor"):
self._inftensor = (
torch.tensor(fillval).to(x.device)
if is_xla_tensor(logits)
else float("-inf")
)
logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor)
return logits
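    # Sketch of the resulting logits (illustrative): with x of shape (B, T, C) at the
    # masked positions and N sampled negatives, logits has shape (N + 1, B, T); row 0
    # holds the cosine similarity to the true (quantized) target, so the downstream
    # cross-entropy (see get_logits / get_targets below) uses class index 0 as the label.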
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
return torch.floor((input_length - kernel_size) / stride + 1)
conv_cfg_list = eval(self.cfg.conv_feature_layers)
for i in range(len(conv_cfg_list)):
input_lengths = _conv_out_length(
input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2]
)
return input_lengths.to(torch.long)
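    # Worked example (assuming the usual conv spec
    # "[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2"): a 1-second, 16 kHz input of
    # 16000 samples maps to floor((16000-10)/5+1)=3199, then 1599, 799, 399, 199, 99
    # and finally 49 output frames, i.e. roughly one frame every 20 ms.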
def forward(
self,
source,
padding_mask=None,
positions=None,
mask=True,
features_only=False,
layer=None,
mask_indices=None,
mask_channel_indices=None,
padding_count=None,
precomputed_tdnn=False,
quantize=False, quantize_only=False,
lang=None,
atb=None,
checkpointing_ffn=False,
checkpointing_self_attn=False,
**kwargs
):
# if the tdnn features are precomputed then skip them
if not precomputed_tdnn:
if self.feature_grad_mult > 0 or source.requires_grad:
features = self.feature_extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.feature_extractor(source)
if not features_only:
features_pen = features.float().pow(2).mean()
# transpose from B x C x T to B x T x C (because conv takes input as B x 1 x T)
features = features.transpose(1, 2)
else:
features = source
# perform layer norm ... but check grad mode
current_grad_mode = torch.is_grad_enabled()
if current_grad_mode:
torch.set_grad_enabled(self.layer_norm.weight.requires_grad)
features = self.layer_norm(features)
torch.set_grad_enabled(current_grad_mode)
if quantize:
assert self.quantizer is not None
with torch.no_grad():
quantizer_output = self.quantizer.forward_idx(features)
else:
quantizer_output = None
if features_only:
unmasked_features = None
else:
unmasked_features = features.clone()
if not precomputed_tdnn: # then compute the padding mask after the TDNN step
if padding_mask is not None and padding_mask.any():
input_lengths = (1 - padding_mask.long()).sum(-1)
# apply conv formula to get real output_lengths
output_lengths = self._get_feat_extract_output_lengths(input_lengths)
padding_mask = torch.zeros(
features.shape[:2], dtype=features.dtype, device=features.device
)
                # these two operations make sure that all values
                # before the output length indices are attended to
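                # Worked example (illustrative): for an output length of 3 in a row of
                # length 5, the marker row is [0, 0, 1, 0, 0]; flipping, cumsum-ing and
                # flipping back counts the ones at or to the right of each position,
                # giving [1, 1, 1, 0, 0], so (1 - .) yields the padding mask [F, F, F, T, T].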
padding_mask[
(
torch.arange(padding_mask.shape[0], device=padding_mask.device),
output_lengths - 1,
)
] = 1
padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
else:
padding_mask = None
if quantize_only:
quantized_x, quantized_target = quantizer_output
output_dict = dict()
output_dict['quantized_x'] = quantized_x # b x t x ?
output_dict['quantized_target'] = quantized_target # b x t x num_groups
output_dict['padding_mask'] = padding_mask
return output_dict
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
# unmasked_features = self.dropout_features(unmasked_features)
num_vars = None
code_ppl = None
prob_ppl = None
curr_temp = None
# if self.input_quantizer:
# q = self.input_quantizer(features, produce_targets=False)
# features = q["x"]
# num_vars = q["num_vars"]
# code_ppl = q["code_perplexity"]
# prob_ppl = q["prob_perplexity"]
# curr_temp = q["temp"]
# features = self.project_inp(features)
if mask:
x, mask_indices = self.apply_mask(
features,
padding_mask,
mask_indices=mask_indices,
mask_channel_indices=mask_channel_indices,
)
if not is_xla_tensor(x) and mask_indices is not None and not features_only:
# tpu-comment: reducing the size in a dynamic way causes
# too many recompilations on xla.
y = unmasked_features[mask_indices].view(
unmasked_features.size(0), -1, unmasked_features.size(-1)
)
else:
y = unmasked_features
else:
x = features
y = unmasked_features
mask_indices = None
x, layer_results, pred_lang = self.encoder(x, padding_mask=padding_mask, layer=layer, lang=lang, atb=atb,
checkpointing_ffn=checkpointing_ffn,
checkpointing_self_attn=checkpointing_self_attn)
if features_only:
output_dict = {
"x": x,
"padding_mask": padding_mask,
"features": unmasked_features,
"layer_results": layer_results,
"pred_lang": pred_lang
}
if quantize:
quantized_x, quantized_target = quantizer_output
output_dict['quantized_x'] = quantized_x # b x t x ?
output_dict['quantized_target'] = quantized_target # b x t x num_groups
return output_dict
if self.quantizer:
q = self.quantizer(y, produce_targets=False)
y = q["x"]
num_vars = q["num_vars"]
code_ppl = q["code_perplexity"]
prob_ppl = q["prob_perplexity"]
curr_temp = q["temp"]
y = self.project_q(y)
if self.negatives_from_everywhere:
neg_cands = self.quantizer(unmasked_features, produce_targets=False)[
"x"
]
negs, _ = self.sample_negatives(
neg_cands,
y.size(1),
padding_count=padding_count,
)
negs = self.project_q(negs)
else:
negs, _ = self.sample_negatives(
y,
y.size(1),
padding_count=padding_count,
)
if self.codebook_negatives > 0:
cb_negs = self.quantizer.sample_from_codebook(
y.size(0) * y.size(1), self.codebook_negatives
)
cb_negs = cb_negs.view(
self.codebook_negatives, y.size(0), y.size(1), -1
                )  # order doesn't matter
cb_negs = self.project_q(cb_negs)
negs = torch.cat([negs, cb_negs], dim=0)
else:
y = self.project_q(y)
if self.negatives_from_everywhere:
negs, _ = self.sample_negatives(
unmasked_features,
y.size(1),
padding_count=padding_count,
)
negs = self.project_q(negs)
else:
negs, _ = self.sample_negatives(
y,
y.size(1),
padding_count=padding_count,
)
if not is_xla_tensor(x):
# tpu-comment: reducing the size in a dynamic way causes
# too many recompilations on xla.
x = x[mask_indices].view(x.size(0), -1, x.size(-1))
if self.target_glu:
y = self.target_glu(y)
negs = self.target_glu(negs)
x = self.final_proj(x)
x = self.compute_preds(x, y, negs)
result = {
"x": x,
"padding_mask": padding_mask,
"features_pen": features_pen,
}
if prob_ppl is not None:
result["prob_perplexity"] = prob_ppl
result["code_perplexity"] = code_ppl
result["num_vars"] = num_vars
result["temp"] = curr_temp
return result
def quantize(self, x):
assert self.quantizer is not None
x = self.feature_extractor(x)
x = x.transpose(1, 2)
x = self.layer_norm(x)
return self.quantizer.forward_idx(x)
def extract_conv_features(self, source, padding_mask):
with torch.no_grad():
features = self.feature_extractor(source)
# transpose from B x C x T to B x T x C (because conv takes input as B x 1 x T)
features = features.transpose(1, 2).contiguous()
if padding_mask is not None and padding_mask.any():
input_lengths = (1 - padding_mask.long()).sum(-1)
# apply conv formula to get real output_lengths
output_lengths = self._get_feat_extract_output_lengths(input_lengths)
padding_mask = torch.zeros(
features.shape[:2], dtype=features.dtype, device=features.device
)
            # these two operations make sure that all values
            # before the output length indices are attended to
padding_mask[
(
torch.arange(padding_mask.shape[0], device=padding_mask.device),
output_lengths - 1,
)
] = 1
padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
else:
bsz, seq_len = features.size(0), features.size(1)
padding_mask = features.new(bsz, seq_len).zero_()
return features, padding_mask.long()
def extract_features(self, source, padding_mask, mask=False, layer=None, precomputed_tdnn=False,
lang=None, atb=None):
res = self.forward(
source, padding_mask, mask=mask, features_only=True, layer=layer, precomputed_tdnn=precomputed_tdnn,
lang=lang, atb=atb
)
return res
def get_logits(self, net_output):
logits = net_output["x"]
logits = logits.transpose(0, 2)
logits = logits.reshape(-1, logits.size(-1))
return logits
def get_targets(self, sample, net_output, expand_steps=True):
x = net_output["x"]
return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long)
def get_extra_losses(self, net_output):
pen = []
if "prob_perplexity" in net_output:
pen.append(
(net_output["num_vars"] - net_output["prob_perplexity"])
/ net_output["num_vars"]
)
if "features_pen" in net_output:
pen.append(net_output["features_pen"])
return pen
def remove_pretraining_modules(self, removing_quantizer=True):
if removing_quantizer:
self.quantizer = None
else:
print("[INFO] Keeping the quantizer")
print(self.quantizer)
# self.groups = groups
# self.combine_groups = combine_groups
# self.input_dim = dim
# self.num_vars = num_vars
# self.time_first = time_first
print("Groups: ", self.quantizer.groups)
print("Combine groups: ", self.quantizer.combine_groups)
print("num vars: ", self.quantizer.num_vars)
print(self.quantizer.vars.size())
self.project_q = None
self.target_glu = None
self.final_proj = None
def add_stacked_encoder(self, stacked_encoder):
self.encoder.add_stacked_encoder(stacked_encoder)
def add_relative_attention(self):
self.relative_attention = True
self.encoder.add_relative_attention()
def add_rotary_attention(self):
self.rotary_attention = True
self.encoder.add_rotary_attention()
def convert_fast_attention(self):
model = self.encoder
def find_modules(nn_module, type):
return [module for module in nn_module.modules() if isinstance(module, type)]
fast_attentions = find_modules(model, MultiheadAttention)
for fast_attention in fast_attentions:
fast_attention.convert_fast_attention()
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers: List[Tuple[int, int, int]],
dropout: float = 0.0,
mode: str = "default",
conv_bias: bool = False,
):
super().__init__()
assert mode in {"default", "layer_norm"}
def block(
n_in,
n_out,
k,
stride,
is_layer_norm=False,
is_group_norm=False,
conv_bias=False,
):
def make_conv():
conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
nn.init.kaiming_normal_(conv.weight)
return conv
assert (
is_layer_norm and is_group_norm
) == False, "layer norm and group norm are exclusive"
if is_layer_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=True),
TransposeLast(),
),
nn.GELU(),
)
elif is_group_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
Fp32GroupNorm(dim, dim, affine=True),
nn.GELU(),
)
else:
return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
in_d = 1
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 3, "invalid conv definition: " + str(cl)
(dim, k, stride) = cl
self.conv_layers.append(
block(
in_d,
dim,
k,
stride,
is_layer_norm=mode == "layer_norm",
is_group_norm=mode == "default" and i == 0,
conv_bias=conv_bias,
)
)
in_d = dim
def forward(self, x):
# BxT -> BxCxT (only for waveforms with 1 channel)
x = x.unsqueeze(1)
for conv in self.conv_layers:
x = conv(x)
return x
class TransformerEncoder(nn.Module):
def __init__(self, args, favor=False, weight_drop=0.0, predict_language=False, n_languages=1):
"""
        :param args: wav2vec 2.0 configuration namespace
        :param favor: use Performer (FAVOR+) attention instead of standard softmax attention
"""
super().__init__()
self.rotary_attention = False
self.positional_encoder = None
self.relative_attention = False
self.dropout = args.dropout
self.embedding_dim = args.encoder_embed_dim
self.favor = favor
self.weight_drop = weight_drop
self.num_heads = args.encoder_attention_heads
self.num_layers = args.encoder_layers
self.attention_dropout = args.attention_dropout
self.activation_dropout = args.activation_dropout
self.deepspeed = False
self.pos_conv = nn.Conv1d(
self.embedding_dim,
self.embedding_dim,
kernel_size=args.conv_pos,
padding=args.conv_pos // 2,
groups=args.conv_pos_groups,
)
dropout = 0
std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
nn.init.constant_(self.pos_conv.bias, 0)
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
self.layers = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=self.dropout,
weight_drop=self.weight_drop,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
favor=favor
)
for _ in range(args.encoder_layers)
]
)
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(self.embedding_dim)
self.layerdrop = args.encoder_layerdrop
self.args = args
self.apply(init_bert_params)
self.predict_language = predict_language
if self.predict_language:
self.layer_norm_cls = LayerNorm(self.embedding_dim)
self.linear_cls = torch.nn.Linear(self.embedding_dim, n_languages)
else:
self.linear_cls = None
self.layer_norm_cls = None
# from onmt.modules.optimized.fast_mha import fast_bert_mha
# self.fast_bert_mha = fast_bert_mha
from onmt.modules.optimized.flash_mha import flash_bert_mha
self.fast_bert_mha = flash_bert_mha
self.using_s4 = False
def replace_attn_with_s4(self, cfg):
self.using_s4 = True
for layer in self.layers:
layer.replace_attn_with_s4(cfg)
# add stacked encoder from mbart encoder (purely parameter increase)
def add_stacked_encoder(self, stacked_encoder):
stacked_layers = stacked_encoder.layers
args = self.args
for old_layer in stacked_layers:
new_layer = TransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=self.dropout,
weight_drop=self.weight_drop,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
favor=self.favor
)
# TODO: check layer norm first between new and old layer
new_layer.load_state_dict(old_layer.state_dict())
self.layers.append(new_layer)
def add_relative_attention(self):
self.relative_attention = True
def convert(m_):
classname = m_.__class__.__name__
if classname.find('MultiheadAttention') != -1:
m_.add_relative_attention()
self.layers.apply(convert)
self.positional_encoder = SinusoidalPositionalEmbedding(self.embedding_dim)
def add_rotary_attention(self):
self.rotary_attention = True
def convert(m_):
classname = m_.__class__.__name__
if classname.find('MultiheadAttention') != -1:
m_.add_rotary_attention()
self.layers.apply(convert)
from onmt.modules.rotary_postional_encodings import SinusoidalEmbeddings
self.positional_encoder = SinusoidalEmbeddings(self.embedding_dim // self.num_heads)
def forward(self, x, padding_mask=None, positions=None, layer=None, lang=None, atb=None, checkpointing_ffn=False,
checkpointing_self_attn=False, **kwargs):
x, layer_results, pred_lang = self.extract_features(x, padding_mask, positions, layer, lang=lang, atb=atb,
checkpointing_ffn=checkpointing_ffn,
checkpointing_self_attn=checkpointing_self_attn)
if self.layer_norm_first and layer is None:
x = self.layer_norm(x)
return x, layer_results, pred_lang
def extract_features(self, x, padding_mask=None, positions=None, tgt_layer=None, lang=None, atb=None,
checkpointing_ffn=False, checkpointing_self_attn=False):
if padding_mask is not None:
x = index_put(x, padding_mask, 0)
x_conv = self.pos_conv(x.transpose(1, 2))
x_conv = x_conv.transpose(1, 2)
x = x + x_conv
if not self.relative_attention and not self.rotary_attention:
positions = None
elif self.relative_attention:
klen = x.size(1)
bsz = x.size(0)
positions = torch.arange(klen - 1, -klen, -1.0, device=x.device, dtype=x.dtype)
pos_emb = self.positional_encoder(positions, bsz=bsz)
pos_emb = F.dropout(pos_emb, p=self.dropout, training=self.training)
positions = pos_emb
elif self.rotary_attention:
positions = self.positional_encoder(x.transpose(0, 1), seq_dim=0)
if not self.layer_norm_first:
x = self.layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# check if flash attention can be run
can_run_fast_bert_mha = False
seq_len = x.size(1)
bsz = x.size(0)
total_bsz = 0
# fast attention refers to using fused QKV matrix multiplication and T-B-H matrix layout to reduce reshaping cost
if self.using_s4:
fast_attention = False
else:
fast_attention = self.layers[0].self_attn.fast_attention
if self.fast_bert_mha and not self.relative_attention and \
fast_attention and x.dtype == torch.half and not self.using_s4:
can_run_fast_bert_mha = True
from onmt.utils import unpad_input
            # masked positions are 1, so sequence lengths are computed as (1 - padding_mask).sum(dim=1)
if padding_mask is None:
padding_mask = x.new_zeros(bsz, seq_len)
padding_mask = padding_mask.long()
lengths = (1 - padding_mask).sum(dim=1)
lengths = lengths.cpu().tolist() # list of lengths for B seqs
# remove paddings from x
x = x.view(-1, x.size(-1)) # flatten [B x T]
non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
x = x.index_select(0, non_pad_indices)
# maybe pad it so the first dim % 8 = 0?
total_bsz = x.size(0)
max_len = max(lengths)
# cumulative sequence lengths (required input for fmha)
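            # Illustrative example: for two sequences of lengths [7, 5],
            # a = [0, 7, 5] and cu_seqlens = [0, 7, 12]; the un-padded attention kernel
            # uses these offsets to locate each sequence inside the flattened x.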
a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=x.device)
else:
# print("[INFO] CanNOT run FAST MHA with seq_len", seq_len)
max_len = -1
cu_seqlens = None
non_pad_indices = None
# TODO: add classification layer here.
if self.predict_language:
# B x T x H ->
pred_lang = self.linear_cls(self.layer_norm_cls(x))
_lang = torch.nn.functional.softmax(pred_lang, dim=-1, dtype=torch.float32)
else:
pred_lang = None
_lang = lang
if not self.favor and not can_run_fast_bert_mha:
# B x T x C -> T x B x C (only for vanilla self-attention and s4)
x = x.transpose(0, 1)
x = x.contiguous()
# forward pass through layers
layer_results = []
r = None
for i, layer in enumerate(self.layers):
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, z = layer(x, self_attn_padding_mask=padding_mask, positions=positions,
max_len=max_len, cu_seqlens=cu_seqlens,
lang=_lang, atb=atb,
checkpointing_ffn=checkpointing_ffn,
checkpointing_self_attn=checkpointing_self_attn)
if tgt_layer is not None:
layer_results.append((x, z))
if i == tgt_layer:
r = x
break
if r is not None:
x = r
        # if we removed padding earlier (for fast bert MHA), remember to put the padding back
        # to restore the B x T x H form
if can_run_fast_bert_mha:
            # drop any extra rows that were padded onto the flattened batch
if x.size(0) > total_bsz:
x = x[:total_bsz, :]
from onmt.utils import pad_input
x = index_copy(x, non_pad_indices, bsz * seq_len)
# transpose [B x T x H] to [T x B x H]
x = x.view(bsz, seq_len, -1).transpose(0, 1).contiguous()
if self.predict_language and pred_lang is not None:
pred_lang = index_copy(pred_lang, non_pad_indices, bsz * seq_len)
pred_lang = pred_lang.view(bsz, seq_len, -1).transpose(0, 1).contiguous()
if pred_lang is not None:
pred_lang = pred_lang.transpose(0, 1).contiguous()
return x, layer_results, pred_lang
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
def add_adapters(self, n_languages, adapter_location=1):
for layer in self.layers:
layer.add_adapters(n_languages, adapter_location=adapter_location)
def add_factorize(self, n_languages, rank=4, multiplicative=False, fast=False, dyrank=False, *kwargs):
for layer in self.layers:
layer.add_factorized(n_languages, rank=rank,
multiplicative=multiplicative, fast=fast, dyrank=dyrank)
def freeze_ffn_params(self):
for layer in self.layers:
for p in layer.fc1.parameters():
p.requires_grad = False
for p in layer.fc2.parameters():
p.requires_grad = False
# noinspection PyAttributeOutsideInit
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embedding_dim: float = 768,
ffn_embedding_dim: float = 3072,
num_attention_heads: float = 8,
dropout: float = 0.1,
weight_drop: float = 0.0,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
layer_norm_first: bool = False,
favor=False
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.ffn_embedding_dim = ffn_embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
self.favor = favor
self.has_adapter = False
self.is_factorized = False
self.fast_factorize = False
self.multiplicative_factorize = False
self.using_s4 = False
# Initialize blocks
self.activation_fn = get_activation_fn(activation_fn)
self.activation_fn_name = activation_fn
self.self_attn = MultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
weight_drop=weight_drop,
self_attention=True,
favor=favor
)
self.residual_dropout = dropout
self.dropout1 = nn.Dropout(dropout, inplace=False)
self.dropout2 = nn.Dropout(self.activation_dropout, inplace=True)
self.dropout3 = nn.Dropout(dropout, inplace=False)
self.layer_norm_first = layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim)
self.fused = False
self.fused_function = None
if self.activation_fn_name == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif self.activation_fn_name == 'gelu':
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
def replace_attn_with_s4(self, s4_cfg):
from ..mssm.mhs4 import MHBiS4EncoderLayer
self.using_s4 = True
s4_layer = MHBiS4EncoderLayer(s4_cfg, s4_only=True)
del self.self_attn
self.self_attn = s4_layer
def add_adapters(self, n_languages, downsampling_factor=4, adapter_location=1):
"""
:param n_languages: one adapter per language
        :param downsampling_factor: downsampling factor for the adapter bottleneck layer
        :param adapter_location: 1 adds one adapter per layer; 2 additionally creates a mid-layer adapter
:return:
"""
self.n_languages = n_languages
self.has_adapter = True
self.adapter_location = adapter_location
from .adapter import MultilingualAdapter
self.adapter = MultilingualAdapter(n_languages, self.embedding_dim, downsample_factor=downsampling_factor)
if adapter_location == 2:
self.mid_adapter = MultilingualAdapter(n_languages, self.embedding_dim,
downsample_factor=downsampling_factor)
def add_factorized(self, n_languages, rank=4, multiplicative=True, fast=False, dyrank=False,
**kwargs):
"""
        :param n_languages: number of languages (one set of factors per language)
        :param rank: number of rank-1 vectors per factor
        :param multiplicative: also learn multiplicative (element-wise) factors
        :param fast: use the fast factorization path (call_factorize_mlp / factorize_linear)
        :param dyrank: dynamic-rank variant (additive r factors initialized to zero)
        :return:
"""
# first, tell the attention modules to add factorize
self.self_attn.add_factorized_weights(n_languages, rank=rank,
multiplicative=multiplicative, dyrank=dyrank, fast=fast)
# add factorized for the sub-factors
self.multiplicative_factorize = multiplicative
self.is_factorized = True
self.fast_factorize = fast
self.dyrank = dyrank
embed_dim = self.embedding_dim
ffn_dim = self.ffn_embedding_dim
if multiplicative:
_rank = rank if fast else 1
self.rm_i = torch.nn.Parameter(torch.Tensor(n_languages, _rank, self.ffn_embedding_dim))
self.sm_i = torch.nn.Parameter(torch.Tensor(n_languages, _rank, self.embedding_dim))
self.rm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, self.embedding_dim))
self.sm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, self.ffn_embedding_dim))
constant = 1
nn.init.constant_(self.rm_i, constant)
nn.init.constant_(self.sm_i, constant)
nn.init.constant_(self.rm_o, constant)
nn.init.constant_(self.sm_o, constant)
# These parameters are NOT USED with fast factorize
self.r_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_embedding_dim))
self.s_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embedding_dim))
self.r_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embedding_dim))
self.s_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_embedding_dim))
if self.dyrank:
nn.init.zeros_(self.r_i)
nn.init.normal_(self.s_i, 0.0, 0.02)
nn.init.zeros_(self.r_o)
nn.init.normal_(self.s_o, 0.0, 0.02)
else:
nn.init.normal_(self.r_i, 0.0, 0.02)
nn.init.normal_(self.s_i, 0.0, 0.02)
nn.init.normal_(self.r_o, 0.0, 0.02)
nn.init.normal_(self.s_o, 0.0, 0.02)
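    # Parametrization sketch (added note, not from the original source): with both
    # multiplicative and additive factors enabled, get_mlp_weights below effectively
    # builds per-language feed-forward weights as
    #     W1_eff = fc1.weight * (rm_i^T @ sm_i) + (r_i^T @ s_i)
    #     W2_eff = fc2.weight * (rm_o^T @ sm_o) + (r_o^T @ s_o)
    # i.e. a low-rank multiplicative mask plus a low-rank additive delta on top of the
    # shared weights; the fast path (call_factorize_mlp) passes the rm/sm factors to
    # factorize_linear instead.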
def get_mlp_weights(self, lang=None, atb=None):
in_weight = self.fc1.weight
out_weight = self.fc2.weight
in_bias = self.fc1.bias
out_bias = self.fc2.bias
if lang is not None:
if self.is_factorized:
# First check if we use multiplicative
if self.multiplicative_factorize:
rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0) # squeeze possible because only 1
sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
if self.fast_factorize:
mul_factor_in = torch.mm(rm_i.t(), sm_i)
mul_factor_out = torch.mm(rm_o.t(), sm_o)
else:
mul_factor_in = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)
in_weight = in_weight * mul_factor_in
out_weight = out_weight * mul_factor_out
                # For additive
r_i = torch.index_select(self.r_i, 0, lang).squeeze(0)
s_i = torch.index_select(self.s_i, 0, lang).squeeze(0)
r_o = torch.index_select(self.r_o, 0, lang).squeeze(0)
s_o = torch.index_select(self.s_o, 0, lang).squeeze(0)
if self.fast_factorize or self.dyrank:
add_factor_in = torch.mm(r_i.t(), s_i)
add_factor_out = torch.mm(r_o.t(), s_o)
else:
add_factor_in = torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)
in_weight = in_weight + add_factor_in
out_weight = out_weight + add_factor_out
return in_weight, out_weight, in_bias, out_bias
def call_self_attn(self, x, self_attn_padding_mask=None, positions=None, attn_mask=None,
max_len=None, cu_seqlens=None, lang=None, atb=None, checkpointing=False):
if not self.using_s4:
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
positions=positions,
attn_mask=attn_mask, # this probably doesn't do anything
max_len=max_len, cu_seqlens=cu_seqlens,
lang=lang, atb=atb,
checkpointing=checkpointing
)
return x, attn
# In s4 case:
x = self.self_attn(x, self_attn_padding_mask)
return x, None
def call_factorize_mlp(self, x, lang, activation_fn, dropout_p, training_):
in_weight = self.fc1.weight
out_weight = self.fc2.weight
in_bias = self.fc1.bias
out_bias = self.fc2.bias
n_languages, _rank = self.rm_i.size(0), self.rm_i.size(1)
        # TODO: use mm instead of index_select for multiple codes
if lang.ndim == 1:
rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0) # squeeze possible because only 1
sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
elif lang.ndim == 2: # for flash attention
rm_i = torch.mm(lang, self.rm_i.view(n_languages, _rank * self.rm_i.size(-1))).view(lang.size(0), _rank,
self.rm_i.size(-1))
sm_i = torch.mm(lang, self.sm_i.view(n_languages, _rank * self.sm_i.size(-1))).view(lang.size(0), _rank,
self.sm_i.size(-1))
rm_o = torch.mm(lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(lang.size(0), _rank,
self.rm_o.size(-1))
sm_o = torch.mm(lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(lang.size(0), _rank,
self.sm_o.size(-1))
elif lang.ndim == 3:
_len, _bsz = lang.size(0), lang.size(1)
_lang = lang.view(_len * _bsz, lang.size(-1))
rm_i = torch.mm(_lang, self.rm_i.view(n_languages, _rank * self.rm_i.size(-1))).view(
_len, _bsz, _rank, self.rm_i.size(-1))
sm_i = torch.mm(_lang, self.sm_i.view(n_languages, _rank * self.sm_i.size(-1))).view(
_len, _bsz, _rank, self.sm_i.size(-1))
rm_o = torch.mm(_lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(
_len, _bsz, _rank, self.rm_o.size(-1))
sm_o = torch.mm(_lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(
_len, _bsz, _rank, self.sm_o.size(-1))
x = factorize_linear(x, in_weight, in_bias, rm_i, sm_i)
x = activation_fn(x)
x = F.dropout(x, dropout_p, training=training_)
x = factorize_linear(x, out_weight, out_bias, rm_o, sm_o)
return x
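    # Note (illustrative): when `lang` is a 1-D index tensor the same low-rank factors
    # are shared by the whole batch; when it is a 2-D or 3-D soft distribution over
    # languages (e.g. the softmaxed output of the encoder's language classifier),
    # factorize_linear receives per-row / per-position factors instead, mixing the
    # per-language parameters.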
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
positions=None,
max_len=-1, cu_seqlens=None,
lang=None, atb=None,
checkpointing_ffn=False,
checkpointing_self_attn=False,
**kwargs
):
"""
LayerNorm is applied either before or after the self-attention/ffn
        modules, similar to the original Transformer implementation.
"""
residual = x
# is_fast = self.self_attn.fast_attention
is_fast = False
def call_mlp(x, in_weight, out_weight, in_bias, out_bias, activation_fn, dropout_p, training_,
fused, fused_function, checkpointing):
# TODO: check type x torch.half or torch.float32
if fused and x.is_cuda:
dropout_p_ = dropout_p if training_ else 0.0
weights = [in_weight, out_weight]
biases = [in_bias, out_bias]
x = fused_function(dropout_p_, checkpointing, x, *weights, *biases)
else:
x = F.linear(x, in_weight, in_bias)
x = activation_fn(x)
x = F.dropout(x, dropout_p, training=training_)
x = F.linear(x, out_weight, out_bias)
return x
if self.has_adapter:
if self.adapter_location == 1:
assert lang is not None
x = self.adapter(x, lang=lang)
x.add_(residual) # residual is before the big FFN
residual = x
if self.layer_norm_first:
x = self.self_attn_layer_norm(x)
# SELF ATTENTION
x, attn = self.call_self_attn(
x,
self_attn_padding_mask=self_attn_padding_mask,
positions=positions,
attn_mask=self_attn_mask,
max_len=max_len, cu_seqlens=cu_seqlens,
lang=lang, atb=atb,
checkpointing=checkpointing_self_attn
)
x = self.dropout1(x) + residual
residual = x
# MLP
x = self.final_layer_norm(x)
if self.fast_factorize:
x = self.call_factorize_mlp(x, lang, self.activation_fn,
self.dropout2.p,
self.training)
else:
in_weight, out_weight, in_bias, out_bias = self.get_mlp_weights(lang=lang, atb=atb)
x = call_mlp(x, in_weight, out_weight, in_bias, out_bias, self.activation_fn,
self.dropout2.p, self.training,
self.fused, self.fused_function, checkpointing_ffn)
x = self.dropout3(x) + residual
return x, attn
else:
# THE BELOW CODE HAS NEVER BEEN RUN AND TESTED
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
)
# x = self.dropout1(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
in_weight, out_weight, in_bias, out_bias = self.get_mlp_weights(lang=lang, atb=atb)
x = call_mlp(x, in_weight, out_weight, in_bias, out_bias, self.activation_fn,
self.dropout2.p, self.training,
self.fused, self.fused_function, checkpointing_ffn)
x = self.dropout3(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
| 63,460
| 36.440118
| 121
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/file_io.py
|
import os
import shutil
from typing import List, Optional
import logging
IOPathManager = None
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
iopath's PathManager abstraction (for transparently handling various
internal backends).
"""
@staticmethod
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if IOPathManager:
return IOPathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if IOPathManager:
return IOPathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
@staticmethod
def get_local_path(path: str, **kwargs) -> str:
if IOPathManager:
return IOPathManager.get_local_path(path, **kwargs)
return path
@staticmethod
def exists(path: str) -> bool:
if IOPathManager:
return IOPathManager.exists(path)
return os.path.exists(path)
@staticmethod
def isfile(path: str) -> bool:
if IOPathManager:
return IOPathManager.isfile(path)
return os.path.isfile(path)
@staticmethod
def ls(path: str) -> List[str]:
if IOPathManager:
return IOPathManager.ls(path)
return os.listdir(path)
@staticmethod
def mkdirs(path: str) -> None:
if IOPathManager:
return IOPathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
@staticmethod
def rm(path: str) -> None:
if IOPathManager:
return IOPathManager.rm(path)
os.remove(path)
@staticmethod
def chmod(path: str, mode: int) -> None:
if not PathManager.path_requires_pathmanager(path):
os.chmod(path, mode)
@staticmethod
def register_handler(handler) -> None:
if IOPathManager:
return IOPathManager.register_handler(handler=handler)
@staticmethod
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if IOPathManager:
return IOPathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
@staticmethod
def path_requires_pathmanager(path: str) -> bool:
"""Do we require PathManager to access given path?"""
if IOPathManager:
for p in IOPathManager._path_handlers.keys():
if path.startswith(p):
return True
return False
@staticmethod
def supports_rename(path: str) -> bool:
# PathManager doesn't yet support renames
return not PathManager.path_requires_pathmanager(path)
@staticmethod
def rename(src: str, dst: str):
os.rename(src, dst)
"""
ioPath async PathManager methods:
"""
@staticmethod
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
"""
Return file descriptor with asynchronous write operations.
"""
global IOPathManager
if not IOPathManager:
logging.info("ioPath is initializing PathManager.")
try:
from iopath.common.file_io import PathManager
IOPathManager = PathManager()
except Exception:
logging.exception("Failed to initialize ioPath PathManager object.")
return IOPathManager.opena(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def async_close() -> bool:
"""
Wait for files to be written and clean up asynchronous PathManager.
NOTE: `PathManager.async_close()` must be called at the end of any
script that uses `PathManager.opena(...)`.
"""
global IOPathManager
if IOPathManager:
return IOPathManager.async_close()
return False
| 4,912
| 28.071006
| 87
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/wavlm.py
|
import math
import logging
from typing import List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.modules.layer_norm import LayerNorm
from .fairseq_modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GradMultiply,
SamePad,
TransposeLast,
)
from .wavlm_modules import (
WavLMMultiheadAttention,
get_activation_fn,
init_bert_params,
GLU_Linear
)
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks.
should be of size 2 where first element is batch size and 2nd is timesteps
padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_type: how to compute mask lengths
static = fixed size
uniform = sample from uniform distribution [mask_other, mask_length*2]
normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from poisson distribution with lambda = mask length
min_masks: minimum number of masked spans
        no_overlap: if true, will use an alternative recursive algorithm that prevents spans from overlapping
min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
                    int,  # note: np.int was removed from NumPy; the builtin int works as a dtype here
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
return mask
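# Usage sketch (illustrative, not part of the original file): masking a batch of two
# 50-frame sequences with the defaults used elsewhere in this file would look like
#     mask = compute_mask_indices((2, 50), None, mask_prob=0.65, mask_length=10,
#                                 mask_type="static", min_masks=2)
# which returns a (2, 50) boolean numpy array whose True entries are later replaced
# by the model's learned mask embedding (see WavLM.apply_mask below).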
class WavLMConfig:
def __init__(self, cfg=None):
self.extractor_mode: str = "default" # mode for feature extractor. default has a single group norm with d groups in the first conv block, whereas layer_norm has layer norms in every block (meant to use with normalize=True)
self.encoder_layers: int = 12 # num encoder layers in the transformer
self.encoder_embed_dim: int = 768 # encoder embedding dimension
self.encoder_ffn_embed_dim: int = 3072 # encoder embedding dimension for FFN
self.encoder_attention_heads: int = 12 # num encoder attention heads
self.activation_fn: str = "gelu" # activation function to use
self.layer_norm_first: bool = False # apply layernorm first in the transformer
self.conv_feature_layers: str = "[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2" # string describing convolutional feature extraction layers in form of a python list that contains [(dim, kernel_size, stride), ...]
self.conv_bias: bool = False # include bias in conv encoder
self.feature_grad_mult: float = 1.0 # multiply feature extractor var grads by this
self.normalize: bool = False # normalize input to have 0 mean and unit variance during training
# dropouts
self.dropout: float = 0.1 # dropout probability for the transformer
self.attention_dropout: float = 0.1 # dropout probability for attention weights
self.activation_dropout: float = 0.0 # dropout probability after activation in FFN
        self.encoder_layerdrop: float = 0.0  # probability of dropping a transformer layer
self.dropout_input: float = 0.0 # dropout to apply to the input (after feat extr)
self.dropout_features: float = 0.0 # dropout to apply to the features (after feat extr)
# masking
self.mask_length: int = 10 # mask length
self.mask_prob: float = 0.65 # probability of replacing a token with mask
self.mask_selection: str = "static" # how to choose mask length
        self.mask_other: float = 0  # secondary mask argument (used for more complex distributions), see help in compute_mask_indices
self.no_mask_overlap: bool = False # whether to allow masks to overlap
self.mask_min_space: int = 1 # min space between spans (if no overlap is enabled)
# channel masking
self.mask_channel_length: int = 10 # length of the mask for features (channels)
self.mask_channel_prob: float = 0.0 # probability of replacing a feature with 0
self.mask_channel_selection: str = "static" # how to choose mask length for channel masking
self.mask_channel_other: float = 0 # secondary mask argument (used for more complex distributions), see help in compute_mask_indices
self.no_mask_channel_overlap: bool = False # whether to allow channel masks to overlap
self.mask_channel_min_space: int = 1 # min space between spans (if no overlap is enabled)
# positional embeddings
self.conv_pos: int = 128 # number of filters for convolutional positional embeddings
self.conv_pos_groups: int = 16 # number of groups for convolutional positional embedding
# relative position embedding
self.relative_position_embedding: bool = False # apply relative position embedding
self.num_buckets: int = 320 # number of buckets for relative position embedding
self.max_distance: int = 1280 # maximum distance for relative position embedding
self.gru_rel_pos: bool = False # apply gated relative position embedding
if cfg is not None:
self.update(cfg)
def update(self, cfg: dict):
self.__dict__.update(cfg)
class WavLM(nn.Module):
def __init__(
self,
cfg: WavLMConfig,
) -> None:
super().__init__()
# logger.info(f"WavLM Config: {cfg.__dict__}")
self.cfg = cfg
feature_enc_layers = eval(cfg.conv_feature_layers)
self.embed = feature_enc_layers[-1][0]
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
)
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim
else None
)
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.embed)
self.length_adapter = None
#
# def create_length_adapter(self):
# from .length_adapter import LengthAdapter
# self.length_adapter = LengthAdapter(self.cfg.encoder_embed_dim, self.cfg.encoder_embed_dim)
def apply_mask(self, x, padding_mask):
B, T, C = x.shape
if self.mask_prob > 0:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x[mask_indices] = self.mask_emb
else:
mask_indices = None
if self.mask_channel_prob > 0:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
return x, mask_indices
def forward_padding_mask(
self, features: torch.Tensor, padding_mask: torch.Tensor,
) -> torch.Tensor:
extra = padding_mask.size(1) % features.size(1)
if extra > 0:
padding_mask = padding_mask[:, :-extra]
padding_mask = padding_mask.view(
padding_mask.size(0), features.size(1), -1
)
padding_mask = padding_mask.all(-1)
return padding_mask
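    # Shape sketch (illustrative): with a sample-level padding_mask of shape (B, 16000)
    # and 49 extracted frames, the mask is trimmed by 16000 % 49 = 26 samples, reshaped
    # to (B, 49, 326) and reduced with all(-1), so a frame counts as padding only when
    # every raw sample that maps to it was padding.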
def extract_features(
self,
source: torch.Tensor,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = False,
ret_conv: bool = False,
output_layer: Optional[int] = None,
ret_layer_results: bool = False,
):
#
# if self.feature_grad_mult > 0:
# features = self.feature_extractor(source)
# if self.feature_grad_mult != 1.0:
# features = GradMultiply.apply(features, self.feature_grad_mult)
# else:
with torch.no_grad():
features = self.feature_extractor(source)
features = features.transpose(1, 2)
features = self.layer_norm(features)
if padding_mask is not None:
padding_mask = self.forward_padding_mask(features, padding_mask)
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
if mask:
x, mask_indices = self.apply_mask(
features, padding_mask
)
else:
x = features
# feature: (B, T, D), float
# target: (B, T), long
# x: (B, T, D), float
# padding_mask: (B, T), bool
# mask_indices: (B, T), bool
x, layer_results = self.encoder(
x,
padding_mask=padding_mask,
layer=None if output_layer is None else output_layer - 1
)
res = {"x": x, "padding_mask": padding_mask, "features": features, "layer_results": layer_results}
feature = res["features"] if ret_conv else res["x"]
if ret_layer_results:
feature = (feature, res["layer_results"])
return feature, res["padding_mask"]
def forward(
self,
source,
padding_mask=None,
positions=None,
mask=True,
features_only=False,
layer=None,
mask_indices=None,
mask_channel_indices=None,
padding_count=None,
precomputed_tdnn=False,
quantize=False, quantize_only=False,
lang=None,
atb=None,
checkpointing_ffn=False,
checkpointing_self_attn=False,
**kwargs
):
if self.feature_grad_mult > 0:
features = self.feature_extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.feature_extractor(source)
features = features.transpose(1, 2)
features = self.layer_norm(features)
if padding_mask is not None:
padding_mask = self.forward_padding_mask(features, padding_mask)
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
if mask:
x, mask_indices = self.apply_mask(
features, padding_mask
)
else:
x = features
x, layer_results = self.encoder(
x,
padding_mask=padding_mask,
layer=layer
)
#
# if self.length_adapter is not None:
# x = self.length_adapter(x)
# if padding_mask is not None:
# padding_mask = padding_mask[:, 2::2][:, 2::2][:, 2::2]
res = {"x": x, "padding_mask": padding_mask, "features": features, "layer_results": layer_results}
return res
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers: List[Tuple[int, int, int]],
dropout: float = 0.0,
mode: str = "default",
conv_bias: bool = False,
conv_type: str = "default"
):
super().__init__()
assert mode in {"default", "layer_norm"}
def block(
n_in,
n_out,
k,
stride,
is_layer_norm=False,
is_group_norm=False,
conv_bias=False,
):
def make_conv():
conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
nn.init.kaiming_normal_(conv.weight)
return conv
assert (
is_layer_norm and is_group_norm
) == False, "layer norm and group norm are exclusive"
if is_layer_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=True),
TransposeLast(),
),
nn.GELU(),
)
elif is_group_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
Fp32GroupNorm(dim, dim, affine=True),
nn.GELU(),
)
else:
return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
self.conv_type = conv_type
if self.conv_type == "default":
in_d = 1
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 3, "invalid conv definition: " + str(cl)
(dim, k, stride) = cl
self.conv_layers.append(
block(
in_d,
dim,
k,
stride,
is_layer_norm=mode == "layer_norm",
is_group_norm=mode == "default" and i == 0,
conv_bias=conv_bias,
)
)
in_d = dim
elif self.conv_type == "conv2d":
in_d = 1
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 3
(dim, k, stride) = cl
self.conv_layers.append(
torch.nn.Conv2d(in_d, dim, k, stride)
)
self.conv_layers.append(torch.nn.ReLU())
in_d = dim
elif self.conv_type == "custom":
in_d = 1
idim = 80
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 3
(dim, k, stride) = cl
self.conv_layers.append(
torch.nn.Conv2d(in_d, dim, k, stride, padding=1)
)
self.conv_layers.append(
torch.nn.LayerNorm([dim, idim])
)
self.conv_layers.append(torch.nn.ReLU())
in_d = dim
if (i + 1) % 2 == 0:
self.conv_layers.append(
torch.nn.MaxPool2d(2, stride=2, ceil_mode=True)
)
idim = int(math.ceil(idim / 2))
else:
pass
def forward(self, x, mask=None):
# BxT -> BxCxT
x = x.unsqueeze(1)
if self.conv_type == "custom":
for conv in self.conv_layers:
if isinstance(conv, nn.LayerNorm):
x = x.transpose(1, 2)
x = conv(x).transpose(1, 2)
else:
x = conv(x)
x = x.transpose(2, 3).contiguous()
x = x.view(x.size(0), -1, x.size(-1))
else:
for conv in self.conv_layers:
x = conv(x)
if self.conv_type == "conv2d":
b, c, t, f = x.size()
x = x.transpose(2, 3).contiguous().view(b, c * f, t)
return x
class TransformerEncoder(nn.Module):
def __init__(self, args):
super().__init__()
self.dropout = args.dropout
self.embedding_dim = args.encoder_embed_dim
self.pos_conv = nn.Conv1d(
self.embedding_dim,
self.embedding_dim,
kernel_size=args.conv_pos,
padding=args.conv_pos // 2,
groups=args.conv_pos_groups,
)
dropout = 0
std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
nn.init.constant_(self.pos_conv.bias, 0)
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
if hasattr(args, "relative_position_embedding"):
self.relative_position_embedding = args.relative_position_embedding
self.num_buckets = args.num_buckets
self.max_distance = args.max_distance
else:
self.relative_position_embedding = False
self.num_buckets = 0
self.max_distance = 0
self.layers = nn.ModuleList(
[
WavLMSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=self.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
has_relative_attention_bias=(self.relative_position_embedding and i == 0),
num_buckets=self.num_buckets,
max_distance=self.max_distance,
gru_rel_pos=args.gru_rel_pos,
)
for i in range(args.encoder_layers)
]
)
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(self.embedding_dim)
self.layerdrop = args.encoder_layerdrop
self.apply(init_bert_params)
def forward(self, x, padding_mask=None, streaming_mask=None, layer=None):
x, layer_results = self.extract_features(x, padding_mask, streaming_mask, layer)
if self.layer_norm_first and layer is None:
x = self.layer_norm(x)
# Length adapter here?
return x, layer_results
def extract_features(self, x, padding_mask=None, streaming_mask=None, tgt_layer=None):
if padding_mask is not None:
x[padding_mask] = 0
x_conv = self.pos_conv(x.transpose(1, 2))
x_conv = x_conv.transpose(1, 2)
x = x + x_conv
if not self.layer_norm_first:
x = self.layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
layer_results = []
z = None
if tgt_layer is not None:
layer_results.append((x, z))
r = None
pos_bias = None
for i, layer in enumerate(self.layers):
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, z, pos_bias = layer(x, self_attn_padding_mask=padding_mask, need_weights=False,
self_attn_mask=streaming_mask, pos_bias=pos_bias)
if tgt_layer is not None:
layer_results.append((x, z))
if i == tgt_layer:
r = x
break
if r is not None:
x = r
return x, layer_results
class WavLMSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embedding_dim: float = 768,
ffn_embedding_dim: float = 3072,
num_attention_heads: float = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
layer_norm_first: bool = False,
has_relative_attention_bias: bool = False,
num_buckets: int = 0,
max_distance: int = 0,
rescale_init: bool = False,
gru_rel_pos: bool = False,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
# Initialize blocks
self.activation_name = activation_fn
self.activation_fn = get_activation_fn(activation_fn)
self.self_attn = WavLMMultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
has_relative_attention_bias=has_relative_attention_bias,
num_buckets=num_buckets,
max_distance=max_distance,
rescale_init=rescale_init,
gru_rel_pos=gru_rel_pos,
)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(self.activation_dropout)
self.dropout3 = nn.Dropout(dropout)
self.layer_norm_first = layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
if self.activation_name == "glu":
self.fc1 = GLU_Linear(self.embedding_dim, ffn_embedding_dim, "swish")
else:
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim)
self.fused = False
self.fused_function = None
if self.activation_name == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif self.activation_name == 'gelu':
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
def get_mlp_weights(self, lang=None, atb=None):
in_weight = self.fc1.weight
out_weight = self.fc2.weight
in_bias = self.fc1.bias
out_bias = self.fc2.bias
if lang is not None:
if self.is_factorized:
# First check if we use multiplicative
if self.multiplicative_factorize:
rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0) # squeeze possible because only 1
sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
if self.fast_factorize:
mul_factor_in = torch.mm(rm_i.t(), sm_i)
mul_factor_out = torch.mm(rm_o.t(), sm_o)
else:
mul_factor_in = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)
# TODO: allow for multiple sub factorizers
if self.sub_factorized and atb is not None:
# print("Found atb at multiplication:", atb)
rm_i = torch.index_select(self.sub_rm_i, 0, atb).squeeze(0) # squeeze possible because only 1
sm_i = torch.index_select(self.sub_sm_i, 0, atb).squeeze(0)
rm_o = torch.index_select(self.sub_rm_o, 0, atb).squeeze(0)
sm_o = torch.index_select(self.sub_sm_o, 0, atb).squeeze(0)
if self.fast_factorize:
sub_mul_factor_in = torch.mm(rm_i.t(), sm_i)
sub_mul_factor_out = torch.mm(rm_o.t(), sm_o)
else:
sub_mul_factor_in = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
sub_mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)
# has to be multiplicative here
mul_factor_in.mul_(sub_mul_factor_in)
mul_factor_out.mul_(sub_mul_factor_out)
in_weight = in_weight * mul_factor_in
out_weight = out_weight * mul_factor_out
                # For additive
r_i = torch.index_select(self.r_i, 0, lang).squeeze(0)
s_i = torch.index_select(self.s_i, 0, lang).squeeze(0)
r_o = torch.index_select(self.r_o, 0, lang).squeeze(0)
s_o = torch.index_select(self.s_o, 0, lang).squeeze(0)
if self.fast_factorize:
add_factor_in = torch.mm(r_i.t(), s_i)
add_factor_out = torch.mm(r_o.t(), s_o)
else:
add_factor_in = torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)
if self.sub_factorized and atb is not None:
# print("Found atb at addition:", atb)
r_i = torch.index_select(self.sub_r_i, 0, atb).squeeze(0)
s_i = torch.index_select(self.sub_s_i, 0, atb).squeeze(0)
r_o = torch.index_select(self.sub_r_o, 0, atb).squeeze(0)
s_o = torch.index_select(self.sub_s_o, 0, atb).squeeze(0)
if self.fast_factorize:
sub_add_factor_in = torch.mm(r_i.t(), s_i)
sub_add_factor_out = torch.mm(r_o.t(), s_o)
else:
sub_add_factor_in = torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
sub_add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)
# has to be additive here
                    add_factor_in.add_(sub_add_factor_in)
                    add_factor_out.add_(sub_add_factor_out)
in_weight = in_weight + add_factor_in
out_weight = out_weight + add_factor_out
return in_weight, out_weight, in_bias, out_bias
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
need_weights: bool = False,
pos_bias=None, lang=None, atb=None,
**kwargs
):
"""
LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer implementation.
"""
def call_mlp(x, in_weight, out_weight, in_bias, out_bias, activation_fn, dropout_p, training_,
fused, fused_function, checkpointing):
# TODO: check type x torch.half or torch.float32
if fused and x.is_cuda:
dropout_p_ = dropout_p if training_ else 0.0
weights = [in_weight, out_weight]
biases = [in_bias, out_bias]
x = fused_function(dropout_p_, checkpointing, x, *weights, *biases)
else:
x = F.linear(x, in_weight, in_bias)
x = activation_fn(x)
x = F.dropout(x, dropout_p, training=training_)
x = F.linear(x, out_weight, out_bias)
return x
residual = x
if self.layer_norm_first:
x = self.self_attn_layer_norm(x)
x, attn, pos_bias = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=False,
attn_mask=self_attn_mask,
position_bias=pos_bias
)
x = self.dropout1(x)
x = residual + x
residual = x
x = self.final_layer_norm(x)
if self.activation_name == "glu":
x = self.fc1(x)
x = self.dropout2(x)
x = self.fc2(x)
else:
in_weight, out_weight, in_bias, out_bias = self.get_mlp_weights(lang=lang, atb=atb)
x = call_mlp(x, in_weight, out_weight, in_bias, out_bias, self.activation_fn,
self.dropout2.p, self.training,
self.fused, self.fused_function, False)
# x = self.activation_fn(self.fc1(x))
# x = self.dropout2(x)
# x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
else:
x, attn, pos_bias = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=need_weights,
attn_mask=self_attn_mask,
position_bias=pos_bias
)
x = self.dropout1(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
# if self.activation_name == "glu":
# x = self.fc1(x)
# else:
# x = self.activation_fn(self.fc1(x))
# x = self.dropout2(x)
# x = self.fc2(x)
if self.activation_name == "glu":
x = self.fc1(x)
x = self.dropout2(x)
x = self.fc2(x)
else:
in_weight, out_weight, in_bias, out_bias = self.get_mlp_weights(lang=lang, atb=atb)
x = call_mlp(x, in_weight, out_weight, in_bias, out_bias, self.activation_fn,
self.dropout2.p, self.training,
self.fused, self.fused_function, False)
x = self.dropout3(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn, pos_bias
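# Added illustrative sketch (not part of the original file): get_mlp_weights above builds
# per-language weight factors as sums of rank-1 outer products. The two code paths it
# chooses between (torch.mm on transposed factors vs. torch.bmm followed by a sum) produce
# the same (d_out x d_in) matrix; the shapes below are assumptions chosen only to make the
# check self-contained, and torch is assumed to be imported at the top of this module.
def _low_rank_factor_sketch(rank: int = 4, d_out: int = 16, d_in: int = 8):
    r = torch.randn(rank, d_out)
    s = torch.randn(rank, d_in)
    factor_fast = torch.mm(r.t(), s)                                     # fast_factorize path
    factor_slow = torch.bmm(r.unsqueeze(-1), s.unsqueeze(1)).sum(dim=0)  # rank-1 sum path
    assert torch.allclose(factor_fast, factor_slow, atol=1e-5)
    return factor_fast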
| 35,759
| 37.123667
| 234
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/fairseq_modules.py
|
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
import math
from typing import Dict, Optional, Tuple
import torch
from torch.cuda.amp import custom_fwd, custom_bwd
from onmt.modules.optimized.self_attention_func import self_attn_func, self_attn_compact_func
from onmt.modules.optimized.relative_self_attention_func import relative_self_attn_func
from onmt.modules.optimized.linear import linear_function, factorize_linear
class Fp32GroupNorm(nn.GroupNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.group_norm(
input.float(),
self.num_groups,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
try:
from apex.normalization import FusedLayerNorm as _FusedLayerNorm
has_fused_layernorm = True
class FusedLayerNorm(_FusedLayerNorm):
@torch.jit.unused
def forward(self, x):
if not x.is_cuda:
return super().forward(x)
else:
with torch.cuda.device(x.device):
return super().forward(x)
except ImportError:
has_fused_layernorm = False
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
# if torch.jit.is_scripting():
# export = True
# if not export and torch.cuda.is_available() and has_fused_layernorm:
# return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class Fp32LayerNorm(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.layer_norm(
input.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
class GradMultiply(torch.autograd.Function):
@staticmethod
def forward(ctx, x, scale):
ctx.scale = scale
res = x.new(x)
return res
@staticmethod
def backward(ctx, grad):
return grad * ctx.scale, None
class GumbelVectorQuantizer(nn.Module):
def __init__(
self,
dim,
num_vars,
temp,
groups,
combine_groups,
vq_dim,
time_first,
activation=nn.GELU(),
weight_proj_depth=1,
weight_proj_factor=1,
):
"""Vector quantization using gumbel softmax
Args:
dim: input dimension (channels)
num_vars: number of quantized vectors per group
temp: temperature for training. this should be a tuple of 3 elements: (start, stop, decay factor)
groups: number of groups for vector quantization
combine_groups: whether to use the vectors for all groups
vq_dim: dimensionality of the resulting quantized vector
time_first: if true, expect input in BxTxC format, otherwise in BxCxT
activation: what activation to use (should be a module). this is only used if weight_proj_depth is > 1
weight_proj_depth: number of layers (with activation in between) to project input before computing logits
weight_proj_factor: this is used only if weight_proj_depth is > 1. scales the inner dimensionality of
projections by this factor
"""
super().__init__()
self.groups = groups
self.combine_groups = combine_groups
self.input_dim = dim
self.num_vars = num_vars
self.time_first = time_first
assert (
vq_dim % groups == 0
), f"dim {vq_dim} must be divisible by groups {groups} for concatenation"
var_dim = vq_dim // groups
num_groups = groups if not combine_groups else 1
self.vars = nn.Parameter(torch.FloatTensor(1, num_groups * num_vars, var_dim))
nn.init.uniform_(self.vars)
if weight_proj_depth > 1:
def block(input_dim, output_dim):
return nn.Sequential(nn.Linear(input_dim, output_dim), activation)
inner_dim = self.input_dim * weight_proj_factor
self.weight_proj = nn.Sequential(
*[
block(self.input_dim if i == 0 else inner_dim, inner_dim)
for i in range(weight_proj_depth - 1)
],
nn.Linear(inner_dim, groups * num_vars),
)
else:
self.weight_proj = nn.Linear(self.input_dim, groups * num_vars)
nn.init.normal_(self.weight_proj.weight, mean=0, std=1)
nn.init.zeros_(self.weight_proj.bias)
if isinstance(temp, str):
import ast
temp = ast.literal_eval(temp)
assert len(temp) == 3, f"{temp}, {len(temp)}"
self.max_temp, self.min_temp, self.temp_decay = temp
self.curr_temp = self.max_temp
self.codebook_indices = None
def set_num_updates(self, num_updates):
self.curr_temp = max(
self.max_temp * self.temp_decay ** num_updates, self.min_temp
)
def get_codebook_indices(self):
if self.codebook_indices is None:
from itertools import product
p = [range(self.num_vars)] * self.groups
inds = list(product(*p))
self.codebook_indices = torch.tensor(
inds, dtype=torch.long, device=self.vars.device
).flatten()
if not self.combine_groups:
self.codebook_indices = self.codebook_indices.view(
self.num_vars ** self.groups, -1
)
for b in range(1, self.groups):
self.codebook_indices[:, b] += self.num_vars * b
self.codebook_indices = self.codebook_indices.flatten()
return self.codebook_indices
def codebook(self):
indices = self.get_codebook_indices()
return (
self.vars.squeeze(0)
.index_select(0, indices)
.view(self.num_vars ** self.groups, -1)
)
def sample_from_codebook(self, b, n):
indices = self.get_codebook_indices()
indices = indices.view(-1, self.groups)
cb_size = indices.size(0)
assert (
n < cb_size
), f"sample size {n} is greater than size of codebook {cb_size}"
sample_idx = torch.randint(low=0, high=cb_size, size=(b * n,))
indices = indices[sample_idx]
z = self.vars.squeeze(0).index_select(0, indices.flatten()).view(b, n, -1)
return z
def to_codebook_index(self, indices):
res = indices.new_full(indices.shape[:-1], 0)
for i in range(self.groups):
exponent = self.groups - i - 1
res += indices[..., i] * (self.num_vars ** exponent)
return res
def forward_idx(self, x):
res = self.forward(x, produce_targets=True)
return res["x"], res["targets"]
def forward(self, x, produce_targets=False):
result = {"num_vars": self.num_vars * self.groups}
# B x H x T -> B x T x H if not time first
if not self.time_first:
x = x.transpose(1, 2)
bsz, tsz, fsz = x.shape
x = x.reshape(-1, fsz)
# from fsz -> group * num_vars
x = self.weight_proj(x)
x = x.view(bsz * tsz * self.groups, -1)
# choose the (indices of) max var in num_vars
_, k = x.max(-1)
# hard_x has the original size of x
# 1 for chosen value, 0 for non-chosen value
hard_x = (
x.new_zeros(*x.shape)
.scatter_(-1, k.view(-1, 1), 1.0)
.view(bsz * tsz, self.groups, -1)
)
# mean over the bsz * tsz dimension?
hard_probs = torch.mean(hard_x.float(), dim=0)
result["code_perplexity"] = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
).sum()
# code probabilities for each group
avg_probs = torch.softmax(
x.view(bsz * tsz, self.groups, -1).float(), dim=-1
).mean(dim=0)
result["prob_perplexity"] = torch.exp(
-torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)
).sum()
result["temp"] = self.curr_temp
if self.training:
x = F.gumbel_softmax(x.float(), tau=self.curr_temp, hard=True).type_as(x)
else:
x = hard_x
x = x.view(bsz * tsz, -1)
vars = self.vars
if self.combine_groups:
vars = vars.repeat(1, self.groups, 1)
if produce_targets:
result["targets"] = (
x.view(bsz * tsz * self.groups, -1)
.argmax(dim=-1)
.view(bsz, tsz, self.groups)
.detach()
)
# x size: [bsz * tsz * self.groups, self.num_vars]
# the last dimension is basically distribution over different vars (for each group)
x = x.unsqueeze(-1) * vars
# vars is "probably" latent variable embeddings
x = x.view(bsz * tsz, self.groups, self.num_vars, -1)
x = x.sum(-2)
x = x.view(bsz, tsz, -1)
if not self.time_first:
x = x.transpose(1, 2) # BTC -> BCT
result["x"] = x
return result
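# Added illustrative sketch (not part of the original file): shows the input/output shapes
# GumbelVectorQuantizer produces. All sizes and hyper-parameters here are assumptions picked
# only for the example; the repository configures them elsewhere.
def _gumbel_vq_usage_sketch():
    quantizer = GumbelVectorQuantizer(
        dim=512, num_vars=320, temp=(2.0, 0.5, 0.999995),
        groups=2, combine_groups=False, vq_dim=256, time_first=True,
    )
    features = torch.randn(4, 100, 512)              # B x T x C because time_first=True
    out = quantizer(features, produce_targets=True)
    assert out["x"].shape == (4, 100, 256)           # quantized vectors, vq_dim per time step
    assert out["targets"].shape == (4, 100, 2)       # one codebook index per group
    return out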
class SamePad(nn.Module):
def __init__(self, kernel_size, causal=False):
super().__init__()
if causal:
self.remove = kernel_size - 1
else:
self.remove = 1 if kernel_size % 2 == 0 else 0
def forward(self, x):
if self.remove > 0:
x = x[:, :, : -self.remove]
return x
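# Added illustrative sketch (not part of the original file): with an even kernel size the
# "same"-padded positional convolution yields one surplus frame, which SamePad trims; the
# tensor sizes below are assumptions.
def _same_pad_sketch():
    pad = SamePad(kernel_size=128)
    x = torch.randn(2, 16, 101)          # B x C x (T + 1), one surplus frame at the end
    y = pad(x)
    assert y.shape == (2, 16, 100)       # trimmed back to T
    return y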
class TransposeLast(nn.Module):
def __init__(self, deconstruct_idx=None):
super().__init__()
self.deconstruct_idx = deconstruct_idx
def forward(self, x):
if self.deconstruct_idx is not None:
x = x[self.deconstruct_idx]
return x.transpose(-2, -1)
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
weight_drop=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
favor=False,
generalized_attention=False,
nb_features=256,
**kwargs,
):
super().__init__()
self.rotary_position = False
self.pos_proj_weight = None
self.relative = False
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_p = dropout
self.weight_drop = weight_drop
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.favor = favor
if self.favor:
from onmt.modules.performer import Performer
self.performer = Performer(self.head_dim, nb_features, generalized_attention=generalized_attention)
else:
self.performer = None
self.onnx_trace = False
self.fast_attention = False
self.is_factorized = False
self.multiplicative_factorize = False
self.fast_factorize = False
# from onmt.modules.optimized.fast_mha import fast_bert_mha, fast_self_attn_func
# self.fast_bert_mha = fast_bert_mha
from onmt.modules.optimized.flash_mha import flash_bert_mha
self.fast_bert_mha = flash_bert_mha
def fix_projection_matrices_(self):
if self.proj_updater:
self.proj_updater.feature_redraw_interval = None
def add_factorized_weights(self, n_languages, rank=4, multiplicative=False, fast=False, dyrank=False, **kwargs):
embed_dim = self.embed_dim
self.is_factorized = True
self.multiplicative_factorize = multiplicative
self.fast_factorize = fast
self.dyrank = dyrank
if self.fast_factorize:
assert self.multiplicative_factorize is True
if multiplicative:
_rank = rank if fast else 1
self.rm_i = torch.nn.Parameter(torch.Tensor(n_languages, _rank, 3 * embed_dim))
self.sm_i = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
self.rm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
self.sm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
if self.relative:
self.rm_p = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.sm_p = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
constant = 1
nn.init.constant_(self.rm_i, constant)
nn.init.constant_(self.sm_i, constant)
nn.init.constant_(self.rm_o, constant)
nn.init.constant_(self.sm_o, constant)
if self.relative:
nn.init.constant_(self.rm_p, constant)
nn.init.constant_(self.sm_p, constant)
if not fast:
self.r_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, 3 * embed_dim))
self.s_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.r_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.s_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
if self.relative:
self.r_p = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.s_p = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
if self.dyrank:
nn.init.zeros_(self.r_i)
nn.init.normal_(self.s_i, 0.0, 0.02)
nn.init.zeros_(self.r_o)
nn.init.normal_(self.s_o, 0.0, 0.02)
if self.relative:
nn.init.zeros_(self.r_p)
nn.init.normal_(self.s_p, 0.0, 0.02)
else:
std = 0.01 if fast else 0.02
nn.init.normal_(self.r_i, 0.0, std)
nn.init.normal_(self.s_i, 0.0, std)
nn.init.normal_(self.r_o, 0.0, std)
nn.init.normal_(self.s_o, 0.0, std)
if self.relative:
nn.init.normal_(self.r_p, 0.0, std)
nn.init.normal_(self.s_p, 0.0, std)
def convert_fast_attention(self):
# print("Convert from vanilla to fast attention module ...")
if self.fast_attention:
return
self.fast_attention = True
assert self.qkv_same_dim, "Only works with QKV same dim."
w_q = self.q_proj.weight.clone()
w_k = self.k_proj.weight.clone()
w_v = self.v_proj.weight.clone()
weights = [w_q, w_k, w_v]
weight_ = torch.cat(weights, dim=0).contiguous()
b_q = self.q_proj.bias.clone()
b_k = self.k_proj.bias.clone()
b_v = self.v_proj.bias.clone()
biases = [b_q, b_k, b_v]
bias_ = torch.cat(biases, dim=0).contiguous()
head_dim = self.head_dim
heads = self.num_heads
input_dim = self.embed_dim
# when we concatenate the weights, the output has the size 3 * D (3 -> heads -> head_dim)
# the fast attention module requires (heads -> 3 -> head_dim)
weight_ = weight_.reshape(3 * head_dim * heads, input_dim).view(3, heads, head_dim, input_dim).transpose(0, 1). \
reshape(-1, input_dim)
bias_ = bias_.reshape(3 * head_dim * heads).view(3, heads, head_dim).transpose(0, 1).reshape(-1)
weight_t = torch.Tensor(3 * input_dim, input_dim)
bias_t = torch.Tensor(3 * input_dim)
weight_t.copy_(weight_)
bias_t.copy_(bias_)
self.proj_weight = Parameter(weight_t)
self.proj_bias = Parameter(bias_t)
self.proj_weight.requires_grad = self.q_proj.weight.requires_grad
self.proj_bias.requires_grad = self.q_proj.bias.requires_grad
del self.q_proj, self.k_proj, self.v_proj
def add_relative_attention(self):
self.relative = True
self.pos_proj_weight = Parameter(torch.Tensor(self.embed_dim, self.embed_dim))
self.pos_proj_bias = Parameter(torch.Tensor(self.embed_dim))
self.r_w_bias = Parameter(torch.Tensor(self.num_heads, self.head_dim))
self.r_r_bias = Parameter(torch.Tensor(self.num_heads, self.head_dim))
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.pos_proj_weight, 0.0, std_)
# nn.init.uniform_(self.pos_proj_weight, -std_, std_)
nn.init.constant_(self.pos_proj_bias, 0.)
nn.init.normal_(self.r_w_bias, 0.0, 0.02)
nn.init.normal_(self.r_r_bias, 0.0, 0.02)
def add_rotary_attention(self):
self.rotary_position = True
assert not self.relative
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
positions: Optional[Tensor] = None,
attn_mask: Optional[Tensor] = None,
cu_seqlens=None, max_len=None,
lang=None, atb=None,
checkpointing=False, **kwargs
) -> Tuple[Tensor, Optional[Tensor]]:
"""
:param checkpointing:
:param positions:
:param query:
:param key:
:param value:
:param key_padding_mask:
:param attn_mask:
:param cu_seqlens:
:param max_len:
:param lang:
:param atb:
:param kwargs:
:return:
"""
is_tpu = query.device.type == "xla"
checkpointing = False # temporarily not checkpoint atm
if not self.favor:
if not self.fast_attention:
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
assert list(query.size()) == [tgt_len, bsz, embed_dim]
need_weight = False
assert key is not None and value is not None
assert self.relative == False
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_p,
self.out_proj.weight,
self.out_proj.bias,
self.training,
key_padding_mask,
need_weight,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
else:
in_proj_weight = F.dropout(self.proj_weight, self.weight_drop, training=self.training)
out_proj_weight = F.dropout(self.out_proj.weight, self.weight_drop, training=self.training)
pos_proj_weight = F.dropout(self.pos_proj_weight, self.weight_drop, training=self.training) \
if self.pos_proj_weight is not None else None
if self.is_factorized and self.fast_factorize:
if self.relative:
print("fast factorization is not implemented for relative attention yet")
raise NotImplementedError
hidden_states = query
n_languages, _rank = self.rm_i.size(0), self.rm_i.size(1)
# TODO: mm instead of index select
if lang.ndim == 1:
rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0) # squeeze possible because only 1
sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
elif lang.ndim == 2: # for flash attention
rm_i = torch.mm(lang, self.rm_i.view(n_languages, _rank * self.rm_i.size(-1))).view(
lang.size(0), _rank,
self.rm_i.size(-1))
sm_i = torch.mm(lang, self.sm_i.view(n_languages, _rank * self.sm_i.size(-1))).view(
lang.size(0), _rank,
self.sm_i.size(-1))
rm_o = torch.mm(lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(
lang.size(0), _rank,
self.rm_o.size(-1))
sm_o = torch.mm(lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(
lang.size(0), _rank,
self.sm_o.size(-1))
elif lang.ndim == 3:
_len, _bsz = lang.size(0), lang.size(1)
_lang = lang.view(_len * _bsz, lang.size(-1))
rm_i = torch.mm(_lang, self.rm_i.view(n_languages, _rank * self.rm_i.size(-1))).view(
_len, _bsz, _rank, self.rm_i.size(-1))
sm_i = torch.mm(_lang, self.sm_i.view(n_languages, _rank * self.sm_i.size(-1))).view(
_len, _bsz, _rank, self.sm_i.size(-1))
rm_o = torch.mm(_lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(
_len, _bsz, _rank, self.rm_o.size(-1))
sm_o = torch.mm(_lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(
_len, _bsz, _rank, self.sm_o.size(-1))
if hidden_states.ndim == 3:
                        bsz, qlen = hidden_states.size(1), hidden_states.size(0)
                        is_training = self.training
                        low_precision = True  # Use CUDA impl
input_lin_results = factorize_linear(hidden_states, in_proj_weight, self.proj_bias, rm_i, sm_i)
rotary = self.rotary_position
attn_output, coverage = self_attn_compact_func(False, is_training, self.num_heads, input_lin_results,
key_padding_mask, self.dropout_p,
rotary, positions,
False, None, # incremental and state
low_precision,
True, checkpointing) # low-precision and return coverage
# outputs, coverage = self_attn_func(False, is_training, self.num_heads, inputs,
# in_proj_weight, out_proj_weight,
# self.proj_bias, self.out_proj.bias,
# key_padding_mask, self.dropout_p,
# rotary, positions,
# False, None, # incremental and state
# low_precision,
# True, checkpointing) # low-precision and return coverage
attn_output = attn_output.view(qlen, bsz, -1).contiguous()
output = factorize_linear(attn_output, out_proj_weight, self.out_proj.bias, rm_o, sm_o)
return output, coverage
else:
# this doesn't need checkpointing because fmha is doing checkpointing
assert self.fast_bert_mha is not None
assert query.dtype == torch.half
assert cu_seqlens is not None
assert max_len is not None # and max_len <= 512
assert self.relative == False
total_bsz = query.size(0)
# qkv = F.linear(query, in_proj_weight, self.proj_bias) # B x H
qkv = factorize_linear(hidden_states, in_proj_weight, self.proj_bias, rm_i, sm_i)
# B x 3 x H x d
# transpose 1 2 is necessary here because the weights are designed to be heads x 3 x d
# (for the more simple version without transposing)
if not self.rotary_position:
qkv = qkv.view(total_bsz, self.num_heads, 3, self.head_dim).transpose(1, 2).contiguous()
else:
assert positions is not None
cos, sin = positions
queries, keys, values = qkv.view(total_bsz, self.num_heads, 3, self.head_dim)
queries, keys = apply_rotary_pos_emb(queries, keys, cos, sin)
qkv = torch.stack([queries, keys, values], dim=2).transpose(1, 2).contiguous()
dropout_p = self.dropout_p if self.training else 0.0
causal = False
softmax_scale = 1.0 / math.sqrt(64)
# False = return softmax
context = self.fast_bert_mha(qkv, cu_seqlens, max_len, dropout_p, softmax_scale, causal, False)
coverage = None
context = context.view(-1, self.num_heads * self.head_dim).contiguous()
output = factorize_linear(context, out_proj_weight, self.out_proj.bias, rm_o, sm_o)
return output, coverage
if self.is_factorized:
if self.multiplicative_factorize:
# squeeze possible because only 1
rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0)
sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
if self.relative:
rm_p = torch.index_select(self.rm_p, 0, lang).squeeze(0)
sm_p = torch.index_select(self.sm_p, 0, lang).squeeze(0)
if self.dyrank:
mul_factor_in = torch.mm(rm_i.t(), sm_i)
mul_factor_out = torch.mm(rm_o.t(), sm_o)
if self.relative:
pos_factor = torch.mm(rm_p.t(), sm_p)
else:
mul_factor_in = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)
if self.relative:
pos_factor = torch.bmm(rm_p.unsqueeze(-1), sm_p.unsqueeze(1)).sum(dim=0)
in_proj_weight = in_proj_weight * mul_factor_in
out_proj_weight = out_proj_weight * mul_factor_out
if self.relative:
pos_proj_weight = pos_proj_weight * pos_factor
# TODO: dyrank select rank
r_i = torch.index_select(self.r_i, 0, lang).squeeze(0)
s_i = torch.index_select(self.s_i, 0, lang).squeeze(0)
r_o = torch.index_select(self.r_o, 0, lang).squeeze(0)
s_o = torch.index_select(self.s_o, 0, lang).squeeze(0)
if self.relative:
r_p = torch.index_select(self.r_p, 0, lang).squeeze(0)
s_p = torch.index_select(self.s_p, 0, lang).squeeze(0)
if self.dyrank:
add_factor_in = torch.mm(r_i.t(), s_i)
add_factor_out = torch.mm(r_o.t(), s_o)
if self.relative: pos_factor = torch.mm(r_p.t(), s_p)
else:
add_factor_in = torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)
if self.relative: pos_factor = torch.bmm(r_p.unsqueeze(-1), s_p.unsqueeze(1)).sum(dim=0)
in_proj_weight = in_proj_weight + add_factor_in
out_proj_weight = out_proj_weight + add_factor_out
if self.relative:
pos_proj_weight = pos_proj_weight + pos_factor
# Forward Pass starts here
if query.ndim == 3:
# Call semi-fast attention from CUDA/
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
assert list(query.size()) == [tgt_len, bsz, embed_dim]
inputs = query
is_training = self.training
low_precision = True
if self.relative:
recompute = checkpointing
outputs, coverage = relative_self_attn_func(inputs, positions, False,
is_training, self.num_heads,
in_proj_weight, out_proj_weight, pos_proj_weight,
self.proj_bias, self.out_proj.bias, self.pos_proj_bias,
self.r_w_bias, self.r_r_bias,
key_padding_mask, self.dropout_p,
False, None, False, # incremental and state and double precision
False, True, recompute) # learnable_pos + return-coverage
else:
rotary = self.rotary_position
outputs, coverage = self_attn_func(False, is_training, self.num_heads, inputs,
in_proj_weight, out_proj_weight,
self.proj_bias, self.out_proj.bias,
key_padding_mask, self.dropout_p,
rotary, positions,
False, None, # incremental and state
low_precision,
True, checkpointing) # low-precision and return coverage
return outputs, coverage
# Fused attention using packed data (B T H) -> (BxT H) and removing padded positions
elif query.ndim == 2:
# this doesn't need checkpointing because fmha is doing checkpointing
assert self.fast_bert_mha is not None
assert query.dtype == torch.half
assert cu_seqlens is not None
assert max_len is not None # and max_len <= 512
assert self.relative == False
total_bsz = query.size(0)
qkv = F.linear(query, in_proj_weight, self.proj_bias) # B x H
# B x 3 x H x d
# transpose 1 2 is necessary here because the weights are designed to be heads x 3 x d
# (for the more simple version without transposing)
if not self.rotary_position:
qkv = qkv.view(total_bsz, self.num_heads, 3, self.head_dim).transpose(1, 2).contiguous()
else:
assert positions is not None
cos, sin = positions
queries, keys, values = qkv.view(total_bsz, self.num_heads, 3, self.head_dim)
queries, keys = apply_rotary_pos_emb(queries, keys, cos, sin)
qkv = torch.stack([queries, keys, values], dim=2).transpose(1, 2).contiguous()
dropout_p = self.dropout_p if self.training else 0.0
causal = False
softmax_scale = 1.0 / math.sqrt(64)
# False = return softmax
context = self.fast_bert_mha(qkv, cu_seqlens, max_len, dropout_p, softmax_scale, causal, False)
coverage = None
context = context.view(-1, self.num_heads * self.head_dim).contiguous()
outputs = F.linear(context, out_proj_weight, self.out_proj.bias)
return outputs, coverage
else:
# using performer attention
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
bsz, len_q, hidden = q.size(0), q.size(1), q.size(2)
h, d = self.num_heads, self.head_dim
len_k, len_v = k.size(1), v.size(1)
q = q.view(bsz, len_q, self.num_heads, self.head_dim).permute(0, 2, 1, 3).reshape(bsz * h, len_q, d)
k = k.view(bsz, len_q, self.num_heads, self.head_dim).permute(0, 2, 1, 3).reshape(bsz * h, len_k, d)
v = v.view(bsz, len_q, self.num_heads, self.head_dim).permute(0, 2, 1, 3) # .reshape(bsz * h, len_v, d)
# 1 for padded positions, 0 for non-padded positions
if key_padding_mask is not None:
key_padding_mask = key_padding_mask[:, None, :, None]
v.masked_fill_(key_padding_mask, 0)
v = v.reshape(bsz * h, len_v, d)
out, attn = self.performer(q, k, v)
# out = out.transpose(1, 2).view(bsz, out.size(-2), -1)
out = out.reshape(bsz, h, len_q, -1).permute(0, 2, 1, 3).reshape(bsz, len_v, -1)
out = self.out_proj(out)
return out, attn
def rotate_half(x):
x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in torch < 1.8.0
def apply_rotary_pos_emb(q, k, cos, sin):
return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
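# Added illustrative sketch (not part of the original file): builds the cos/sin tables that
# apply_rotary_pos_emb expects and applies them to dummy query/key tensors. The inv_freq
# construction follows the common RoPE recipe and is an assumption here; the repository
# computes its own position tables elsewhere.
def _rotary_embedding_sketch(seq_len: int = 8, head_dim: int = 16):
    inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
    freqs = torch.arange(seq_len).float()[:, None] * inv_freq[None, :]  # seq_len x head_dim/2
    emb = torch.cat((freqs, freqs), dim=-1)                             # seq_len x head_dim
    cos, sin = emb.cos(), emb.sin()
    q = torch.randn(seq_len, head_dim)
    k = torch.randn(seq_len, head_dim)
    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
    assert q_rot.shape == q.shape and k_rot.shape == k.shape
    return q_rot, k_rot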
def gelu_accurate(x):
if not hasattr(gelu_accurate, "_a"):
gelu_accurate._a = math.sqrt(2 / math.pi)
return (
0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
)
def gelu(x: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.gelu(x.float()).type_as(x)
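# Added illustrative sketch (not part of the original file): gelu_accurate is the tanh
# approximation while gelu wraps the exact erf-based torch implementation; the two agree
# closely on typical activation ranges.
def _gelu_variants_sketch():
    x = torch.linspace(-3, 3, steps=7)
    exact, approx = gelu(x), gelu_accurate(x)
    assert torch.allclose(exact, approx, atol=1e-2)
    return exact, approx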
class IndexCopy(torch.autograd.Function):
"""
This function is kinda similar to rnn pad_packed_sequence
    It remaps the non-padded values of an (N-1)-d tensor into an N-d tensor
"""
@staticmethod
@custom_fwd
def forward(ctx, input, non_pad_indices, total_batch_size):
"""
:param ctx:
:param input: 2D [bsz x ... ] bsz is the total number of elements after unpadding
:param non_pad_indices: bsz * seq_len
:param total_batch_size: (int) bsz * seq_len (before unpadding) > bsz
:return:
In the forward pass we create a new zero tensor and copy the inputs into it based on non_pad_indices
"""
sizes = list(input.size())
sizes[0] = total_batch_size
output = input.new_zeros(*sizes)
output.index_copy_(0, non_pad_indices, input)
ctx.save_for_backward(non_pad_indices)
return output
@staticmethod
@custom_bwd
def backward(ctx, output_grads):
"""
:param ctx:
:param output_grads:
:return:
        In the backward pass we simply gather the gradients back at the non-padded positions
"""
non_pad_indices, = ctx.saved_tensors
grad_input = output_grads.index_select(0, non_pad_indices)
return grad_input, None, None
index_copy = IndexCopy.apply
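# Added illustrative sketch (not part of the original file): scatters a packed, un-padded
# tensor back into its padded layout, which is how index_copy is used when attention runs
# on packed sequences. Shapes and indices below are assumptions.
def _index_copy_sketch():
    packed = torch.randn(5, 8)                       # 5 non-padded positions, hidden = 8
    non_pad_indices = torch.tensor([0, 1, 2, 4, 5])  # their slots in the padded batch
    total_batch_size = 8                             # bsz * seq_len before un-padding
    padded = index_copy(packed, non_pad_indices, total_batch_size)
    assert padded.shape == (8, 8)
    assert torch.equal(padded[3], torch.zeros(8))    # padded slots stay zero
    return padded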
#
# class ConvDownsampler(nn.Module):
# def __init__(
# self,
# conv_layers: List[Tuple[int, int, int]], # n_in, n_out, kernel size?
# dropout: float = 0.0,
# mode: str = "default",
# conv_bias: bool = False,
# ):
# super().__init__()
#
# assert mode in {"default", "layer_norm"}
#
# def block(
# n_in,
# n_out,
# k,
# stride,
# is_layer_norm=False,
# is_group_norm=False,
# conv_bias=False,
# ):
# def make_conv():
# conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
# nn.init.kaiming_normal_(conv.weight)
# return conv
#
# assert (
# is_layer_norm and is_group_norm
# ) == False, "layer norm and group norm are exclusive"
#
# if is_layer_norm:
# return nn.Sequential(
# make_conv(),
# nn.Dropout(p=dropout),
# nn.Sequential(
# TransposeLast(),
# LayerNorm(dim),
# TransposeLast(),
# ),
# nn.GELU(),
# )
# elif is_group_norm:
# return nn.Sequential(
# make_conv(),
# nn.Dropout(p=dropout),
# Fp32GroupNorm(dim, dim, affine=True),
# nn.GELU(),
# )
# else:
# return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
#
# in_d = 1
# self.conv_layers = nn.ModuleList()
# for i, cl in enumerate(conv_layers):
# assert len(cl) == 3, "invalid conv definition: " + str(cl)
# (dim, k, stride) = cl
#
# self.conv_layers.append(
# block(
# in_d,
# dim,
# k,
# stride,
# is_layer_norm=mode == "layer_norm",
# is_group_norm=mode == "default" and i == 0,
# conv_bias=conv_bias,
# )
# )
# in_d = dim
#
# def forward(self, x):
#
# # BxT -> BxCxT (only for waveforms with 1 channel)
# x = x.unsqueeze(1)
#
# for conv in self.conv_layers:
# x = conv(x)
#
# return x
| 41,973
| 39.870497
| 128
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/dataclass.py
|
import sys
from dataclasses import _MISSING_TYPE, dataclass, field
from typing import Any, List, Optional, Tuple
from .enum import ChoiceEnum
from .utils import get_activation_fn, get_available_activation_fns
@dataclass
class FairseqDataclass:
"""fairseq base dataclass that supported fetching attributes and metas"""
_name: Optional[str] = None
@staticmethod
def name():
return None
def _get_all_attributes(self) -> List[str]:
return [k for k in self.__dataclass_fields__.keys()]
def _get_meta(
self, attribute_name: str, meta: str, default: Optional[Any] = None
) -> Any:
return self.__dataclass_fields__[attribute_name].metadata.get(meta, default)
def _get_name(self, attribute_name: str) -> str:
return self.__dataclass_fields__[attribute_name].name
def _get_default(self, attribute_name: str) -> Any:
if hasattr(self, attribute_name):
if str(getattr(self, attribute_name)).startswith("${"):
return str(getattr(self, attribute_name))
elif str(self.__dataclass_fields__[attribute_name].default).startswith(
"${"
):
return str(self.__dataclass_fields__[attribute_name].default)
elif (
getattr(self, attribute_name)
!= self.__dataclass_fields__[attribute_name].default
):
return getattr(self, attribute_name)
f = self.__dataclass_fields__[attribute_name]
if not isinstance(f.default_factory, _MISSING_TYPE):
return f.default_factory()
return f.default
def _get_type(self, attribute_name: str) -> Any:
return self.__dataclass_fields__[attribute_name].type
def _get_help(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "help")
def _get_argparse_const(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "argparse_const")
def _get_argparse_alias(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "argparse_alias")
def _get_choices(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "choices")
@classmethod
def from_namespace(cls, args):
if isinstance(args, cls):
return args
else:
config = cls()
for k in config.__dataclass_fields__.keys():
if k.startswith("_"):
# private member, skip
continue
if hasattr(args, k):
setattr(config, k, getattr(args, k))
return config
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
@dataclass
class Wav2Vec2Config(FairseqDataclass):
extractor_mode: EXTRACTOR_MODE_CHOICES = field(
default="default",
metadata={
"help": "mode for feature extractor. default has a single group norm with d "
"groups in the first conv block, whereas layer_norm has layer norms in "
"every block (meant to use with normalize=True)"
},
)
encoder_layers: int = field(
default=12, metadata={"help": "num encoder layers in the transformer"}
)
encoder_embed_dim: int = field(
default=768, metadata={"help": "encoder embedding dimension"}
)
encoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "encoder embedding dimension for FFN"}
)
encoder_attention_heads: int = field(
default=12, metadata={"help": "num encoder attention heads"}
)
activation_fn: ChoiceEnum(get_available_activation_fns()) = field(
default="gelu", metadata={"help": "activation function to use"}
)
# dropouts
dropout: float = field(
default=0.1, metadata={"help": "dropout probability for the transformer"}
)
attention_dropout: float = field(
default=0.1, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN"}
)
encoder_layerdrop: float = field(
        default=0.0, metadata={"help": "probability of dropping a transformer layer"}
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
dropout_features: float = field(
default=0.0,
metadata={"help": "dropout to apply to the features (after feat extr)"},
)
final_dim: int = field(
default=0,
metadata={
"help": "project final representations and targets to this many dimensions."
"set to encoder_embed_dim is <= 0"
},
)
layer_norm_first: bool = field(
default=False, metadata={"help": "apply layernorm first in the transformer"}
)
conv_feature_layers: str = field(
default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]",
metadata={
"help": "string describing convolutional feature extraction layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
},
)
conv_bias: bool = field(
default=False, metadata={"help": "include bias in conv encoder"}
)
logit_temp: float = field(
default=0.1, metadata={"help": "temperature to divide logits by"}
)
quantize_targets: bool = field(
default=False, metadata={"help": "use quantized targets"}
)
quantize_input: bool = field(
default=False, metadata={"help": "use quantized inputs"}
)
same_quantizer: bool = field(
default=False, metadata={"help": "use same quantizer for inputs and targets"}
)
target_glu: bool = field(
default=False, metadata={"help": "adds projection + glu to targets"}
)
feature_grad_mult: float = field(
default=1.0, metadata={"help": "multiply feature extractor var grads by this"}
)
quantizer_depth: int = field(
default=1,
metadata={"help": "number of quantizer layers"},
)
quantizer_factor: int = field(
default=3,
metadata={
"help": "dimensionality increase for inner quantizer layers (if depth > 1)"
},
)
latent_vars: int = field(
default=320,
metadata={"help": "number of latent variables V in each group of the codebook"},
)
latent_groups: int = field(
default=2,
metadata={"help": "number of groups G of latent variables in the codebook"},
)
latent_dim: int = field(
default=0,
metadata={
"help": "if > 0, uses this dimensionality for latent variables. "
"otherwise uses final_dim / latent_groups"
},
)
# masking
mask_length: int = field(default=10, metadata={"help": "mask length"})
mask_prob: float = field(
default=0.65, metadata={"help": "probability of replacing a token with mask"}
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose mask length"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# channel masking
mask_channel_length: int = field(
default=10, metadata={"help": "length of the mask for features (channels)"}
)
mask_channel_prob: float = field(
default=0.0, metadata={"help": "probability of replacing a feature with 0"}
)
mask_channel_before: bool = False
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False, metadata={"help": "whether to allow channel masks to overlap"}
)
mask_channel_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# negative selection
num_negatives: int = field(
default=100,
metadata={"help": "number of negative examples from the same sample"},
)
negatives_from_everywhere: bool = field(
default=False,
metadata={"help": "sample negatives from everywhere, not just masked states"},
)
cross_sample_negatives: int = field(
        default=0, metadata={"help": "number of negative examples from any sample"}
)
codebook_negatives: int = field(
        default=0, metadata={"help": "number of negative examples from the codebook"}
)
# positional embeddings
conv_pos: int = field(
default=128,
metadata={"help": "number of filters for convolutional positional embeddings"},
)
conv_pos_groups: int = field(
default=16,
metadata={"help": "number of groups for convolutional positional embedding"},
)
latent_temp: Tuple[float, float, float] = field(
default=(2, 0.5, 0.999995),
metadata={
"help": "temperature for latent variable sampling. "
"can be tuple of 3 values (start, end, decay)"
},
)
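# Added illustrative sketch (not part of the original file): conv_feature_layers is a Python
# expression describing (dim, kernel_size, stride) tuples. How the repository parses it is
# not shown in this file, so evaluating it with eval() is an assumption; the sketch only
# demonstrates the resulting structure of the default value.
def _conv_feature_layers_sketch():
    spec = "[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]"
    layers = eval(spec)  # list of 7 (dim, kernel, stride) tuples
    assert len(layers) == 7 and layers[0] == (512, 10, 5)
    return layers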
| 9,957
| 35.07971
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/bayes_by_backprop/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/models/bayes_by_backprop/relative_transformer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import PrePostProcessing
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
from onmt.models.bayes_by_backprop.relative_transformer_layers import \
TransformerEncoderLayer, TransformerDecoderLayer
torch.set_printoptions(threshold=500000)
# Positional Embedding with discrete inputs
class SinusoidalPositionalEmbedding(nn.Module):
def __init__(self, demb):
super(SinusoidalPositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, sin_first=True, bsz=None):
"""
:param bsz:
:param pos_seq: sequences of RELATIVE position indices (can be negative for future)
        :param sin_first: in the "Attention Is All You Need" paper, sin comes first, then cosine
"""
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
if sin_first:
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
else:
pos_emb = torch.cat([sinusoid_inp.cos(), sinusoid_inp.sin()], dim=-1)
if bsz is not None:
return pos_emb[:, None, :].repeat(1, bsz, 1)
else:
return pos_emb[:, None, :]
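# Added illustrative sketch (not part of the original file): the encoder below feeds a
# descending sequence of relative offsets (klen-1 ... -klen+1) into this module. The sizes
# chosen here are assumptions for demonstration only.
def _sinusoidal_positional_embedding_sketch(demb: int = 512, klen: int = 10, bsz: int = 4):
    pos_encoder = SinusoidalPositionalEmbedding(demb)
    pos_seq = torch.arange(klen - 1, -klen, -1.0)    # 2*klen - 1 relative offsets
    pos_emb = pos_encoder(pos_seq, bsz=bsz)
    assert pos_emb.shape == (2 * klen - 1, bsz, demb)
    return pos_emb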
class RelativeTransformerEncoder(TransformerEncoder):
def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
self.death_rate = opt.death_rate
self.layer_modules = list()
self.asynchronous = opt.asynchronous
self.max_memory_size = opt.max_memory_size
self.extra_context_size = opt.extra_context_size
self.experimental = opt.experimental
self.unidirectional = opt.unidirectional
self.reversible = opt.src_reversible
self.n_heads = opt.n_heads
self.fast_self_attn = opt.fast_self_attention
# build_modules will be called from the inherited constructor
super(RelativeTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type,
language_embeddings)
# learnable position encoding
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
def build_modules(self):
assert self.opt.src_reversible == False
e_length = expected_length(self.layers, self.death_rate)
print("* Bayes-By-Backprop Relative Transformer Encoder with %.2f expected layers" % e_length)
if self.unidirectional:
print("* Running a unidirectional Encoder.")
self.layer_modules = nn.ModuleList()
for _l in range(self.layers):
# linearly decay the death rate
death_r = (_l + 1.0) / self.layers * self.death_rate
block = TransformerEncoderLayer(self.opt, death_rate=death_r)
self.layer_modules.append(block)
def forward(self, input, input_pos=None, input_lang=None, **kwargs):
"""
Inputs Shapes:
            input: batch_size x src_len (wanna transpose)
Outputs Shapes:
out: batch_size x src_len x d_model
mask_src
"""
""" Embedding: batch_size x src_len x d_model """
if self.input_type == "text":
bsz_first_input = input
input = input.transpose(0, 1)
# mask_src = input.eq(onmt.constants.PAD).unsqueeze(1) # batch_size x src_len x 1 for broadcasting
dec_attn_mask = bsz_first_input.eq(onmt.constants.PAD).unsqueeze(1)
mem_len = 0
mask_src = input.eq(onmt.constants.PAD).unsqueeze(0) # batch_size x src_len x 1 for broadcasting
mems = None
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
""" Adding language embeddings """
if self.use_language_embedding:
assert self.language_embedding is not None
# There is no "unsqueeze" here because the input is T x B x H and lang_emb is B x H
if self.language_embedding_type in ['sum', 'all_sum']:
lang_emb = self.language_embedding(input_lang)
emb = emb + lang_emb.unsqueeze(0)
else:
if not self.cnn_downsampling:
mask_src = input.narrow(2, 0, 1).squeeze(2).transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
dec_attn_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
input = input.narrow(2, 1, input.size(2) - 1)
emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
input.size(1), -1)
emb = emb.type_as(input)
else:
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
# first resizing to fit the CNN format
input = input.view(input.size(0), input.size(1), -1, self.channels)
input = input.permute(0, 3, 1, 2)
input = self.audio_trans(input)
input = input.permute(0, 2, 1, 3).contiguous()
input = input.view(input.size(0), input.size(1), -1)
# print(input.size())
input = self.linear_trans(input)
                mask_src = long_mask[:, 0:input.size(1) * 4:4].transpose(0, 1).unsqueeze(0)
dec_attn_mask = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
# the size seems to be B x T ?
emb = input
emb = emb.transpose(0, 1)
input = input.transpose(0, 1)
abs_pos = None
mem_len = 0
mems = None
if self.unidirectional:
qlen = input.size(0)
klen = qlen + mem_len
attn_mask_src = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
# pad_mask = mask_src
# mask_src = pad_mask + attn_mask_src
# dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
# mask_src = mask_src.gt(0)
# with right padding, causal mask covers the mask pad
mask_src = attn_mask_src
if onmt.constants.torch_version >= 1.2:
mask_src = mask_src.bool()
""" Scale the emb by sqrt(d_model) """
emb = emb * math.sqrt(self.model_size)
""" positional encoding """
qlen = input.size(0)
klen = qlen + mem_len
# Asynchronous positions: 2K+1 positions instead of K+1
if self.unidirectional:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
else:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
# pos_emb has size 2T+1 x 1 x H
pos_emb = self.positional_encoder(pos, bsz=input.size(1) if self.fast_self_attn else None)
# B x T x H -> T x B x H
context = emb
# Apply dropout to both context and pos_emb
context = self.preprocess_layer(context)
pos_emb = self.preprocess_layer(pos_emb)
for i, layer in enumerate(self.layer_modules):
# src_len x batch_size x d_model
context = layer(context, pos_emb, mask_src)
# final layer norm
context = self.postprocess_layer(context)
output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask, 'src': input})
return output_dict
def log_prior(self):
log_prior = 0
for module in self.layer_modules:
log_prior += module.log_prior()
return log_prior
def log_variational_posterior(self):
log_variational_posterior = 0
for module in self.layer_modules:
log_variational_posterior += module.log_variational_posterior()
return log_variational_posterior
class RelativeTransformerDecoder(TransformerDecoder):
def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):
self.death_rate = opt.death_rate
self.max_memory_size = opt.max_memory_size
self.extra_context_size = opt.extra_context_size
self.n_heads = opt.n_heads
self.fast_self_attn = opt.fast_self_attention
# build_modules will be called from the inherited constructor
super(RelativeTransformerDecoder, self).__init__(opt, dicts,
positional_encoder,
language_embeddings,
ignore_source,
allocate_positions=False)
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
# Parameters for the position biases - deprecated. kept for backward compatibility
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
def renew_buffer(self, new_len):
return
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
self.opt.ignore_source = self.ignore_source
print("* Bayes-By-Backprop Relative Transformer Decoder with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = (l + 1.0) / self.layers * self.death_rate
block = TransformerDecoderLayer(self.opt, death_rate=death_r)
self.layer_modules.append(block)
def process_embedding(self, input, input_lang=None):
return input
def log_prior(self):
log_prior = 0
for module in self.layer_modules:
log_prior += module.log_prior()
return log_prior
def log_variational_posterior(self):
log_variational_posterior = 0
for module in self.layer_modules:
log_variational_posterior += module.log_variational_posterior()
return log_variational_posterior
def forward(self, input, context, src, input_pos=None, input_lang=None, **kwargs):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (wanna transpose)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
""" Embedding: batch_size x len_tgt x d_model """
input = input.transpose(0, 1) # T x B
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
emb = emb * math.sqrt(self.model_size)
mem_len = 0
mems = None
extra_context = None
if self.use_language_embedding:
lang_emb = self.language_embeddings(input_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
bos_emb = lang_emb.expand_as(emb[0])
emb[0] = bos_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
qlen = input.size(0)
klen = qlen + mem_len
# preparing self-attention mask. The input is either left or right aligned
dec_attn_mask = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
# pad_mask = input.eq(onmt.constants.PAD).byte() # L x B
#
# dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
# dec_attn_mask = dec_attn_mask.gt(0)
dec_attn_mask = dec_attn_mask.bool()
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos, bsz=input.size(1) if self.fast_self_attn else None)
output = self.preprocess_layer(emb.contiguous())
pos_emb = self.preprocess_layer(pos_emb)
for i, layer in enumerate(self.layer_modules):
output, coverage, _ = layer(output, context, pos_emb, dec_attn_mask, mask_src)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
output_dict = defaultdict(lambda: None, output_dict)
return output_dict
def step(self, input, decoder_state, **kwargs):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (wanna transpose)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
context = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
mask_src = decoder_state.src_mask
buffering = decoder_state.buffering
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1) # B x T
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
if buffering:
# use the last value of input to continue decoding
if input.size(1) > 1:
input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
else:
input_ = input.transpose(0, 1)
else:
input_ = input.transpose(0, 1) # from B x T to T x B
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_) * math.sqrt(self.model_size)
input = input.transpose(0, 1)
klen = input.size(0)
if self.use_language_embedding:
lang_emb = self.language_embeddings(lang) # B x H
if self.language_embedding_type in ['sum', 'all_sum']:
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
if input.size(0) == 1:
emb[0] = lang_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
# prepare position encoding
qlen = emb.size(0)
mlen = klen - qlen
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
dec_attn_mask = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mlen).byte()[:, :, None]
if onmt.constants.torch_version >= 1.2:
dec_attn_mask = dec_attn_mask.bool()
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
output = emb.contiguous()
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
if buffering:
output, coverage, buffer = layer(output, context, pos_emb, dec_attn_mask, mask_src,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
else:
output, coverage, _ = layer(output, context, pos_emb, dec_attn_mask, mask_src)
# normalize and take the last time step
output = self.postprocess_layer(output)
output = output[-1].unsqueeze(0)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = context
return output_dict
class BayesianTransformer(Transformer):
def log_prior(self):
return self.encoder.log_prior() + self.decoder.log_prior()
def log_variational_posterior(self):
return self.encoder.log_variational_posterior() + self.decoder.log_variational_posterior()
| 18,937
| 38.372141
| 114
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/bayes_by_backprop/relative_transformer_layers.py
|
import torch
import torch.nn as nn
import onmt
from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Linear
from onmt.utils import flip
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.bayes_by_backprop.encdec_attention import EncdecMultiheadAttn
from onmt.modules.bayes_by_backprop.feed_forward import PositionWiseFeedForward
from onmt.modules.bayes_by_backprop.relative_self_attention import RelativeSelfMultiheadAttn
class TransformerEncoderLayer(nn.Module):
# def __init__(self, h, d_model, p, d_ff, attn_p=0.1, variational=False, death_rate=0.0, **kwargs):
def __init__(self, opt, death_rate=0.0, **kwargs):
super().__init__()
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.fast_self_attention = opt.fast_self_attention
self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
d_head = opt.model_size // opt.n_heads
self.multihead = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational)
def log_prior(self):
log_prior = 0
log_prior += self.multihead.log_prior
self.multihead.log_prior = 0
log_prior += self.feedforward.log_prior
self.feedforward.log_prior = 0
return log_prior
def log_variational_posterior(self):
log_variational_posterior = 0
log_variational_posterior += self.multihead.log_variational_posterior
self.multihead.log_variational_posterior = 0
log_variational_posterior += self.feedforward.log_variational_posterior
self.feedforward.log_variational_posterior = 0
return log_variational_posterior
def forward(self, input, pos_emb, attn_mask, incremental=False, incremental_cache=None, mems=None):
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
query = self.preprocess_attn(input)
out, _ = self.multihead(query, pos_emb, attn_mask, None, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
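            # the branch survives with probability (1 - death_rate); dividing by that probability keeps the
            # expected value of the residual branch equal to its always-on value at inference (stochastic depth)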
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
if incremental:
return input, incremental_cache
return input
class TransformerDecoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0):
super().__init__()
self.ignore_source = opt.ignore_source
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.fast_self_attention = opt.fast_self_attention
self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if not self.ignore_source:
self.preprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)
self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
self.multihead_tgt = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational)
def forward(self, input, context, pos_emb, mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=True, mems=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
            # input and context are expected to be time-first (T x B x H)
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
query = self.preprocess_attn(input)
out, _ = self.multihead_tgt(query, pos_emb, None, mask_tgt, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
if not self.ignore_source:
query = self.preprocess_src_attn(input)
incremental_source = incremental and reuse_source
out, coverage = self.multihead_src(query, context, context, mask_src,
incremental=incremental_source,
incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_src_attn(out, input)
else:
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
else:
coverage = None
return input, coverage, incremental_cache
def log_prior(self):
log_prior = 0
log_prior += self.multihead_src.log_prior
self.multihead_src.log_prior = 0
log_prior += self.multihead_tgt.log_prior
self.multihead_tgt.log_prior = 0
log_prior += self.feedforward.log_prior
self.feedforward.log_prior = 0
return log_prior
def log_variational_posterior(self):
log_variational_posterior = 0
log_variational_posterior += self.multihead_src.log_variational_posterior
self.multihead_src.log_variational_posterior = 0
log_variational_posterior += self.multihead_tgt.log_variational_posterior
self.multihead_tgt.log_variational_posterior = 0
log_variational_posterior += self.feedforward.log_variational_posterior
self.feedforward.log_variational_posterior = 0
return log_variational_posterior
| 8,703
| 40.447619
| 103
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/multilingual_translator/reversible_transformers.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.layer_norm import LayerNorm
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
from onmt.modules.dropout import variational_dropout
class RelativeSelfAttention(nn.Module):
def __init__(self, opt):
super().__init__()
# self.layer_norm = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.layer_norm = LayerNorm((opt.model_size,), elementwise_affine=True)
self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
self.attn = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, dropout=opt.attn_dropout,
learnable_pos=opt.learnable_position_encoding,
max_pos=opt.max_pos_length)
self.variational = opt.variational_dropout
def forward(self, input, pos, key_padding_mask=None, attn_mask=None, incremental=False,
incremental_cache=None, cleaning=False):
q = self.layer_norm(input)
attn, coverage = self.attn(q, pos, key_padding_mask=key_padding_mask,
attn_mask=attn_mask,
incremental=incremental, incremental_cache=incremental_cache)
if not self.variational:
o = F.dropout(attn, p=self.residual_dropout, training=self.training, inplace=False)
else:
o = variational_dropout(attn, p=self.residual_dropout, inplace=False, training=self.training)
if cleaning:
del q, attn
return o, coverage
class FeedForward(nn.Module):
def __init__(self, opt):
super().__init__()
self.layer_norm = nn.LayerNorm((opt.model_size,), elementwise_affine=True)
self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
self.ffn_dropout = opt.ffn_dropout if opt.ffn_dropout >= 0 else opt.dropout
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=opt.variational_dropout, glu=opt.ffn_glu,
activation=opt.ffn_activation)
self.variational = opt.variational_dropout
def forward(self, input, cleaning=False):
x_norm = self.layer_norm(input)
x_ff = self.feedforward(x_norm)
if not self.variational:
o = F.dropout(x_ff, p=self.residual_dropout, training=self.training, inplace=False)
else:
o = variational_dropout(x_ff, p=self.residual_dropout, inplace=False, training=self.training)
if cleaning:
del x_norm, x_ff
return o
class SourceAttention(nn.Module):
def __init__(self, opt):
super().__init__()
self.layer_norm = nn.LayerNorm((opt.model_size,), elementwise_affine=True)
self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
self.attn = EncdecMultiheadAttn(opt.n_heads, opt.model_size, attn_drop=opt.attn_dropout)
self.dropout = opt.attn_dropout
self.variational = opt.variational_dropout
def forward(self, input, context, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
q = self.layer_norm(input)
attn, coverage = self.attn(q, context, context, attn_mask, incremental, incremental_cache)
if not self.variational:
o = F.dropout(attn, p=self.residual_dropout, training=self.training, inplace=False)
else:
o = variational_dropout(attn, p=self.residual_dropout, inplace=False, training=self.training)
if cleaning:
del q, attn
return o, coverage
class ReversibleEncoderFunction(Function):
@staticmethod
def forward(ctx, layers, hidden_states, pos, attn_mask):
# attn_output, hidden_states = hidden_states, hidden_states # torch.chunk(hidden_states, 2, dim=-1)
first_input, second_input = hidden_states, hidden_states
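        # both reversible streams start from the same embedding; after the last layer they are
        # averaged back into a single tensor via (x1 + x2) / 2 below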
        # run the layer stack without tracking gradients; activations are reconstructed in backward
with torch.no_grad():
for layer in layers:
# forward pass in the layer
first_input, second_input = layer(
first_input, second_input, pos, attn_mask
)
# attach params to ctx for backward
# why should we detach here? because Y1 Y2 were built within torch.no_grad()
# so cutting the backward from these variables seems unnecessary
# save_for_backward will release memory more efficiently
ctx.save_for_backward(first_input.clone().detach(), second_input, pos)
ctx.layers = layers
ctx.attn_mask = attn_mask # just in case attn_mask is None
with torch.no_grad():
output = first_input + second_input
output.div_(2)
# The only memory footprint is the last layer outputs and the "output".
return output
@staticmethod
def backward(ctx, grad_output):
grad_output.mul_(0.5)
first_grad_output, second_grad_output = grad_output, grad_output
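        # forward returned (x1 + x2) / 2, so each of the two streams receives half of the incoming gradient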
# retrieve params from ctx
first_output, second_output, pos = ctx.saved_tensors
layers = ctx.layers
attn_mask = ctx.attn_mask
for idx, layer in enumerate(layers[::-1]):
# backprop
first_output, second_output, first_grad_output, second_grad_output = layer.backward_pass(
first_output, second_output, first_grad_output, second_grad_output, pos, attn_mask
)
grad_hidden_states = first_grad_output + second_grad_output
        # the position encodings do not need gradients
return None, grad_hidden_states, None, None
def reversible_encoder(layers, hidden_states, pos, attn_mask):
return ReversibleEncoderFunction.apply(layers, hidden_states, pos, attn_mask)
class ReversibleTransformerEncoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0):
super().__init__()
self.self_attn = RelativeSelfAttention(opt)
self.feedforward = FeedForward(opt)
self.death_rate = death_rate
self.forward_coin = True
def _init_attention_seed(self, *args):
"""
This function sets a new seed for the
attention layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
self.attn_cpu_state = torch.get_rng_state()
self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)
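        # these saved CPU/GPU RNG states are restored with torch.random.fork_rng in backward_pass,
        # so the recomputed dropout masks are identical to the ones drawn in this forward pass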
def _init_feedforward_seed(self, *args):
"""
This function sets a new seed for the
feed forward layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
self.ffn_cpu_state = torch.get_rng_state()
self.ffn_gpu_devices, self.ffn_gpu_states = get_device_states(*args)
def forward(self, x1, x2, pos, attn_mask=None):
"""
:param pos: position embeddings
:param x2:
:param x1:
:param attn_mask:
:return:
"""
# every forward pass we sample a different seed
# for dropout and save for forward fn in backward pass
# to have correct dropout
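        # RevNet-style coupling: y1 = x1 + F(x2) with F = self-attention, and y2 = x2 + G(y1) with
        # G = feed-forward; backward_pass inverts these equations to recover x1 and x2 without storing them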
self._init_attention_seed(x2, pos, attn_mask)
z1, coverage = self.self_attn(x2, pos, key_padding_mask=attn_mask, attn_mask=None, cleaning=True)
y1 = z1 + x1
self._init_feedforward_seed(y1)
z2 = self.feedforward(y1, cleaning=True)
y2 = z2 + x2
del x1, x2, z1, z2
"""return Y1 and Y2"""
return y1, y2
def backward_pass(self, y1, y2, dy1, dy2, pos, attn_mask=None):
"""
:param pos:
:param y1:
:param y2:
:param dy1:
:param dy2:
:param attn_mask:
:return:
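        Reconstructs x2 = y2 - G(y1) and then x1 = y1 - F(x2), combining the gradient flowing through
        the residual connections (dy1, dy2) with the gradients of the recomputed sub-functions.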
"""
"""Implementation of the backward pass for reversible transformer encoder"""
with torch.enable_grad():
y1.requires_grad = True
with torch.random.fork_rng(devices=self.ffn_gpu_devices, enabled=True):
torch.set_rng_state(self.ffn_cpu_state)
set_device_states(self.ffn_gpu_devices, self.ffn_gpu_states)
gy1 = self.feedforward(y1)
gy1.backward(dy2)
with torch.no_grad():
# restore X2 = Y2 - G(Y1)
x2 = y2 - gy1
del gy1, y2
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
torch.set_rng_state(self.attn_cpu_state)
set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
fx2, _, = self.self_attn(x2, pos, key_padding_mask=attn_mask)
fx2.backward(dx1)
with torch.no_grad():
# restore X1 = Y1 - F(X2)
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
x2.grad = None
del dy2
return x1, x2, dx1, dx2
##################################################
############## DECODER FUNCTION ##################
##################################################
class ReversibleDecoderFunction(Function):
@staticmethod
def forward(ctx, layers, hidden_states, pos, context, tgt_mask, src_mask,
incremental=False, incremental_cache=None):
bsz, seq_len = hidden_states.shape[0], hidden_states.shape[1]
B = bsz * seq_len
idx = 0
x1, x2 = hidden_states, hidden_states
coverages = []
for layer in layers:
idx = idx + 1
# forward pass in the layer
x1, x2, coverage_src = layer(
x1, x2, pos, context, tgt_mask, src_mask,
incremental=incremental, incremental_cache=incremental_cache
)
coverages.append(coverage_src)
# attach params to ctx for backward
# why should we detach here? because Y1 Y2 were built within torch.no_grad()
# so cutting the backward from these variables seems unnecessary
# save_for_backward will release memory more efficiently
# detach() seems to be required especially for context ...
ctx.save_for_backward(x1.clone().detach(), x2, context, pos)
ctx.layers = layers
ctx.src_mask = src_mask
ctx.tgt_mask = tgt_mask
with torch.no_grad():
output = x1 + x2
        # average the two revnet output streams:
return output.mul_(0.5), torch.stack(coverages)
@staticmethod
def backward(ctx, grad_hidden_states, grad_coverage):
        # backward receives two incoming gradients because forward returned two outputs
# grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
grad_hidden_states.mul_(0.5)
dx1, dx2 = grad_hidden_states, grad_hidden_states
# retrieve params from ctx
x1, x2, context, pos = ctx.saved_tensors
layers = ctx.layers
src_mask = ctx.src_mask
tgt_mask = ctx.tgt_mask
grad_context = None # we need to sum up the gradients of the context manually
for idx, layer in enumerate(layers[::-1]):
"""Note: Here for each layer we detach the context once because we need to consider it
as a separate variable and then later accumulate the gradients"""
x1, x2, dx1, dx2, grad_context_ = layer.backward_pass(
x1, x2, dx1, dx2,
pos, context.detach(), tgt_mask, src_mask
)
if grad_context is None:
grad_context = grad_context_
            elif grad_context_ is not None:  # guard against a layer returning None for the context gradient
grad_context.add_(grad_context_)
del grad_context_
grad_input = dx1 + dx2
# grad pos is also None
return None, grad_input, None, grad_context, None, None, None, None
def reversible_decoder(layers, hidden_states, pos, context, tgt_mask, src_mask, incremental, incremental_cache):
return ReversibleDecoderFunction.apply(layers, hidden_states, pos, context,
tgt_mask, src_mask, incremental, incremental_cache)
class ReversibleTransformerDecoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0):
super(ReversibleTransformerDecoderLayer, self).__init__()
self.ignore_source = opt.ignore_source
assert not self.ignore_source
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.dropout = opt.dropout
self.self_attention = RelativeSelfAttention(opt)
self.feed_forward_first = FeedForward(opt)
if not self.ignore_source:
self.src_attention = SourceAttention(opt)
self.feed_forward_second = FeedForward(opt)
def _init_src_attention_seed(self, *args):
"""
This function sets a new seed for the
attention layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
self.src_attn_cpu_state = torch.get_rng_state()
self.src_attn_gpu_devices, self.src_attn_gpu_states = get_device_states(*args)
def _init_attention_seed(self, *args):
"""
This function sets a new seed for the
attention layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
# randomize seeds
self.attn_cpu_state = torch.get_rng_state()
self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)
def _init_feedforward1_seed(self, *args):
"""
This function sets a new seed for the
feed forward layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
# randomize seeds
self.ffn1_cpu_state = torch.get_rng_state()
self.ffn1_gpu_devices, self.ffn1_gpu_states = get_device_states(*args)
def _init_feedforward2_seed(self, *args):
"""
This function sets a new seed for the
feed forward layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
# randomize seeds
self.ffn2_cpu_state = torch.get_rng_state()
self.ffn2_gpu_devices, self.ffn2_gpu_states = get_device_states(*args)
def forward(self, x1, x2, pos, context, mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=True):
"""
:param pos:
:param x1: X1
:param x2: X2
:param context:
:param mask_tgt:
:param mask_src:
:param incremental:
:param incremental_cache:
:param reuse_source:
:return:
"""
# if self.training:
# coin = (torch.rand(1)[0].item() >= self.death_rate)
#
# self.forward_coin = coin
with torch.no_grad():
# prepare the state for the first function (att > src->att)
self._init_attention_seed(x2, pos)
f_x2, coverage, = self.self_attention(x2, pos,
key_padding_mask=None, attn_mask=mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache,
cleaning=True)
z1 = f_x2 + x1
self._init_feedforward1_seed(z1)
g_z1 = self.feed_forward_first(z1, cleaning=True)
z2 = x2 + g_z1
self._init_src_attention_seed(z2, context, mask_src)
h_z2, coverage_src = self.src_attention(z2, context, mask_src,
incremental=incremental,
incremental_cache=incremental_cache)
y1 = z1 + h_z2
# prepare the state for the second function
self._init_feedforward2_seed(y1)
k_y1 = self.feed_forward_second(y1, cleaning=True)
# if self.training and self.death_rate > 0:
# g_y1 = g_y1 / (1 - self.death_rate)
y2 = z2 + k_y1
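            # reversible coupling with four sub-functions:
            # z1 = x1 + SelfAttn(x2), z2 = x2 + FFN1(z1), y1 = z1 + SrcAttn(z2), y2 = z2 + FFN2(y1);
            # backward_pass inverts these four steps to reconstruct x1 and x2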
"""return Y1 and Y2"""
return y1, y2, coverage_src
def backward_pass(self, y1, y2, dy1, dy2, pos, context,
mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=False):
"""
:param pos:
        :param y1:
        :param y2:
        :param dy1: dL/dY1
        :param dy2: dL/dY2
:param context:
:param mask_tgt:
:param mask_src:
:param incremental:
:param incremental_cache:
:param reuse_source:
:return:
"""
# if not self.forward_coin: # this layer was skipped, just return
# return y1, y2, dy1, dy2, None
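        # the four blocks below undo the forward coupling in reverse order:
        # z2 = y2 - FFN2(y1), z1 = y1 - SrcAttn(z2), x2 = z2 - FFN1(z1), x1 = z1 - SelfAttn(x2),
        # accumulating gradients through both the residual paths and the recomputed sub-functions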
# first block: recompute the ffn transition function
with torch.enable_grad():
y1.requires_grad = True
with torch.random.fork_rng(devices=self.ffn2_gpu_devices, enabled=True):
torch.set_rng_state(self.ffn2_cpu_state)
set_device_states(self.ffn2_gpu_devices, self.ffn2_gpu_states)
k_y1 = self.feed_forward_second(y1)
k_y1.backward(dy2)
with torch.no_grad():
z2 = y2 - k_y1
del k_y1, y2
# Dz1 = DY1 + Y1.grad
dz1 = dy1 + y1.grad
del dy1
y1.grad = None
# second block
with torch.enable_grad():
z2.requires_grad = True
context.requires_grad = True
with torch.random.fork_rng(devices=self.src_attn_gpu_devices, enabled=True):
torch.set_rng_state(self.src_attn_cpu_state)
set_device_states(self.src_attn_gpu_devices, self.src_attn_gpu_states)
# if not self.ignore_source:
h_z2, _ = self.src_attention(z2, context, mask_src,
incremental=incremental,
incremental_cache=incremental_cache)
# torch.autograd.backward(h_z2, dz1)
h_z2.backward(dz1)
with torch.no_grad():
z1 = y1 - h_z2
del y1, h_z2
dz2 = dy2 + z2.grad
z2.grad = None
del dy2
grad_context = context.grad
del context.grad
# third block
with torch.enable_grad():
z1.requires_grad = True
with torch.random.fork_rng(devices=self.ffn1_gpu_devices, enabled=True):
torch.set_rng_state(self.ffn1_cpu_state)
set_device_states(self.ffn1_gpu_devices, self.ffn1_gpu_states)
g_z1 = self.feed_forward_first(z1)
# torch.autograd.backward(g_z1, dz2)
g_z1.backward(dz2)
#
with torch.no_grad():
x2 = z2 - g_z1
del z2, g_z1
dx1 = dz1 + z1.grad
z1.grad = None
del dz1
# fourth block
with torch.enable_grad():
x2.requires_grad = True
with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
torch.set_rng_state(self.attn_cpu_state)
set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
f_x2, _, = self.self_attention(x2, pos,
key_padding_mask=None, attn_mask=mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache)
f_x2.backward(dx1)
with torch.no_grad():
x1 = z1 - f_x2
del z1, f_x2
dx2 = dz2 + x2.grad
x2.grad = None
del dz2
return x1, x2, dx1, dx2, grad_context
| 21,390
| 34.89094
| 113
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/multilingual_translator/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/models/multilingual_translator/relative_transformer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from onmt.modules.sinusoidal_positional_encoding import SinusoidalPositionalEmbedding, FastSinusoidalPositionalEncoding
from onmt.models.transformer_layers import PrePostProcessing
from .relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from .reversible_transformers import ReversibleTransformerEncoderLayer, reversible_encoder
from .reversible_transformers import ReversibleTransformerDecoderLayer, reversible_decoder
from onmt.modules.identity import Identity
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
from torch.utils.checkpoint import checkpoint
torch.set_printoptions(threshold=500000)
def create_forward_function(module):
def forward_pass(*inputs):
return module(*inputs)
return forward_pass
class RelativeTransformerEncoder(TransformerEncoder):
def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
self.death_rate = opt.death_rate
self.learnable_position_encoding = opt.learnable_position_encoding
self.layer_modules = list()
self.unidirectional = opt.unidirectional
self.n_heads = opt.n_heads
self.n_languages = opt.n_languages
self.checkpointing = opt.checkpointing
self.absolute_position_encoding = opt.absolute_position_encoding
self.early_emb_scale = opt.encoder_early_emb_scale
self.learnable_position_encoding = opt.learnable_position_encoding
self.rotary_position_encoding = opt.rotary_position_encoding
self.max_pos_length = opt.max_pos_length
self.reversible = opt.src_reversible
# build_modules will be called from the inherited constructor
super(RelativeTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type,
language_embeddings)
if not self.early_emb_scale and (self.use_language_embedding or self.absolute_position_encoding):
            print("[INFO] Embeddings will be scaled after the language embedding and position encoding are added."
                  "\n[INFO] For multilingual models it is advisable to use -encoder_early_emb_scale")
# learnable position encoding
if self.learnable_position_encoding:
assert not self.rotary_position_encoding
self.positional_encoder = None
elif self.rotary_position_encoding:
from onmt.modules.rotary_postional_encodings import SinusoidalEmbeddings
self.positional_encoder = SinusoidalEmbeddings(opt.model_size // opt.n_heads)
else:
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
if opt.rezero or opt.post_norm:
self.postprocess_layer = Identity()
self.d_head = self.model_size // self.n_heads
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
if self.reversible:
print("* Relative Reversible Encoder with %.2f expected layers" % e_length)
else:
print("* Relative Translation Encoder with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for _l in range(self.layers):
# linearly decay the death rate
death_r = (_l + 1.0) / self.layers * self.death_rate
if self.reversible:
block = ReversibleTransformerEncoderLayer(self.opt, death_rate=death_r)
else:
block = RelativeTransformerEncoderLayer(self.opt, death_rate=death_r)
self.layer_modules.append(block)
def forward(self, input, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
            input: batch_size x src_len (will be transposed to time-first internally)
Outputs Shapes:
out: batch_size x src_len x d_model
mask_src
"""
""" Embedding: batch_size x src_len x d_model """
bsz_first_input = input
input = input.transpose(0, 1)
dec_attn_mask = bsz_first_input.eq(onmt.constants.PAD).unsqueeze(1)
mem_len = 0
mems = None
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.early_emb_scale:
""" Scale the emb by sqrt(d_model) """
emb = emb * math.sqrt(self.model_size)
""" Adding language embeddings """
if self.use_language_embedding:
assert self.language_embedding is not None
            # the input is T x B x H and lang_emb is B x H, so unsqueeze lang_emb to broadcast over time
if self.language_embedding_type in ['sum', 'all_sum']:
lang_emb = self.language_embedding(input_lang)
emb = emb + lang_emb.unsqueeze(0)
""" Adding positional encoding """
qlen = input.size(0)
klen = qlen + mem_len
# Asynchronous positions: 2K+1 positions instead of K+1
if not self.rotary_position_encoding:
if not self.learnable_position_encoding:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
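                # e.g. klen = 3 gives pos = [2, 1, 0, -1, -2]: relative offsets from +(klen - 1) down to
                # -(klen - 1), so keys on both sides of a query position receive distinct encodings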
# pos_emb has size 2T+1 x 1 x H
pos_emb = self.positional_encoder(pos, bsz=input.size(1))
pos_emb = self.preprocess_layer(pos_emb)
else:
range_vec = torch.arange(klen, device=emb.device)
range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
distance_mat = range_vec - range_mat.transpose(0, 1)
distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
pos_emb = distance_mat
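                # distance_mat[i][j] = j - i, e.g. [[0, 1, 2], [-1, 0, 1], [-2, -1, 0]] for klen = 3;
                # clamping to [-max_pos_length, max_pos_length] and adding max_pos_length turns the distances
                # into non-negative indices into the learnable position embedding table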
mask_src = input.eq(onmt.constants.PAD).unsqueeze(0) # 1 x src_len x batch_size for broadcasting
elif self.rotary_position_encoding:
# generate rotary position encodings as sinusoidal
pos_emb = self.positional_encoder(input, seq_dim=0)
mask_src = input.eq(onmt.constants.PAD).transpose(0, 1) # bsz first
if onmt.constants.torch_version >= 1.2:
mask_src = mask_src.bool()
if not self.early_emb_scale:
""" Scale the emb by sqrt(d_model) """
emb = emb * math.sqrt(self.model_size)
# context size is now T x B x H
context = self.preprocess_layer(emb)
if self.reversible:
context = reversible_encoder(self.layer_modules, context, pos_emb, mask_src)
else:
for i, layer in enumerate(self.layer_modules):
# src_len x batch_size x d_model
context = layer(context, pos_emb, mask_src, src_lang=input_lang)
# if self.checkpointing == 0 or self.training is False:
# context = layer(context, pos_emb, mask_src, src_lang=input_lang)
# else:
# context = checkpoint(create_forward_function(layer), context, pos_emb, mask_src, input_lang)
# final layer norm. we can consider this layer norm as a part of the output layer/function
context = self.postprocess_layer(context)
output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask, 'src': input, 'pos_emb': pos_emb})
return output_dict
class RelativeTransformerDecoder(TransformerDecoder):
def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):
self.death_rate = opt.death_rate
self.n_heads = opt.n_heads
self.checkpointing = opt.checkpointing
self.late_emb_scale = opt.decoder_late_emb_scale
self.learnable_position_encoding = opt.learnable_position_encoding
self.max_pos_length = opt.max_pos_length
self.reversible = opt.tgt_reversible
self.rotary_position_encoding = opt.rotary_position_encoding
# build_modules will be called from the inherited constructor
super(RelativeTransformerDecoder, self).__init__(opt, dicts,
positional_encoder,
language_embeddings,
ignore_source,
allocate_positions=False)
if self.learnable_position_encoding:
assert self.rotary_position_encoding is False
self.positional_encoder = None
elif self.rotary_position_encoding:
from onmt.modules.rotary_postional_encodings import SinusoidalEmbeddings
self.positional_encoder = SinusoidalEmbeddings(opt.model_size // opt.n_heads)
else:
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
if opt.rezero or opt.post_norm:
self.postprocess_layer = Identity()
def renew_buffer(self, new_len):
return
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
self.opt.ignore_source = self.ignore_source
if self.reversible:
print("* Transformer Reversible Decoder with Relative Attention with %.2f expected layers" % e_length)
else:
print("* Transformer Decoder with Relative Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = (l + 1.0) / self.layers * self.death_rate
if not self.reversible:
block = RelativeTransformerDecoderLayer(self.opt, death_rate=death_r)
else:
block = ReversibleTransformerDecoderLayer(self.opt)
self.layer_modules.append(block)
def process_embedding(self, input, input_lang=None):
return input
# TODO: merging forward_stream and forward
# TODO: write a step function for encoder
def forward(self, input, context, src, input_pos=None, src_lang=None, tgt_lang=None,
streaming=False, **kwargs):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (will be transposed to time-first internally)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
""" Embedding: batch_size x len_tgt x d_model """
input = input.transpose(0, 1) # T x B
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if not self.late_emb_scale:
emb = emb * math.sqrt(self.model_size)
mem_len = 0
mems = None
extra_context = None
if self.use_language_embedding:
lang_emb = self.language_embeddings(tgt_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
if context is not None:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
qlen = input.size(0)
klen = qlen + mem_len
# preparing self-attention mask. The input is left aligned so we do not need to add the pad mask
dec_attn_mask = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()
dec_attn_mask = dec_attn_mask.bool()
# relative positions
if self.rotary_position_encoding:
pos_emb = self.positional_encoder(input, seq_dim=0)
pos_emb_src = self.positional_encoder(context, seq_dim=0)
elif not self.learnable_position_encoding:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos, bsz=input.size(1))
pos_emb = self.preprocess_layer(pos_emb)
pos_emb_src = None
else:
range_vec = torch.arange(klen, device=emb.device)
range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
distance_mat = range_vec - range_mat.transpose(0, 1)
distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
pos_emb = distance_mat
pos_emb_src = None
# pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype).long()
# pos.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
# pos_emb = pos.unsqueeze(1)
if self.late_emb_scale:
emb = emb * math.sqrt(self.model_size)
output = self.preprocess_layer(emb.contiguous())
if self.reversible:
# TODO: add src lang and tgt lang to reversible
output, coverage = reversible_decoder(self.layer_modules, output, pos_emb, context,
dec_attn_mask.squeeze(-1), mask_src,
False, None) # incremental variables
else:
for i, layer in enumerate(self.layer_modules):
output, coverage = layer(output, context, pos_emb, dec_attn_mask, mask_src,
src_lang=src_lang, tgt_lang=tgt_lang,
pos_emb_src=pos_emb_src)
# if self.checkpointing == 0 or self.training is False:
#
# output, coverage = layer(output, context, pos_emb, dec_attn_mask, mask_src,
# src_lang=src_lang, tgt_lang=tgt_lang)
#
# else:
# output, coverage = checkpoint(create_forward_function(layer), output, context, pos_emb,
# dec_attn_mask,
# mask_src, src_lang, tgt_lang)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
output_dict = defaultdict(lambda: None, output_dict)
return output_dict
def step(self, input, decoder_state, streaming=False):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (will be transposed to time-first internally)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
context = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
src_lang = decoder_state.src_lang
buffering = decoder_state.buffering
if self.rotary_position_encoding:
buffering = False
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1) # B x T
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
if buffering:
# use the last value of input to continue decoding
input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
# input_ = input.transpose(0, 1)
else:
input_ = input.transpose(0, 1) # from B x T to T x B
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_)
if not self.late_emb_scale:
emb = emb * math.sqrt(self.model_size)
input = input.transpose(0, 1)
klen = input.size(0)
if self.use_language_embedding:
lang_emb = self.language_embeddings(lang) # B x H
if self.language_embedding_type in ['sum', 'all_sum']:
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
# prepare position encoding
qlen = emb.size(0)
mlen = klen - qlen
# if not self.absolute_position_encoding:
if self.rotary_position_encoding:
pos_emb = self.positional_encoder(input, seq_dim=0)
pos_emb_src = self.positional_encoder(context, seq_dim=0)
else:
pos_emb_src = None
if self.learnable_position_encoding:
if buffering:
distance_mat = torch.arange(-klen + 1, 1, 1, device=emb.device).unsqueeze(0)
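                    # during buffered decoding there is a single new query at position klen - 1,
                    # so its distances to the klen keys are -(klen - 1) .. 0 before clamping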
else:
range_vec = torch.arange(klen, device=emb.device)
range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
distance_mat = range_vec - range_mat.transpose(0, 1)
distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
pos_emb = distance_mat
# pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
# pos.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
# pos_emb = pos.unsqueeze(1)
else:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
# else:
# if buffering:
# emb = self.positional_encoder(emb.transpose(0, 1), t=input.size(1)).transpose(0, 1)
# else:
# emb = self.positional_encoder(emb.transpose(0, 1)).transpose(0, 1)
dec_attn_mask = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mlen).byte()
dec_attn_mask = dec_attn_mask.bool()
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
if self.late_emb_scale:
emb = emb * math.sqrt(self.model_size)
output = emb.contiguous()
if self.reversible:
incremental = True
            incremental_cache = None  # no per-layer buffer is maintained for the reversible path here
            output, coverage = reversible_decoder(self.layer_modules, output, pos_emb, context,
                                                  dec_attn_mask, mask_src,
                                                  incremental, incremental_cache)
else:
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
if buffering:
output, coverage, buffer = layer(output, context, pos_emb, dec_attn_mask, mask_src,
tgt_lang=lang, src_lang=src_lang, pos_emb_src=pos_emb_src,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
else:
output, coverage = layer(output, context, pos_emb, dec_attn_mask, mask_src,
src_lang=src_lang, tgt_lang=lang, pos_emb_src=pos_emb_src)
# normalize and take the last time step
output = self.postprocess_layer(output)
output = output[-1].unsqueeze(0)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = context
return output_dict
| 21,668
| 43.58642
| 130
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/multilingual_translator/relative_transformer_layers.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.optimized.self_attention import SelfMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.multilingual_factorized.linear import MFWPositionWiseFeedForward
from onmt.modules.multilingual_factorized.encdec_attention import MFWEncdecMultiheadAttn
from onmt.modules.multilingual_factorized.relative_attention import MFWRelativeSelfMultiheadAttn
from onmt.modules.dropout import variational_dropout
from onmt.modules.identity import Identity
from onmt.modules.optimized.dropout_add import fused_dropout_add
def preprocessing(rezero, model_size, post_norm=False):
sequence = ''
if not rezero and not post_norm:
sequence += 'n'
return PrePostProcessing(model_size, 0.0, sequence=sequence)
def postprocessing(rezero, model_size, dropout, variational=False, post_norm=False,
dropout_residual=True):
sequence = ''
if dropout_residual:
sequence += 'd'
if rezero:
sequence += 'z'
else:
sequence += 'a'
if post_norm:
sequence += 'n'
return PrePostProcessing(model_size, dropout,
sequence=sequence,
variational=variational)
class RelativeTransformerEncoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0, **kwargs):
super(RelativeTransformerEncoderLayer, self).__init__()
self.variational = opt.variational_dropout
self.batch_ensemble = opt.batch_ensemble
# self.multilingual_factorized_weights = opt.multilingual_factorized_weights
self.death_rate = death_rate
self.mfw = opt.multilingual_factorized_weights
self.macaron = opt.macaron
self.ffn_scale = 0.5 if self.macaron else 1
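        # in the macaron configuration the attention block is sandwiched between two feed-forward blocks,
        # each contributing a half-step residual, hence the 0.5 scale (Macaron-Net / Conformer convention)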
self.dropout = opt.dropout
self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
self.ffn_dropout = opt.ffn_dropout if opt.ffn_dropout >= 0 else opt.dropout
self.rezero = opt.rezero
self.rotary_position_encoding = opt.rotary_position_encoding
self.learnable_pos = opt.learnable_position_encoding
self.stochastic_sublayer = opt.stochastic_sublayer
self.post_norm = opt.post_norm
if self.macaron:
self.preprocess_mcr_ffn = preprocessing(opt.rezero, opt.model_size, self.post_norm)
if self.mfw:
self.mcr_feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
no_bias=opt.mfw_no_bias,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
else:
self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
activation=opt.ffn_activation,
glu=opt.ffn_glu,
dropout_residual=self.post_norm,
res_dropout=self.residual_dropout)
self.postprocess_mcr_ffn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
self.variational, self.post_norm,
dropout_residual=not self.mcr_feedforward.dropout_residual)
self.preprocess_attn = preprocessing(opt.rezero, opt.model_size, self.post_norm)
self.postprocess_attn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
self.variational, self.post_norm)
self.preprocess_ffn = preprocessing(opt.rezero, opt.model_size, self.post_norm)
d_head = opt.model_size // opt.n_heads
if self.mfw:
self.feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
no_bias=opt.mfw_no_bias,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
self.multihead = MFWRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
no_bias=opt.mfw_no_bias, )
else:
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
activation=opt.ffn_activation,
glu=opt.ffn_glu,
dropout_residual=opt.post_norm,
res_dropout=self.residual_dropout)
if not self.rotary_position_encoding:
self.multihead = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
learnable_pos=self.learnable_pos,
max_pos=opt.max_pos_length)
elif self.rotary_position_encoding:
self.multihead = SelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
rotary_pos_enc=True)
self.postprocess_ffn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
self.variational, self.post_norm,
dropout_residual=not self.feedforward.dropout_residual)
def forward(self, input, pos_emb, attn_mask, src_lang=None,
incremental=False, incremental_cache=None, mems=None):
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
if self.macaron:
out = self.mcr_feedforward(self.preprocess_mcr_ffn(input), src_lang)
if self.training and self.death_rate > 0:
ffn_scale = self.ffn_scale / (1 - self.death_rate)
else:
ffn_scale = self.ffn_scale
input = self.postprocess_mcr_ffn(out * ffn_scale, input)
# input = fused_dropout_add(out * ffn_scale, input,self.residual_dropout, self.training)
if self.stochastic_sublayer: # re-toss-coin
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
query = self.preprocess_attn(input)
if self.mfw:
out, _ = self.multihead(query, pos_emb, src_lang, attn_mask, None, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
else:
out, _ = self.multihead(query, pos_emb, attn_mask, None, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
# input = fused_dropout_add(out, input, self.residual_dropout, self.training)
if self.stochastic_sublayer: # re-toss-coin
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input), src_lang)
# rescaling before residual
if self.training and self.death_rate > 0:
ffn_scale = self.ffn_scale / (1 - self.death_rate)
else:
ffn_scale = self.ffn_scale
input = self.postprocess_ffn(out * ffn_scale, input)
# input = fused_dropout_add(out * ffn_scale, input, self.residual_dropout, self.training)
if incremental:
return input, incremental_cache
return input
class RelativeTransformerDecoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0):
super(RelativeTransformerDecoderLayer, self).__init__()
self.ignore_source = opt.ignore_source
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.batch_ensemble = opt.batch_ensemble
self.mfw = opt.multilingual_factorized_weights
self.macaron = opt.macaron
self.ffn_scale = 0.5 if self.macaron else 1
self.dropout = opt.dropout
self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
self.ffn_dropout = opt.ffn_dropout if opt.ffn_dropout >= 0 else opt.dropout
self.rezero = opt.rezero
self.n_heads = opt.n_heads
self.learnable_pos = opt.learnable_position_encoding
self.rotary_position_encoding = opt.rotary_position_encoding
self.stochastic_sublayer = opt.stochastic_sublayer
self.post_norm = opt.post_norm
if self.macaron:
self.preprocess_mcr_ffn = preprocessing(opt.rezero, opt.model_size, self.post_norm)
if self.mfw:
self.mcr_feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
no_bias=opt.mfw_no_bias,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
else:
self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
activation=opt.ffn_activation,
glu=opt.ffn_glu,
dropout_residual=opt.post_norm,
res_dropout=self.residual_dropout)
self.postprocess_mcr_ffn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
self.variational, self.post_norm,
dropout_residual=not self.mcr_feedforward.dropout_residual)
self.preprocess_attn = preprocessing(opt.rezero, opt.model_size, self.post_norm)
self.postprocess_attn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
self.variational, self.post_norm)
if not self.ignore_source:
self.preprocess_src_attn = preprocessing(opt.rezero, opt.model_size, self.post_norm)
self.postprocess_src_attn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
self.variational, self.post_norm)
if not self.mfw:
self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)
else:
self.multihead_src = MFWEncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
no_bias=opt.mfw_no_bias, )
self.preprocess_ffn = preprocessing(opt.rezero, opt.model_size, self.post_norm)
d_head = opt.model_size // opt.n_heads
if self.mfw:
self.feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
no_bias=opt.mfw_no_bias,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
self.multihead_tgt = MFWRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
no_bias=opt.mfw_no_bias, )
else:
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
activation=opt.ffn_activation,
glu=opt.ffn_glu,
dropout_residual=opt.post_norm,
res_dropout=self.residual_dropout)
if self.rotary_position_encoding:
self.multihead_tgt = SelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
rotary_pos_enc=True)
else:
self.multihead_tgt = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
learnable_pos=self.learnable_pos,
max_pos=opt.max_pos_length)
self.postprocess_ffn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
self.variational, self.post_norm,
dropout_residual=not self.feedforward.dropout_residual)
def forward(self, input, context, pos_emb, mask_tgt, mask_src,
src_lang=None, tgt_lang=None, pos_emb_src=None,
incremental=False, incremental_cache=None, reuse_source=True, mems=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
if self.macaron:
out = self.mcr_feedforward(self.preprocess_mcr_ffn(input), src_lang)
if self.training and self.death_rate > 0:
ffn_scale = self.ffn_scale / (1 - self.death_rate)
else:
ffn_scale = self.ffn_scale
# input = self.postprocess_mcr_ffn(out * ffn_scale, input)
input = fused_dropout_add(out * ffn_scale, input, self.residual_dropout, self.training)
if self.stochastic_sublayer:
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
# input and context should be T x B x H
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
query = self.preprocess_attn(input)
if self.mfw:
out, _ = self.multihead_tgt(query, pos_emb, tgt_lang, None, mask_tgt, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
else:
out, _ = self.multihead_tgt(query, pos_emb, None, mask_tgt, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
# input = self.postprocess_attn(out, input)
input = fused_dropout_add(out, input, self.residual_dropout, self.training)
if self.stochastic_sublayer:
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
""" Context Attention layer
layernorm > attn > dropout > residual
"""
if not self.ignore_source:
query = self.preprocess_src_attn(input)
incremental_source = incremental and reuse_source
if self.mfw:
out, coverage = self.multihead_src(query, context, context, src_lang, tgt_lang, mask_src,
incremental=incremental_source,
incremental_cache=incremental_cache)
else:
out, coverage = self.multihead_src(query, context, context, mask_src,
rotary_pos_enc=self.rotary_position_encoding,
pos_emb_q=pos_emb,
pos_emb_k=pos_emb_src,
incremental=incremental_source,
incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
# input = self.postprocess_src_attn(out, input)
input = fused_dropout_add(out, input, self.residual_dropout, self.training)
else:
coverage = None
else:
            coverage = input.new_zeros(input.size(1), self.n_heads,
                                       input.size(0), context.size(0) if context is not None else input.size(0))
if self.stochastic_sublayer:
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input), tgt_lang)
# rescaling before residual
if self.training and self.death_rate > 0:
ffn_scale = self.ffn_scale / (1 - self.death_rate)
else:
ffn_scale = self.ffn_scale
# input = self.postprocess_ffn(out * ffn_scale, input)
input = fused_dropout_add(out * ffn_scale, input, self.residual_dropout, self.training)
if incremental_cache is None:
return input, coverage
else:
return input, coverage, incremental_cache
| 21,330
| 50.524155
| 115
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/discourse/discourse_transformer.py
|
# Transformer with discourse information
from collections import defaultdict
import onmt
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformers import Transformer, TransformerDecodingState
from onmt.modules.pre_post_processing import PrePostProcessing
from .gate_layer import RelativeGateEncoderLayer
class DiscourseTransformerEncoder(nn.Module):
def __init__(self, opt, encoder=None):
self.opt = opt
super(DiscourseTransformerEncoder, self).__init__()
# a shared encoder for all present, past and future
self.encoder = encoder
if hasattr(encoder, 'word_lut'):
self.word_lut = encoder.word_lut
from ..multilingual_translator.relative_transformer_layers \
import RelativeTransformerEncoderLayer
else:
from ..speech_recognizer.relative_transformer_layers \
import RelativeTransformerEncoderLayer
self.past_layer = RelativeTransformerEncoderLayer(self.opt)
self.input_type = encoder.input_type
self.time = None # backward compatible
self.gate_layer = RelativeGateEncoderLayer(self.opt)
self.postprocess_layer = PrePostProcessing(opt.model_size, 0.0, sequence='n')
def forward(self, input, past_input=None, input_lang=None, factorize=False):
assert past_input is not None
# the same encoder is used to encode the previous and current segment
past_encoder_output = self.encoder(past_input, input_lang=input_lang, factorize=factorize)
#
past_context = past_encoder_output['context']
past_pos_emb = past_encoder_output['pos_emb']
encoder_output = self.encoder(input, input_lang=input_lang, factorize=factorize)
# past_mask_src = past_input.narrow(2, 0, 1).squeeze(2).transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
# past_context = self.past_layer(past_context, past_pos_emb, past_mask_src,
# src_lang=input_lang, factorize=factorize)
current_context = encoder_output['context']
current_pos_emb = encoder_output['pos_emb']
if len(input.size()) > 2:
mask_src = input.narrow(2, 0, 1).squeeze(2).transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
past_mask = past_input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
dec_attn_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = input.transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
past_mask = past_input.eq(onmt.constants.PAD).unsqueeze(1)
dec_attn_mask = input.eq(onmt.constants.PAD).unsqueeze(1)
context = self.gate_layer(current_context, past_context, current_pos_emb, mask_src, past_mask,
src_lang=input_lang, factorize=factorize)
# context = current_context
# final layer norm
context = self.postprocess_layer(context, factor=input_lang)
output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask,
'src': input, 'pos_emb': current_pos_emb})
del past_encoder_output
return output_dict
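# Illustrative sketch of the mask construction used in the forward pass above:
# speech-feature input [B x T x F] carries its padding marker in the first
# feature channel, while token input is [B x T]. `pad` stands for
# onmt.constants.PAD; this helper is illustrative only and is not used by the model.
def _make_source_masks_sketch(input, pad):
    flat = input.narrow(2, 0, 1).squeeze(2) if input.dim() > 2 else input  # -> [B x T]
    attn_mask = flat.transpose(0, 1).eq(pad).unsqueeze(0)  # [1 x T x B], encoder self-attention mask
    dec_attn_mask = flat.eq(pad).unsqueeze(1)              # [B x 1 x T], mask handed to the decoder
    return attn_mask, dec_attn_mask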
class DiscourseTransformer(Transformer):
"""Main model in 'Attention is all you need' """
def forward(self, batch, target_mask=None, streaming=False, zero_encoder=False,
mirror=False, streaming_state=None, nce=False, factorize=True, **kwargs):
"""
:param nce: use noise contrastive estimation
:param streaming_state:
:param streaming:
:param mirror: if using mirror network for future anticipation
:param batch: data object sent from the dataset
:param target_mask:
:param zero_encoder: zero out the encoder output (if necessary)
:return:
"""
if self.switchout > 0 and self.training:
batch.switchout(self.switchout, self.src_vocab_size, self.tgt_vocab_size)
src = batch.get('source')
tgt = batch.get('target_input')
src_pos = batch.get('source_pos')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
tgt_lengths = batch.tgt_lengths
past_src = batch.get('past_source')
org_src = src
org_tgt = tgt
src = src.transpose(0, 1) # transpose to have batch first
tgt = tgt.transpose(0, 1)
past_src = past_src.transpose(0, 1)
# Encoder has to receive different inputs
encoder_output = self.encoder(src, past_input=past_src, input_lang=src_lang,
factorize=factorize)
encoder_output = defaultdict(lambda: None, encoder_output)
context = encoder_output['context']
# the state is changed
streaming_state = encoder_output['streaming_state']
# zero out the encoder part for pre-training
if zero_encoder:
context.zero_()
decoder_output = self.decoder(tgt, context, src,
src_lang=src_lang, tgt_lang=tgt_lang, input_pos=tgt_pos, streaming=streaming,
src_lengths=src_lengths, tgt_lengths=tgt_lengths,
streaming_state=streaming_state, factorize=factorize)
# update the streaming state again
decoder_output = defaultdict(lambda: None, decoder_output)
streaming_state = decoder_output['streaming_state']
output = decoder_output['hidden']
# build the output dict based on decoder output
output_dict = defaultdict(lambda: None, decoder_output)
output_dict['hidden'] = output
output_dict['context'] = context
output_dict['src_mask'] = encoder_output['src_mask']
output_dict['src'] = src
output_dict['target_mask'] = target_mask
output_dict['streaming_state'] = streaming_state
output_dict['target'] = batch.get('target_output')
# output_dict['lid_logits'] = decoder_output['lid_logits']
# final layer: computing softmax
if self.training and nce:
output_dict = self.generator[0](output_dict)
else:
logprobs = self.generator[0](output_dict)['logits']
output_dict['logprobs'] = logprobs
# Mirror network: reverse the target sequence and perform backward language model
if mirror:
# tgt_reverse = torch.flip(batch.get('target_input'), (0, ))
tgt_pos = torch.flip(batch.get('target_pos'), (0,))
tgt_reverse = torch.flip(batch.get('target'), (0,))
tgt_reverse_input = tgt_reverse[:-1]
tgt_reverse_output = tgt_reverse[1:]
tgt_reverse_input = tgt_reverse_input.transpose(0, 1)
# perform an additional backward pass
reverse_decoder_output = self.mirror_decoder(tgt_reverse_input, context, src, src_lang=src_lang,
tgt_lang=tgt_lang, input_pos=tgt_pos)
reverse_decoder_output['src'] = src
reverse_decoder_output['context'] = context
reverse_decoder_output['target_mask'] = target_mask
reverse_logprobs = self.mirror_generator[0](reverse_decoder_output)['logits']
output_dict['reverse_target'] = tgt_reverse_output
output_dict['reverse_hidden'] = reverse_decoder_output['hidden']
output_dict['reverse_logprobs'] = reverse_logprobs
output_dict['target_input'] = batch.get('target_input')
output_dict['target_lengths'] = batch.tgt_lengths
# learn weights for mapping (g in the paper)
output_dict['hidden'] = self.mirror_g(output_dict['hidden'])
# compute the logits for each encoder step
if self.ctc:
output_dict['encoder_logits'] = self.ctc_linear(output_dict['context'])
del encoder_output
return output_dict
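    # Illustrative sketch of the mirror branch above: the gold target is flipped
    # along time and re-split into teacher-forcing input/output pairs so that a
    # second decoder can model the sentence right-to-left. Assumes a [T x B]
    # target tensor; this helper is illustrative only and is not used by the model.
    @staticmethod
    def _reverse_teacher_forcing_sketch(target):
        reversed_tgt = torch.flip(target, (0,))  # reverse the time dimension
        reverse_input = reversed_tgt[:-1]        # input for the backward decoder
        reverse_output = reversed_tgt[1:]        # labels for the backward decoder
        return reverse_input, reverse_output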
def load_encoder_weights(self, pretrained_model):
# take the shared encoder section of the encoder
encoder_ = self.encoder.encoder
pretrained_model.encoder.language_embedding = None
enc_language_embedding = encoder_.language_embedding
encoder_.language_embedding = None
encoder_state_dict = pretrained_model.encoder.state_dict()
encoder_.load_state_dict(encoder_state_dict)
encoder_.language_embedding = enc_language_embedding
# TODO: override
def create_decoder_state(self, batch, beam_size=1, type=1, buffering=True, factorize=True, **kwargs):
"""
Generate a new decoder state based on the batch input
:param buffering:
:param streaming:
:param type:
:param batch: Batch object (may not contain target during decoding)
:param beam_size: Size of beam used in beam search
:return:
"""
src = batch.get('source')
tgt_atb = batch.get('target_atb')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
past_src = batch.get('past_source')
src_transposed = src.transpose(0, 1)
# encoder_output = self.encoder(src_transposed, input_pos=src_pos, input_lang=src_lang)
encoder_output = self.encoder(src_transposed, past_input=past_src.transpose(0, 1), input_lang=src_lang,
factorize=factorize)
# The decoding state is still the same?
print("[INFO] create Transformer decoding state with buffering", buffering)
decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'], src_lang,
beam_size=beam_size, model_size=self.model_size,
type=type, buffering=buffering)
return decoder_state
| 10,079 | 42.261803 | 115 | py |
NMTGMinor | NMTGMinor-master/onmt/models/discourse/__init__.py | | 0 | 0 | 0 | py |
NMTGMinor | NMTGMinor-master/onmt/models/discourse/gate_layer.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
from onmt.modules.pre_post_processing import PrePostProcessing
from onmt.modules.linear import FeedForward
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.multilingual_factorized.linear import MFWPositionWiseFeedForward
from onmt.modules.multilingual_factorized.encdec_attention import MFWEncdecMultiheadAttn
from onmt.modules.multilingual_factorized.relative_attention import MFWRelativeSelfMultiheadAttn
from onmt.modules.multilingual_partitioned.linear import MPPositionWiseFeedForward
from onmt.modules.multilingual_partitioned.encdec_attention import MPEncdecMultiheadAttn
from onmt.modules.multilingual_partitioned.relative_attention import MPRelativeSelfMultiheadAttn
# Identity (used by preprocessing() when opt.rezero is set) and ConformerConvBlock
# (used when opt.depthwise_conv is set) are referenced below but were not imported;
# the import paths below are assumed to follow the rest of the codebase.
from onmt.modules.identity import Identity
from onmt.modules.convolution import ConformerConvBlock
def preprocessing(rezero, *args, **kwargs):
if rezero:
return Identity()
else:
return PrePostProcessing(*args, **kwargs)
class RelativeGateEncoderLayer(nn.Module):
def __init__(self, opt, **kwargs):
super(RelativeGateEncoderLayer, self).__init__()
self.variational = opt.variational_dropout
self.depthwise_conv = opt.depthwise_conv
self.mfw = opt.multilingual_factorized_weights
self.mpw = opt.multilingual_partitioned_weights
self.mln = opt.multilingual_layer_norm
self.no_ffn = opt.no_ffn
self.weight_drop = opt.weight_drop
self.multilingual_adapter = opt.multilingual_adapter
self.adapter_bottleneck_size = opt.adapter_bottleneck_size
self.macaron = opt.macaron
self.ffn_scale = 0.5 if self.macaron else 1
self.rezero = opt.rezero
self.learnable_pos = opt.learnable_position_encoding
self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
self.ffn_dropout = opt.ffn_dropout if opt.ffn_dropout >= 0 else opt.dropout
if self.macaron:
self.preprocess_mcr_ffn = preprocessing(self.rezero, opt.model_size, 0.0,
multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
self.postprocess_mcr_ffn = PrePostProcessing(opt.model_size, self.residual_dropout,
sequence='dz' if self.rezero else 'da',
variational=self.variational)
if self.mfw:
self.mcr_feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
else:
self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
if self.mfw:
assert not self.mpw, "[ERROR] factorized and partitioned weights cannot be used at the same time."
self.preprocess_attn = preprocessing(self.rezero, opt.model_size, 0.0,
multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
self.postprocess_attn = PrePostProcessing(opt.model_size, self.residual_dropout,
sequence='dz' if self.rezero else 'da',
variational=self.variational)
self.preprocess_src_attn = preprocessing(self.rezero, opt.model_size, 0.0, sequence='n',
multilingual=self.mln, n_languages=opt.n_languages)
self.postprocess_src_attn = PrePostProcessing(opt.model_size, self.residual_dropout,
sequence='dz' if self.rezero else 'da',
variational=self.variational)
self.preprocess_ffn = preprocessing(self.rezero, opt.model_size, 0.0,
multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
self.postprocess_ffn = PrePostProcessing(opt.model_size, self.residual_dropout,
sequence='dz' if self.rezero else 'da',
variational=self.variational)
d_head = opt.model_size // opt.n_heads
if self.mfw:
self.feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
weight_drop=self.weight_drop,
mfw_activation=opt.mfw_activation,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
self.multihead = MFWRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
learnable_pos=self.learnable_pos,
max_pos=opt.max_pos_length,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
weight_drop=self.weight_drop,
mfw_activation=opt.mfw_activation)
self.multihead_src = MFWEncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout,
n_languages=opt.n_languages, rank=opt.mfw_rank,
use_multiplicative=opt.mfw_multiplicative,
weight_drop=self.weight_drop,
mfw_activation=opt.mfw_activation)
elif self.mpw:
if not self.no_ffn:
self.feedforward = MPPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
factor_size=opt.mpw_factor_size)
self.multihead = MPRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
factor_size=opt.mpw_factor_size)
else:
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=self.variational,
activation=opt.ffn_activation,
glu=opt.ffn_glu)
self.multihead = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
learnable_pos=self.learnable_pos,
max_pos=opt.max_pos_length)
self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)
if self.depthwise_conv:
self.preprocess_conv = preprocessing(self.rezero, opt.model_size, 0.0,
multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
self.postprocess_conv = PrePostProcessing(opt.model_size, self.residual_dropout,
sequence='dz' if self.rezero else 'da',
variational=self.variational)
self.depthwise_conv = ConformerConvBlock(opt.model_size, opt.conv_kernel, bias=True)
else:
self.depthwise_conv = None
self.gate_linear = Linear(2 * opt.model_size, opt.model_size)
self.preprocess_gate = preprocessing(self.rezero, 2 * opt.model_size, 0.0,
multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
def forward(self, input, context, pos_emb, attn_mask, context_mask, src_lang=None, factorize=False):
"""
:param context: discourse context [T_d x B x H]
:param factorize:
:param input: tensor [T x B x H]
:param pos_emb: tensor [T x 1 x H]
:param attn_mask: tensor [1 x T x B]
:param context_mask: tensor [1 x T_d x B]
:param src_lang: tensor [B] or None
:return:
"""
if self.macaron:
out = self.mcr_feedforward(self.preprocess_mcr_ffn(input), src_lang, factorize=factorize)
ffn_scale = self.ffn_scale
input = self.postprocess_mcr_ffn(out * ffn_scale, input)
"""
Self-attention block
"""
query = self.preprocess_attn(input, factor=src_lang)
if self.mfw or self.mpw:
out, _ = self.multihead(query, pos_emb, src_lang, attn_mask, None, factorize=factorize)
else:
out, _ = self.multihead(query, pos_emb, attn_mask, None)
input_present = self.postprocess_attn(out, input)
"""
Context attention block
"""
query = self.preprocess_src_attn(input, factor=src_lang)
if self.mfw or self.mpw:
out, _ = self.multihead_src(query, context, context, src_lang, src_lang, context_mask,
factorize=factorize)
else:
out, _ = self.multihead_src(query, context, context, context_mask)
input_past = self.postprocess_src_attn(out, input)
"""
Gate
"""
gate_input = self.preprocess_gate(torch.cat([input_past, input_present], dim=-1))
gate = torch.sigmoid(self.gate_linear(gate_input))
input = gate * input_present + (1 - gate) * input_past
"""
Feed forward layer
"""
if not self.no_ffn:
out = self.feedforward(self.preprocess_ffn(input, factor=src_lang), src_lang, factorize=factorize)
# rescaling before residual
ffn_scale = self.ffn_scale
input = self.postprocess_ffn(out * ffn_scale, input)
if self.multilingual_adapter:
input = self.adapters(input, src_lang)
return input
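# Illustrative sketch of the gate computed in forward() above:
#     g = sigmoid(W [past ; present]),   out = g * present + (1 - g) * past
# `gate_linear` stands in for the Linear(2 * model_size, model_size) defined in
# __init__ (the layer-norm pre-processing is omitted); illustrative only and
# not used by the model.
def _gate_fusion_sketch(present, past, gate_linear):
    gate = torch.sigmoid(gate_linear(torch.cat([past, present], dim=-1)))
    return gate * present + (1 - gate) * past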
| 11,711 | 51.756757 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/transformer_decoder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional
from collections import defaultdict
import torch
import torch.nn as nn
from .modules.positional_embeddings import PositionalEmbedding, SinusoidalPositionalEmbedding
from .modules.layer_drop import LayerDropModuleList
from onmt.modules.layer_norm import LayerNorm
from .modules.transformer_layer import TransformerDecoderLayerBase
from torch import Tensor
from pretrain_module.modeling_mbart import index_copy
import numpy as np
class TransformerDecoderBase(nn.Module):
"""
Transformer decoder consisting of *cfg.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
    Args:
        cfg: parsed configuration (fairseq-style decoder settings)
        embed_tokens (torch.nn.Embedding): output embedding
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).
        output_projection (optional): externally provided output projection layer
        opt (optional): NMTGMinor option namespace
"""
def __init__(
self,
cfg,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
opt=None
):
self.adapter = None
self.cfg = cfg
super(TransformerDecoderBase, self).__init__()
self.register_buffer("version", torch.Tensor([3]))
self.dropout_module = nn.Dropout(
cfg.dropout
)
self.decoder_layerdrop = cfg.decoder_layerdrop
self.share_input_output_embed = cfg.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder_embed_dim
self.embed_dim = embed_dim
self.padding_idx = embed_tokens.padding_idx
print("Decoder padding idx:", self.padding_idx)
self.max_target_positions = cfg.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
self.project_in_dim = (
torch.nn.Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
embed_dim,
self.padding_idx,
learned=cfg.decoder_learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
self.checkpoint_activations = cfg.checkpoint_activations
if cfg.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
self.cross_self_attention = cfg.cross_self_attention
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(cfg, no_encoder_attn)
for _ in range(cfg.decoder_layers)
]
)
self.num_layers = len(self.layers)
if cfg.decoder_normalize_before and not cfg.no_decoder_final_norm:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
from onmt.modules.optimized.flash_mha import flash_bert_mha
self.fast_bert_mha = flash_bert_mha
self.n_languages = 0
# for layer in self.layers:
# layer.add_adapters(opt.n_languages, adapter_location=opt.decoder_adapter)
def build_decoder_layer(self, cfg, no_encoder_attn=False):
        layer = TransformerDecoderLayerBase(cfg, no_encoder_attn)
# removed checkpoint and fsdp
return layer
def add_adapters(self, n_languages):
from .modules.efficient_adapters import EfficientAdapter
self.adapter = EfficientAdapter(n_languages * self.num_layers,
self.embed_dim, self.embed_dim // 4)
self.n_languages = n_languages
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
checkpointing_ffn=False,
checkpointing_self_attn=False,
checkpointing_cross_attn=False,
lang=None,
**kwargs,
):
bsz, qlen = input_ids.size()
klen = encoder_hidden_states.size(0)
# embed positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
input_ids, incremental_state=None
)
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(input_ids)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
can_run_fast_bert_mha = False
if self.fast_bert_mha is not None and torch.is_autocast_enabled():
can_run_fast_bert_mha = True
# unpadding x
if attention_mask is None:
padding_mask = input_ids.new_zeros(bsz, qlen)
else:
padding_mask = attention_mask
padding_mask = padding_mask.contiguous().long()
lengths = (1 - padding_mask).sum(dim=1)
lengths = lengths.cpu().tolist() # list of lengths for B seqs
x = x.view(-1, x.size(-1))
non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
x = x.index_select(0, non_pad_indices)
max_len = max(lengths)
# cumulative sequence lengths (required input for fmha)
a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=x.device)
non_pad_indices_q = non_pad_indices
# unpadding context
# transposing from [T x B x H] to [B x T x H]
encoder_hidden_states = encoder_hidden_states.transpose(0, 1).contiguous()
padding_mask = encoder_attention_mask
if padding_mask is None:
context_len = encoder_hidden_states.size(1)
padding_mask = input_ids.new_zeros(bsz, context_len)
padding_mask = padding_mask.long()
lengths = (1 - padding_mask).sum(dim=1)
lengths = lengths.cpu().tolist() # list of lengths for B seqs
encoder_hidden_states = encoder_hidden_states.view(-1, encoder_hidden_states.size(-1))
non_pad_indices_kv = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
encoder_hidden_states = encoder_hidden_states.index_select(0, non_pad_indices_kv)
max_len_kv = max(lengths)
# cumulative sequence lengths (required input for fmha)
a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
cu_seqlens_kv = torch.cumsum(a, 0).to(dtype=torch.int32, device=encoder_hidden_states.device)
self_attn_mask = None
else:
x = x.transpose(0, 1).contiguous()
max_len, cu_seqlens = None, None
max_len_kv, cu_seqlens_kv = None, None
# causal masking.
self_attn_mask = torch.triu(
x.new_ones(qlen, qlen), diagonal=1).bool()
non_pad_indices_q, non_pad_indices_kv = None, None
self_attn_padding_mask: Optional[Tensor] = None
# decoder layers
attns = list()
for idx, layer in enumerate(self.layers):
x, layer_attn, _ = layer(
x,
encoder_hidden_states,
encoder_attention_mask,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
checkpointing_ffn=checkpointing_ffn,
checkpointing_self_attn=checkpointing_self_attn,
checkpointing_cross_attn=checkpointing_cross_attn,
max_len=max_len, cu_seqlens=cu_seqlens,
max_len_kv=max_len_kv, cu_seqlens_kv=cu_seqlens_kv,
)
# run through the adapter
if self.adapter is not None:
assert lang is not None
adapter_id = self.adapter.num_modules // self.num_layers * idx + lang
x = self.adapter(x, adapter_id)
attns.append(layer_attn)
if self.layer_norm is not None:
x = self.layer_norm(x)
if can_run_fast_bert_mha:
seq_len = qlen
x = index_copy(x, non_pad_indices_q, bsz * seq_len)
x = x.view(bsz, seq_len, -1).transpose(0, 1).contiguous()
return x, attns
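    # Illustrative sketch of the un-padding performed above for the varlen
    # flash-attention path: padded rows are dropped and the batch is described
    # by cumulative sequence lengths (cu_seqlens). Assumes `x` is [B x T x H]
    # and `padding_mask` is [B x T] with 1 at padded positions; illustrative
    # only and not used by the model.
    @staticmethod
    def _unpad_for_varlen_attn_sketch(x, padding_mask):
        bsz, seqlen, hidden = x.size()
        lengths = (1 - padding_mask.long()).sum(dim=1)                # true length of each sequence
        non_pad_indices = torch.nonzero(padding_mask.reshape(-1).eq(0)).squeeze(1)
        x_unpadded = x.reshape(-1, hidden).index_select(0, non_pad_indices)
        cu_seqlens = torch.zeros(bsz + 1, dtype=torch.int32, device=x.device)
        cu_seqlens[1:] = torch.cumsum(lengths, 0).to(torch.int32)     # [0, l1, l1+l2, ...]
        return x_unpadded, cu_seqlens, int(lengths.max().item()), non_pad_indices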
def step(self, input, decoder_state, **kwargs):
# context is stored in the decoder state in [T B H] format
encoder_hidden_states = decoder_state.context
encoder_attention_mask = decoder_state.src_mask
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
atb = decoder_state.tgt_atb
src_lang = decoder_state.src_lang
buffering = decoder_state.buffering
input_ids = input
input_shape = input_ids.size()
time_step = input.size(1)
input_ = input
# embed positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
input_ids, incremental_state=None
)
x = self.embed_scale * self.embed_tokens(input_ids)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
bsz, qlen = x.size(0), x.size(1)
using_buffer = (x.size(1) > 1 and len(buffers) > 0)
if buffering:
# use the last value of input to continue decoding
if using_buffer:
                # the attention buffers already hold the previous time steps, so only
                # the newest position needs to be processed in this step
x = x[:, -1:, :]
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
can_run_fast_bert_mha = False
if self.fast_bert_mha is not None and (torch.is_autocast_enabled() or x.dtype == torch.half) and not buffering:
can_run_fast_bert_mha = True
# unpadding x
padding_mask = input_ids.new_zeros(bsz, qlen)
padding_mask = padding_mask.contiguous().long()
lengths = (1 - padding_mask).sum(dim=1)
lengths = lengths.cpu().tolist() # list of lengths for B seqs
x = x.view(-1, x.size(-1))
non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
x = x.index_select(0, non_pad_indices)
max_len = max(lengths)
# cumulative sequence lengths (required input for fmha)
a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=x.device)
non_pad_indices_q = non_pad_indices
# unpadding context
# transposing from [T x B x H] to [B x T x H]
encoder_hidden_states = encoder_hidden_states.transpose(0, 1).contiguous()
padding_mask = encoder_attention_mask
if padding_mask is None:
context_len = encoder_hidden_states.size(1)
padding_mask = input_ids.new_zeros(bsz, context_len)
padding_mask = padding_mask.long()
lengths = (1 - padding_mask).sum(dim=1)
lengths = lengths.cpu().tolist() # list of lengths for B seqs
encoder_hidden_states = encoder_hidden_states.view(-1, encoder_hidden_states.size(-1))
non_pad_indices_kv = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
encoder_hidden_states = encoder_hidden_states.index_select(0, non_pad_indices_kv)
max_len_kv = max(lengths)
# cumulative sequence lengths (required input for fmha)
a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
cu_seqlens_kv = torch.cumsum(a, 0).to(dtype=torch.int32, device=encoder_hidden_states.device)
self_attn_mask = None
else:
non_pad_indices_q, non_pad_indices_kv = None, None
# B x T x C -> T x B x C
x = x.transpose(0, 1).contiguous()
max_len = None
cu_seqlens = None
max_len_kv = None
cu_seqlens_kv = None
# causal masking.
self_attn_mask = torch.triu(
x.new_ones(qlen, qlen), diagonal=1).bool()
if buffering and using_buffer:
self_attn_mask = self_attn_mask[-1:, :]
# decoder layers
attns = list()
for idx, layer in enumerate(self.layers):
if buffering:
buffer = buffers[idx] if idx in buffers else None
else:
buffer = None
x, layer_attn, buffer = layer(
x,
encoder_hidden_states,
encoder_attention_mask,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=None,
max_len = max_len, cu_seqlens = cu_seqlens,
max_len_kv = max_len_kv, cu_seqlens_kv = cu_seqlens_kv,
incremental=buffering, incremental_cache=buffer,
)
if buffering:
decoder_state.update_attention_buffer(buffer, idx)
attns.append(layer_attn)
if self.layer_norm is not None:
x = self.layer_norm(x)
if can_run_fast_bert_mha:
seq_len = qlen
x = index_copy(x, non_pad_indices_q, bsz * seq_len)
x = x.view(bsz, seq_len, -1).transpose(0, 1).contiguous()
output = x[-1].unsqueeze(0)
coverage = attns[-1]
if coverage is None:
            coverage = output.new_zeros(bsz, qlen, qlen)  # qlen is always defined here (seq_len only exists on the fast path)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = encoder_hidden_states
return output_dict
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
    def buffered_future_mask(self, tensor):
        dim = tensor.size(0)
        # lazily (re)build the cached causal mask when it is missing, lives on the
        # wrong device, or is too small for the current sequence length
        # (comparing self._future_mask.device != tensor.device directly does not
        # work in TorchScript, hence the workaround below)
        if (
            not hasattr(self, "_future_mask")
            or self._future_mask.size(0) == 0
            or (not self._future_mask.device == tensor.device)
            or self._future_mask.size(0) < dim
        ):
            self._future_mask = torch.triu(
                torch.full([dim, dim], float("-inf")), 1
            )
        self._future_mask = self._future_mask.to(tensor)
        return self._future_mask[:dim, :dim]
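# Illustrative sketch of the two causal-mask conventions used by this decoder:
# forward()/step() build a boolean upper-triangular mask (True = blocked), while
# buffered_future_mask() caches an additive mask with -inf above the diagonal.
# Both encode the same "no attention to future positions" rule; illustrative
# only and not used by the model.
def _causal_mask_sketch(qlen, device=None):
    bool_mask = torch.triu(torch.ones(qlen, qlen, device=device), diagonal=1).bool()
    additive_mask = torch.triu(torch.full((qlen, qlen), float("-inf"), device=device), diagonal=1)
    return bool_mask, additive_mask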
| 15,616 | 35.832547 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/transformer_encoder.py |
import math
from typing import Dict, List, Optional
import torch
import torch.nn as nn
# from fairseq.modules import (
# FairseqDropout,
# LayerDropModuleList,
# LayerNorm,
# PositionalEmbedding,
# SinusoidalPositionalEmbedding,
# )
from .modules.positional_embeddings import PositionalEmbedding, SinusoidalPositionalEmbedding
from .modules.layer_drop import LayerDropModuleList
from onmt.modules.layer_norm import LayerNorm
from .modules.transformer_layer import TransformerEncoderLayerBase
from pretrain_module.modeling_mbart import index_copy
import numpy as np
class TransformerEncoderBase(nn.Module):
"""
Transformer encoder consisting of *cfg.encoder.layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
    Args:
        cfg: parsed configuration (fairseq-style encoder settings)
        embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, cfg, embed_tokens):
self.cfg = cfg
super(TransformerEncoderBase, self).__init__()
# TODO
# self.dictionary = dictionary
self.register_buffer("version", torch.Tensor([3]))
# TODO
self.dropout_module = nn.Dropout(cfg.dropout)
# TODO
self.encoder_layerdrop = cfg.encoder_layerdrop
# TODO
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = cfg.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
if not cfg.no_token_positional_embeddings:
self.embed_positions = (
PositionalEmbedding(
cfg.max_source_positions,
embed_dim,
self.padding_idx,
learned=cfg.encoder_learned_pos,
)
)
else:
self.embed_positions = None
# TODO
if cfg.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
if self.encoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[self.build_encoder_layer(cfg) for i in range(cfg.encoder_layers)]
)
self.num_layers = len(self.layers)
if cfg.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
from onmt.modules.optimized.flash_mha import flash_bert_mha
self.fast_bert_mha = flash_bert_mha
self.n_languages = -1
self.has_adapter = False
def build_encoder_layer(self, cfg):
layer = TransformerEncoderLayerBase(cfg)
# removed the checkpointing and fdsp part
return layer
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
return x, embed
def forward(
self,
src_tokens,
src_mask: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
return self.forward_scriptable(
src_tokens, src_mask, return_all_hiddens, token_embeddings
)
# TorchScript doesn't support super() method so that the scriptable Subclass
# can't access the base class model in Torchscript.
# Current workaround is to add a helper function with different name and
# call the helper function from scriptable Subclass.
def forward_scriptable(
self,
src_tokens,
src_mask: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
"""
# compute padding mask
if src_mask is None:
encoder_padding_mask = src_tokens.eq(self.padding_idx)
else:
encoder_padding_mask = src_mask
has_pads = src_tokens.device.type == "xla" or encoder_padding_mask.any()
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# account for padding while computing the representation
if has_pads:
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
# TODO: use fast bert mha
can_run_fast_bert_mha = False
# check if fast bert mha can be run
seq_len = x.size(1)
bsz = x.size(0)
if self.fast_bert_mha and torch.is_autocast_enabled():
can_run_fast_bert_mha = True
# print("Can run FAST BERT MHA")
padding_mask = encoder_padding_mask # [B x T]
# masked positions = 1 so to compute length we need the (1 -)
if padding_mask is None:
padding_mask = x.new_zeros(bsz, seq_len)
padding_mask = padding_mask.long()
lengths = (1 - padding_mask).sum(dim=1)
lengths = lengths.cpu().tolist() # list of lengths for B seqs
x = x.view(-1, x.size(-1))
non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
x = x.index_select(0, non_pad_indices)
max_len = max(lengths)
# cumulative sequence lengths (required input for fmha)
a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=x.device)
else:
max_len = -1
cu_seqlens = None
non_pad_indices = None
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = []
if return_all_hiddens:
encoder_states.append(x)
# encoder layers
for layer in self.layers:
x = layer(
x, encoder_padding_mask=encoder_padding_mask if has_pads else None,
max_len=max_len, cu_seqlens=cu_seqlens
)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
# The Pytorch Mobile lite interpreter does not support returning NamedTuple in
# `forward` so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
src_lengths = src_tokens.ne(self.padding_idx).sum(dim=1, dtype=torch.int32).reshape(-1, 1).contiguous()
# return {
# "encoder_out": [x], # T x B x C
# "encoder_padding_mask": [encoder_padding_mask], # B x T
# "encoder_embedding": [encoder_embedding], # B x T x C
# "encoder_states": encoder_states, # List[T x B x C]
# "src_tokens": [],
# "src_lengths": [src_lengths],
# }
if can_run_fast_bert_mha:
# remove the patch
# if x.size(0) > total_bsz:
# x = x[:total_bsz, :]
x = index_copy(x, non_pad_indices, bsz * seq_len)
x = x.view(bsz, seq_len, -1)
x = x.transpose(0, 1).contiguous()
return x, encoder_padding_mask, encoder_embedding, encoder_states
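# Illustrative sketch of the re-padding step above: index_copy() scatters the
# un-padded rows back into a dense [B*T x H] buffer (zeros at padded positions)
# before reshaping to [B x T x H]. A plain-torch equivalent is shown below (the
# imported index_copy may use a fused kernel); illustrative only and not used
# by the model.
def _repad_sketch(x_unpadded, non_pad_indices, bsz, seqlen):
    out = x_unpadded.new_zeros(bsz * seqlen, x_unpadded.size(-1))
    out.index_copy_(0, non_pad_indices, x_unpadded)
    return out.view(bsz, seqlen, -1)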
| 7,894 | 33.627193 | 111 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/__init__.py | | 0 | 0 | 0 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/deltalm.py |
import os
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from .transformer_encoder import TransformerEncoderBase
from .transformer_decoder import TransformerDecoderBase
from .modules.transformer_layer import TransformerDecoderLayerBase
from .modules.utils import get_activation_fn
from onmt.modules.layer_norm import LayerNorm
from .modules.multihead_attention import MultiHeadAttention
from onmt.modules.optimized.dropout_add import fused_dropout_add
from pretrain_module.modeling_mbart import index_copy
def dropout_residual_connection(x, residual, dropout_module, is_training):
return dropout_add_jit(x, residual, dropout_module.p, is_training)
@torch.jit.script
def dropout_add_jit(x, residual, prob, is_training):
# type: (Tensor, Tensor, float, bool) -> Tensor
out = torch.nn.functional.dropout(x, p=prob, training=is_training)
out = residual + out
return out
def linear_act_linear(x, fc1, fc2, prob, is_training, activation_func):
out = fc1(x)
out = activation_func(out)
out = torch.nn.functional.dropout(out, p=prob, training=is_training)
out = fc2(out)
return out
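# Illustrative sketch of how the two helpers above combine into the standard
# pre-norm residual FFN step used by the decoder layers below,
#     y = x + Dropout(W2 Act(W1 x))
# (the preceding layer norm is omitted). `act_dropout_p` is the inner
# activation dropout; illustrative only and not used by the model.
def _ffn_block_sketch(x, fc1, fc2, act_dropout_p, dropout_module, is_training):
    h = linear_act_linear(x, fc1, fc2, act_dropout_p, is_training,
                          torch.nn.functional.gelu)
    return dropout_residual_connection(h, x, dropout_module, is_training)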
def upgrade_state_dict_for_deltalm(
state_dict: Dict[str, Any], pretrained_deltalm_checkpoint: str, is_encoder=True,
) -> Dict[str, Any]:
if not os.path.exists(pretrained_deltalm_checkpoint):
raise IOError("Model file not found: {}".format(pretrained_deltalm_checkpoint))
with open(pretrained_deltalm_checkpoint, "rb") as f:
state = torch.load(f, map_location=torch.device("cpu"))
if 'weights' in state:
deltalm_state_dict = state['weights']
elif 'model' in state:
deltalm_state_dict = state['model']
else:
deltalm_state_dict = state
new_deltalm_state_dict = {}
for key in deltalm_state_dict.keys():
if is_encoder:
if key.startswith('encoder.') or key.startswith('src_embedding.'):
new_key = key.replace('encoder.', '')
new_key = new_key.replace('src_embedding.', '')
new_deltalm_state_dict[new_key] = deltalm_state_dict[key]
else:
if key.startswith('decoder.') or key.startswith('tgt_embedding.'):
new_key = key.replace('decoder.', '')
new_key = new_key.replace('tgt_embedding.', '')
new_deltalm_state_dict[new_key] = deltalm_state_dict[key]
deltalm_state_dict = new_deltalm_state_dict
# print(deltalm_state_dict.keys())
for key in deltalm_state_dict.keys():
if "output_projection" in key:
continue
map_key = key
map_key = map_key.replace('.ffn_1.fc1', '.fc3')
map_key = map_key.replace('.ffn_1.fc2', '.fc4')
map_key = map_key.replace('.ffn_2', '')
map_key = map_key.replace('.ffn.', '.')
map_key = map_key.replace('emb_layer_norm', 'layernorm_embedding')
# print(key, state_dict[map_key].size(), deltalm_state_dict[key].size())
assert map_key in state_dict, map_key
if 'embed_positions' in key or 'embed_tokens' in key:
left_size = state_dict[map_key].size(0)
right_size = deltalm_state_dict[key].size(0)
if left_size <= right_size:
state_dict[map_key] = deltalm_state_dict[key][:left_size]
else:
state_dict[map_key][:right_size] = deltalm_state_dict[key]
else:
state_dict[map_key] = deltalm_state_dict[key]
return state_dict
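# Illustrative examples of the key renaming performed above (derived from the
# replace() calls in the loop; the left-hand keys are hypothetical checkpoint
# entries):
#     'layers.0.ffn_1.fc1.weight'  -> 'layers.0.fc3.weight'
#     'layers.0.ffn_1.fc2.weight'  -> 'layers.0.fc4.weight'
#     'layers.0.ffn_2.fc1.weight'  -> 'layers.0.fc1.weight'
#     'layers.0.ffn.fc2.weight'    -> 'layers.0.fc2.weight'
#     'emb_layer_norm.weight'      -> 'layernorm_embedding.weight'
# Embedding and positional tables are truncated (or partially filled) when the
# checkpoint and the model disagree on vocabulary / position sizes.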
class DeltaLMEncoder(TransformerEncoderBase):
def __init__(self, args, embed_tokens, opt=None):
super().__init__(args, embed_tokens)
if opt is not None:
print("Overriding dropout values for DeltaLM....")
args.decoder_layerdrop = opt.death_rate_decoder
args.activation_dropout = opt.ffn_dropout
if getattr(args, "pretrained_deltalm_checkpoint", "") != "":
self_state_dict = self.state_dict()
deltalm_loaded_state_dict = upgrade_state_dict_for_deltalm(
state_dict=self_state_dict,
pretrained_deltalm_checkpoint=args.pretrained_deltalm_checkpoint,
is_encoder=True,
)
for key in self_state_dict:
if key not in deltalm_loaded_state_dict:
print("Warning: key %s not found in pretrained dictionary." % key)
for key in deltalm_loaded_state_dict:
if key not in self_state_dict:
print("Warning: key %s in pretrained dictionary not found in current model." % key)
self.load_state_dict(deltalm_loaded_state_dict, strict=True)
print("Load DeltaLM's encoder from {0}".format(args.pretrained_deltalm_checkpoint))
class DeltaLMDecoder(TransformerDecoderBase):
def __init__(self, args, embed_tokens, no_encoder_attn=False, opt=None):
if opt is not None:
print("Overriding dropout values for DeltaLM....")
args.decoder_layerdrop = opt.death_rate_decoder
args.activation_dropout = opt.ffn_dropout
super().__init__(args, embed_tokens, no_encoder_attn, opt)
if getattr(args, "pretrained_deltalm_checkpoint", "") != "":
deltalm_loaded_state_dict = upgrade_state_dict_for_deltalm(
state_dict=self.state_dict(),
pretrained_deltalm_checkpoint=args.pretrained_deltalm_checkpoint,
is_encoder=False,
)
self.load_state_dict(deltalm_loaded_state_dict, strict=False)
print("Load DeltaLM's decoder from {0}".format(args.pretrained_deltalm_checkpoint))
self.model_size = args.decoder_embed_dim
self.switchout = 0.0
self.adapter = None
if opt is not None and opt.decoder_adapter > 0:
print("[INFO] Adding MBART Adapters for %d languages" % opt.n_languages)
self.add_adapters(opt.n_languages)
def build_decoder_layer(self, args, no_encoder_attn=False):
layer = DeltaLMDecoderLayer(args, no_encoder_attn)
return layer
class DeltaLMDecoderLayer(TransformerDecoderLayerBase):
def __init__(
self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
):
super(TransformerDecoderLayerBase, self).__init__()
self.embed_dim = args.decoder_embed_dim
self.dropout_module = nn.Dropout(
args.dropout
)
self.cross_self_attention = getattr(args, "cross_self_attention", False)
self.self_attn = self.build_self_attention(
self.embed_dim,
args,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.activation_fn = get_activation_fn(
activation=str(args.activation_fn)
if getattr(args, "activation_fn", None) is not None
else "relu"
)
activation_dropout_p = getattr(args, "activation_dropout", 0) or 0
if activation_dropout_p == 0:
# for backwards compatibility with models that use args.relu_dropout
activation_dropout_p = getattr(args, "relu_dropout", 0) or 0
self.activation_dropout_module = nn.Dropout(
float(activation_dropout_p)
)
self.normalize_before = args.decoder_normalize_before
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
self.fc1 = self.build_fc1(
self.embed_dim,
args.decoder_ffn_embed_dim
)
self.fc2 = self.build_fc2(
args.decoder_ffn_embed_dim,
self.embed_dim
)
self.fc3 = self.build_fc1(
self.embed_dim,
args.decoder_ffn_embed_dim
)
self.fc4 = self.build_fc2(
args.decoder_ffn_embed_dim,
self.embed_dim
)
self.ffn_layer_norm = LayerNorm(self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.need_attn = True
self.checkpoint_activations = args.checkpoint_activations
self.activation_fn_name = args.activation_fn
self.fused = False
self.fused_function = None
if self.activation_fn_name == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif self.activation_fn_name == 'gelu':
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
from onmt.modules.optimized.flash_mha import flash_bert_mha
self.fast_bert_mha = flash_bert_mha
# TODO: add incremental states
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
checkpointing_ffn=False,
checkpointing_self_attn=False,
checkpointing_cross_attn=False,
incremental=False, incremental_cache=None,
max_len=None, cu_seqlens=None,
max_len_kv=None, cu_seqlens_kv=None,
**kwargs
):
"""
"""
if incremental and incremental_cache is None:
incremental_cache = dict()
if need_head_weights:
need_attn = True
###############################################
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, attn, _ = self.self_attn(
hidden_states=x,
attention_mask=self_attn_mask,
output_attentions=False,
checkpointing=checkpointing_self_attn,
cu_seqlens = cu_seqlens, max_len = max_len,
incremental=incremental, incremental_cache=incremental_cache
)
x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
###############################################
residual = x
if self.normalize_before:
x = self.ffn_layer_norm(x)
if self.fused and x.is_cuda:
dropout_p = self.activation_dropout_module.p if self.training else 0.0
weights = [self.fc3.weight, self.fc4.weight]
biases = [self.fc3.bias, self.fc4.bias]
x = self.fused_function(dropout_p, checkpointing_ffn, x, *weights, *biases)
else:
x = self.activation_fn(self.fc3(x))
x = self.activation_dropout_module(x)
x = self.fc4(x)
x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
if not self.normalize_before:
x = self.ffn_layer_norm(x)
###############################################
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
x, attn, _ = self.encoder_attn(
hidden_states=x,
key_value_states=encoder_out,
attention_mask=encoder_padding_mask,
output_attentions=False,
checkpointing=checkpointing_cross_attn,
cu_seqlens=cu_seqlens, max_len=max_len,
cu_seqlens_kv=cu_seqlens_kv, max_len_kv=max_len_kv,
incremental=incremental, incremental_cache=incremental_cache
)
x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
###############################################
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
if self.fused and x.is_cuda:
dropout_p = self.activation_dropout_module.p if self.training else 0.0
weights = [self.fc1.weight, self.fc2.weight]
biases = [self.fc1.bias, self.fc2.bias]
x = self.fused_function(dropout_p, checkpointing_ffn, x, *weights, *biases)
else:
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x, attn, incremental_cache
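# Summary of the block ordering implemented in DeltaLMDecoderLayer.forward above
# (pre-norm variant; every step is followed by dropout + residual):
#   1. self-attention        (self_attn_layer_norm -> self_attn)
#   2. first feed-forward    (ffn_layer_norm -> fc3 -> activation -> fc4)
#   3. cross-attention       (encoder_attn_layer_norm -> encoder_attn)
#   4. second feed-forward   (final_layer_norm -> fc1 -> activation -> fc2)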
class OmniDeltaLMDecoderLayer(DeltaLMDecoderLayer):
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
checkpointing_ffn=False,
checkpointing_self_attn=False,
checkpointing_cross_attn=False,
stack=None,
**kwargs
):
"""
Args:
x: [T x B x D]
encoder_out: [T x B x D]
encoder_padding_mask: [B x T]
self_attn_mask: [B x T] or [T x T]?
self_attn_padding_mask: [B x T]
need_attn:
need_head_weights:
checkpointing_ffn:
checkpointing_self_attn:
checkpointing_cross_attn:
stack: a list of previously used inputs (used for all-attention)
**kwargs:
Returns:
"""
if need_head_weights:
need_attn = True
###############################################
residual = x
# should we need layer norm anymore? (probably)
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, attn, _ = self.self_attn(
hidden_states=x,
attention_mask=self_attn_mask,
output_attentions=False,
checkpointing=checkpointing_self_attn
)
# x = self.dropout_module(x)
# x = self.residual_connection(x, residual)
# x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
###############################################
residual = x
if self.normalize_before:
x = self.ffn_layer_norm(x)
if self.fused and x.is_cuda:
dropout_p = self.activation_dropout_module.p if self.training else 0.0
weights = [self.fc3.weight, self.fc4.weight]
biases = [self.fc3.bias, self.fc4.bias]
x = self.fused_function(dropout_p, checkpointing_ffn, x, *weights, *biases)
else:
x = self.activation_fn(self.fc3(x))
x = self.activation_dropout_module(x)
x = self.fc4(x)
# x = self.dropout_module(x)
# x = self.residual_connection(x, residual)
x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
if not self.normalize_before:
x = self.ffn_layer_norm(x)
###############################################
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
x, attn, _ = self.encoder_attn(
hidden_states=x,
key_value_states=encoder_out,
attention_mask=encoder_padding_mask,
output_attentions=False,
checkpointing=checkpointing_cross_attn
)
# x = self.dropout_module(x)
# x = self.residual_connection(x, residual)
x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
###############################################
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
if self.fused and x.is_cuda:
dropout_p = self.activation_dropout_module.p if self.training else 0.0
weights = [self.fc1.weight, self.fc2.weight]
biases = [self.fc1.bias, self.fc2.bias]
x = self.fused_function(dropout_p, checkpointing_ffn, x, *weights, *biases)
else:
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
# x = self.dropout_module(x)
# x = self.residual_connection(x, residual)
x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x, attn, None
| 17,669 | 34.841785 | 103 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/multihead_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
# from fairseq import utils
# from fairseq.incremental_decoding_utils import with_incremental_state
# from fairseq.modules.fairseq_dropout import FairseqDropout
# from fairseq.modules.quant_noise import quant_noise
from torch import Tensor, nn
from torch.nn import Parameter
class MultiHeadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = nn.Dropout(dropout)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
is_tpu = query.device.type == "xla"
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
                assert (src_len, bsz) == value.shape[:2]
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training,
key_padding_mask,
need_weights,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
# if (
# not is_tpu # don't use PyTorch version on TPUs
# and incremental_state is None
# and not static_kv
# # A workaround for quantization to work. Otherwise JIT compilation
# # treats bias in linear module as method.
# and not torch.jit.is_scripting()
# ):
# assert key is not None and value is not None
#
#
# if incremental_state is not None:
# saved_state = self._get_input_buffer(incremental_state)
# if saved_state is not None and "prev_key" in saved_state:
# # previous time steps are cached - no need to recompute
# # key and value if they are static
# if static_kv:
# assert self.encoder_decoder_attention and not self.self_attention
# key = value = None
# else:
# saved_state = None
#
# if self.self_attention:
# q = self.q_proj(query)
# k = self.k_proj(query)
# v = self.v_proj(query)
# elif self.encoder_decoder_attention:
# # encoder-decoder attention
# q = self.q_proj(query)
# if key is None:
# assert value is None
# k = v = None
# else:
# k = self.k_proj(key)
# v = self.v_proj(key)
#
# else:
# assert key is not None and value is not None
# q = self.q_proj(query)
# k = self.k_proj(key)
# v = self.v_proj(value)
# q *= self.scaling
#
# if self.bias_k is not None:
# assert self.bias_v is not None
# k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
# v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
# if attn_mask is not None:
# attn_mask = torch.cat(
# [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
# )
# if key_padding_mask is not None:
# key_padding_mask = torch.cat(
# [
# key_padding_mask,
# key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
# ],
# dim=1,
# )
#
# q = (
# q.contiguous()
# .view(tgt_len, bsz * self.num_heads, self.head_dim)
# .transpose(0, 1)
# )
# if k is not None:
# k = (
# k.contiguous()
# .view(-1, bsz * self.num_heads, self.head_dim)
# .transpose(0, 1)
# )
# if v is not None:
# v = (
# v.contiguous()
# .view(-1, bsz * self.num_heads, self.head_dim)
# .transpose(0, 1)
# )
#
# if saved_state is not None:
# # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
# if "prev_key" in saved_state:
# _prev_key = saved_state["prev_key"]
# assert _prev_key is not None
# prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
# if static_kv:
# k = prev_key
# else:
# assert k is not None
# k = torch.cat([prev_key, k], dim=1)
# src_len = k.size(1)
# if "prev_value" in saved_state:
# _prev_value = saved_state["prev_value"]
# assert _prev_value is not None
# prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
# if static_kv:
# v = prev_value
# else:
# assert v is not None
# v = torch.cat([prev_value, v], dim=1)
# prev_key_padding_mask: Optional[Tensor] = None
# if "prev_key_padding_mask" in saved_state:
# prev_key_padding_mask = saved_state["prev_key_padding_mask"]
# assert k is not None and v is not None
# key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
# key_padding_mask=key_padding_mask,
# prev_key_padding_mask=prev_key_padding_mask,
# batch_size=bsz,
# src_len=k.size(1),
# static_kv=static_kv,
# )
#
# saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
# saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
# saved_state["prev_key_padding_mask"] = key_padding_mask
# # In this branch incremental_state is never None
# assert incremental_state is not None
# incremental_state = self._set_input_buffer(incremental_state, saved_state)
# assert k is not None
# assert k.size(1) == src_len
#
# # This is part of a workaround to get around fork/join parallelism
# # not supporting Optional types.
# if key_padding_mask is not None and key_padding_mask.dim() == 0:
# key_padding_mask = None
#
# if key_padding_mask is not None:
# assert key_padding_mask.size(0) == bsz
# assert key_padding_mask.size(1) == src_len
#
# if self.add_zero_attn:
# assert v is not None
# src_len += 1
# k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
# v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
# if attn_mask is not None:
# attn_mask = torch.cat(
# [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
# )
# if key_padding_mask is not None:
# key_padding_mask = torch.cat(
# [
# key_padding_mask,
# torch.zeros(key_padding_mask.size(0), 1).type_as(
# key_padding_mask
# ),
# ],
# dim=1,
# )
#
# attn_weights = torch.bmm(q, k.transpose(1, 2))
# attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
#
# assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
#
# if attn_mask is not None:
# attn_mask = attn_mask.unsqueeze(0)
# if self.onnx_trace:
# attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
# attn_weights += attn_mask
#
# if key_padding_mask is not None:
# # don't attend to padding symbols
# attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
# if not is_tpu:
# attn_weights = attn_weights.masked_fill(
# key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
# float("-inf"),
# )
# else:
# attn_weights = attn_weights.transpose(0, 2)
# attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
# attn_weights = attn_weights.transpose(0, 2)
# attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
#
# if before_softmax:
# return attn_weights, v
#
# attn_weights_float = utils.softmax(
# attn_weights, dim=-1, onnx_trace=self.onnx_trace
# )
# attn_weights = attn_weights_float.type_as(attn_weights)
# attn_probs = self.dropout_module(attn_weights)
#
# assert v is not None
# attn = torch.bmm(attn_probs, v)
# assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
# if self.onnx_trace and attn.size(1) == 1:
# # when ONNX tracing a single decoder step (sequence length == 1)
# # the transpose is a no-op copy before view, thus unnecessary
# attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
# else:
# attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
# attn = self.out_proj(attn)
# attn_weights: Optional[Tensor] = None
# if need_weights:
# attn_weights = attn_weights_float.view(
# bsz, self.num_heads, tgt_len, src_len
# ).transpose(1, 0)
# if not need_head_weights:
# # average attention weights over heads
# attn_weights = attn_weights.mean(dim=0)
#
# return attn, attn_weights
#
# @staticmethod
# def _append_prev_key_padding_mask(
# key_padding_mask: Optional[Tensor],
# prev_key_padding_mask: Optional[Tensor],
# batch_size: int,
# src_len: int,
# static_kv: bool,
# ) -> Optional[Tensor]:
# # saved key padding masks have shape (bsz, seq_len)
# if prev_key_padding_mask is not None and static_kv:
# new_key_padding_mask = prev_key_padding_mask
# elif prev_key_padding_mask is not None and key_padding_mask is not None:
# new_key_padding_mask = torch.cat(
# [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
# )
# # During incremental decoding, as the padding token enters and
# # leaves the frame, there will be a time when prev or current
# # is None
# elif prev_key_padding_mask is not None:
# if src_len > prev_key_padding_mask.size(1):
# filler = torch.zeros(
# (batch_size, src_len - prev_key_padding_mask.size(1)),
# device=prev_key_padding_mask.device,
# )
# new_key_padding_mask = torch.cat(
# [prev_key_padding_mask.float(), filler.float()], dim=1
# )
# else:
# new_key_padding_mask = prev_key_padding_mask.float()
# elif key_padding_mask is not None:
# if src_len > key_padding_mask.size(1):
# filler = torch.zeros(
# (batch_size, src_len - key_padding_mask.size(1)),
# device=key_padding_mask.device,
# )
# new_key_padding_mask = torch.cat(
# [filler.float(), key_padding_mask.float()], dim=1
# )
# else:
# new_key_padding_mask = key_padding_mask.float()
# else:
# new_key_padding_mask = prev_key_padding_mask
# return new_key_padding_mask
#
# @torch.jit.export
# def reorder_incremental_state(
# self,
# incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
# new_order: Tensor,
# ):
# """Reorder buffered internal state (for incremental generation)."""
# input_buffer = self._get_input_buffer(incremental_state)
# if input_buffer is not None:
# for k in input_buffer.keys():
# input_buffer_k = input_buffer[k]
# if input_buffer_k is not None:
# if self.encoder_decoder_attention and input_buffer_k.size(
# 0
# ) == new_order.size(0):
# break
# input_buffer[k] = input_buffer_k.index_select(0, new_order)
# incremental_state = self._set_input_buffer(incremental_state, input_buffer)
# return incremental_state
#
# def _get_input_buffer(
# self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
# ) -> Dict[str, Optional[Tensor]]:
# result = self.get_incremental_state(incremental_state, "attn_state")
# if result is not None:
# return result
# else:
# empty_result: Dict[str, Optional[Tensor]] = {}
# return empty_result
#
# def _set_input_buffer(
# self,
# incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
# buffer: Dict[str, Optional[Tensor]],
# ):
# return self.set_incremental_state(incremental_state, "attn_state", buffer)
#
# def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
# return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim : 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
| 19,975
| 40.272727
| 90
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/deltalm/modules/efficient_adapters.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import get_activation_fn
from onmt.modules.layer_norm import layer_norm_func
from onmt.modules.optimized.linear import Linear as LinearModule
def Linear(in_features, out_features, bias=True):
m = LinearModule(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
class EfficientAdapter(nn.Module):
def __init__(
self,
num_modules: int,
input_size: int,
bottleneck_size: int,
activation_fn: str = "relu",
static_layernorm: bool = False,
):
"""
Implements an Adapter layer following the architecture of
Bapna and Firat 2019 - Simple, Scalable Adaptation for Neural Machine Translation
https://aclanthology.org/D19-1165/
This particular implementation uses a shared up/down projection matrix
for all adapters and in the forward-pass it simply indexes the
corresponding adapter (row). This is a workaround for an efficiency
bug that occurs in distributed training.
Args:
input_size (int): the dimensionality of the input feature vector
bottleneck_size (int): the dimensionality of the bottleneck vector
activation_fn (str): the activation function used after the down-projection
static_layernorm (bool): use LayerNorm without trainable parameters
"""
super().__init__()
# reuse the transformer Linear layer to have consistent init with the rest of the model
self.num_modules = num_modules
self.static_layernorm = static_layernorm
self.down_weight = Linear(bottleneck_size * input_size, num_modules, bias=False)
self.down_bias = Linear(bottleneck_size, num_modules, bias=False)
self.up_weight = Linear(bottleneck_size * input_size, num_modules, bias=False)
self.up_bias = Linear(input_size, num_modules, bias=False)
if not self.static_layernorm:
self.layer_norm_gammas = Linear(input_size, num_modules)
self.layer_norm_betas = Linear(input_size, num_modules)
self.activation = get_activation_fn(activation_fn)
# ensure normal initialization
# initialize the parameters of each "adapter" row similar to nn.Linear()
with torch.no_grad():
for i in range(num_modules):
self.down_weight.weight[i] = Linear(input_size, bottleneck_size).weight.view(-1)
self.up_weight.weight[i] = Linear(bottleneck_size, input_size).weight.view(-1)
self.down_bias.weight[i].fill_(0)
                self.up_bias.weight[i].fill_(0)
if not self.static_layernorm:
self.layer_norm_gammas.weight[i].fill_(1)
self.layer_norm_betas.weight[i].fill_(0)
for n, p in self.named_parameters():
p.adapter = True
p.label = n
# Fused MLP config
self.fused = False
self.fused_function = None
if activation_fn == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif activation_fn == 'gelu':
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
def forward(self, x: torch.Tensor, index: int):
shortcut = x
down_w = self.down_weight.weight[index]
up_w = self.up_weight.weight[index]
down_b = self.down_bias.weight[index]
up_b = self.up_bias.weight[index]
ln_g = None
ln_b = None
if not self.static_layernorm:
# ensure ln_g will have mean of 1, instead of 0
ln_g = self.layer_norm_gammas.weight[index]
ln_b = self.layer_norm_betas.weight[index]
x = layer_norm_func(x, ln_g, ln_b, (shortcut.size(-1),))
if self.fused and x.is_cuda:
dropout_p = 0.0
weights = [down_w, up_w]
biases = [down_b, up_b]
x = self.fused_function(dropout_p, False, x, *weights, *biases)
else:
x = F.linear(x, down_w.view(-1, shortcut.size(-1)), down_b)
x = self.activation(x)
x = F.linear(x, up_w.view(shortcut.size(-1), -1), up_b)
return x + shortcut
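# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Builds a small bank of 4 adapters and routes a toy (seq_len, batch, input_size)
# tensor through adapter #1. Sizes are arbitrary; the sketch assumes the onmt Linear
# and layer_norm_func wrappers behave like their torch counterparts. On CPU the
# forward pass above falls back to the unfused F.linear path.
def _demo_efficient_adapter():
    adapters = EfficientAdapter(num_modules=4, input_size=16, bottleneck_size=8)
    x = torch.randn(5, 2, 16)
    y = adapters(x, index=1)      # residual output, same shape as the input
    return y.shape                # expected: torch.Size([5, 2, 16])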
| 4,610
| 38.75
| 96
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/deltalm/modules/positional_embeddings.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
def make_positions(tensor, padding_idx: int):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
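# --- Worked example (added for illustration, not part of the original module) ---
# With padding_idx = 1, the token matrix [[5, 6, 1], [7, 1, 1]] maps to positions
# [[2, 3, 1], [2, 1, 1]]: real tokens count up from padding_idx + 1 while padding
# positions stay at padding_idx.
def _demo_make_positions():
    toy = torch.tensor([[5, 6, 1], [7, 1, 1]])
    pos = make_positions(toy, padding_idx=1)
    assert pos.tolist() == [[2, 3, 1], [2, 1, 1]]
    return pos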
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
super().__init__(num_embeddings, embedding_dim, padding_idx)
if self.padding_idx is not None:
self.max_positions = self.num_embeddings - self.padding_idx - 1
else:
self.max_positions = self.num_embeddings
def forward(
self,
input: Tensor,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
positions: Optional[Tensor] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
assert (positions is None) or (
self.padding_idx is None
), "If positions is pre-computed then padding_idx should not be set."
if positions is None:
if incremental_state is not None:
# positions is the same for every token when decoding a single step
# Without the int() cast, it doesn't work in some cases when exporting to ONNX
positions = torch.zeros(
(1, 1), device=input.device, dtype=input.dtype
).fill_(int(self.padding_idx + input.size(1)))
else:
positions = make_positions(
input, self.padding_idx,
)
max_position = self.max_positions - 1
positions = torch.clamp(positions, 0, max_position)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx if padding_idx is not None else 0
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size, embedding_dim, padding_idx
)
self.register_buffer("_float_tensor", torch.FloatTensor(1))
self.max_positions = int(1e5)
@staticmethod
def get_embedding(
num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
1
) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
num_embeddings, -1
)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
def forward(
self,
input,
incremental_state: Optional[Any] = None,
timestep: Optional[Tensor] = None,
positions: Optional[Any] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
bspair = torch.onnx.operators.shape_as_tensor(input)
bsz, seq_len = bspair[0], bspair[1]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos, self.embedding_dim, self.padding_idx
)
self.weights = self.weights.to(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
positions = make_positions(
input, self.padding_idx
)
return (
self.weights.index_select(0, positions.view(-1))
.view(bsz, seq_len, -1)
.detach()
)
def PositionalEmbedding(
num_embeddings: int,
embedding_dim: int,
padding_idx: int,
learned: bool = False,
):
if learned:
# if padding_idx is specified then offset the embedding ids by
# this index and adjust num_embeddings appropriately
# TODO: The right place for this offset would be inside
# LearnedPositionalEmbedding. Move this there for a cleaner implementation.
if padding_idx is not None:
num_embeddings = num_embeddings + padding_idx + 1
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
else:
m = SinusoidalPositionalEmbedding(
embedding_dim,
padding_idx,
init_size=num_embeddings + padding_idx + 1,
)
return m
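# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Both flavours consume token ids of shape (bsz, seq_len) and produce position
# embeddings of shape (bsz, seq_len, embedding_dim); the sizes below are arbitrary.
def _demo_positional_embedding():
    tokens = torch.tensor([[5, 6, 7, 1], [8, 9, 1, 1]])   # 1 = padding_idx
    sinusoidal = PositionalEmbedding(128, 16, padding_idx=1, learned=False)
    learned = PositionalEmbedding(128, 16, padding_idx=1, learned=True)
    return sinusoidal(tokens).shape, learned(tokens).shape   # both (2, 4, 16)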
| 6,620
| 36.619318
| 94
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/deltalm/modules/utils.py
|
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import contextlib
import itertools
import logging
import math
import re
import warnings
from typing import Optional, Tuple, Callable, Dict, List, TYPE_CHECKING
import numpy as np
import torch
def gelu_accurate(x):
if not hasattr(gelu_accurate, "_a"):
gelu_accurate._a = math.sqrt(2 / math.pi)
return (
0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
)
def gelu(x: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.gelu(x.float()).type_as(x)
import torch.nn.functional as F
def get_activation_fn(activation: str) -> Callable:
"""Returns the activation function corresponding to `activation`"""
if activation == "relu":
return F.relu
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
        warnings.warn("--activation-fn=gelu_fast has been renamed to gelu_accurate")
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
return [
"relu",
"gelu",
"gelu_fast", # deprecated
"gelu_accurate",
"tanh",
"linear",
]
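# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# The returned callables act elementwise on tensors; "linear" is the identity.
def _demo_activation_fns():
    x = torch.linspace(-2.0, 2.0, steps=5)
    return {name: get_activation_fn(name)(x) for name in ("relu", "gelu", "tanh", "linear")}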
| 1,520
| 24.779661
| 91
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/deltalm/modules/transformer_layer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import torch
import torch.nn as nn
# from fairseq import utils
# from onmt.models.speech_recognizer.fairseq_wav2vec2.fairseq_modules import MultiheadAttention
from onmt.modules.layer_norm import LayerNorm
# from fairseq.modules import LayerNorm, MultiheadAttention
# from fairseq.modules.fairseq_dropout import FairseqDropout
# from fairseq.modules.quant_noise import quant_noise
from torch import Tensor
from .utils import get_activation_fn
from .multihead_attention import MultiHeadAttention
def dropout_residual_connection(x, residual, dropout_module, is_training):
return dropout_add_jit(x, residual, dropout_module.p, is_training)
@torch.jit.script
def dropout_add_jit(x, residual, prob, is_training):
# type: (Tensor, Tensor, float, bool) -> Tensor
out = torch.nn.functional.dropout(x, p=prob, training=is_training)
out = residual + out
return out
def linear_act_linear(x, fc1, fc2, prob, is_training, activation_func):
out = fc1(x)
out = activation_func(out)
out = torch.nn.functional.dropout(out, p=prob, training=is_training)
out = fc2(out)
return out
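# --- Quick sketch (added for illustration, not part of the original module) ---
# dropout_add_jit fuses "dropout -> add residual"; linear_act_linear chains
# "fc1 -> activation -> dropout -> fc2". With is_training=False both reduce to the
# plain deterministic computation; shapes below are arbitrary.
def _demo_fused_helpers():
    x, residual = torch.randn(3, 4), torch.randn(3, 4)
    out = dropout_add_jit(x, residual, 0.1, False)          # == residual + x in eval mode
    fc1, fc2 = nn.Linear(4, 8), nn.Linear(8, 4)
    ff = linear_act_linear(x, fc1, fc2, 0.1, False, torch.relu)
    return out.shape, ff.shape                              # both torch.Size([3, 4])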
class TransformerEncoderLayerBase(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*cfg.encoder.normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.embed_dim = cfg.encoder_embed_dim
# self.quant_noise = cfg.quant_noise.pq
# self.quant_noise_block_size = cfg.quant_noise.pq_block_size
self.self_attn = self.build_self_attention(self.embed_dim, cfg)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout_module = nn.Dropout(cfg.dropout)
self.activation_fn = get_activation_fn(activation=cfg.activation_fn)
activation_dropout_p = cfg.activation_dropout
if activation_dropout_p == 0:
# for backwards compatibility with models that use cfg.relu_dropout
activation_dropout_p = cfg.relu_dropout or 0
self.activation_dropout_module = nn.Dropout(activation_dropout_p)
self.normalize_before = cfg.encoder_normalize_before
self.fc1 = self.build_fc1(
self.embed_dim,
cfg.encoder_ffn_embed_dim
)
self.fc2 = self.build_fc2(
cfg.encoder_ffn_embed_dim,
self.embed_dim
)
self.checkpoint_activations = cfg.checkpoint_activations
self.final_layer_norm = LayerNorm(self.embed_dim)
self.activation_fn_name = cfg.activation_fn
# Fused MLP config
self.fused = False
self.fused_function = None
if self.activation_fn_name == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif self.activation_fn_name == 'gelu':
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
# Adapter config
self.n_languages = -1
self.has_adapter = False
def build_fc1(self, input_dim, output_dim, *args):
return nn.Linear(input_dim, output_dim)
def build_fc2(self, input_dim, output_dim, *args):
return nn.Linear(input_dim, output_dim)
def build_self_attention(self, embed_dim, cfg):
from pretrain_module.modeling_mbart import MBartAttention
return MBartAttention(
embed_dim=embed_dim,
num_heads=cfg.encoder_attention_heads,
dropout=cfg.attention_dropout,
)
def residual_connection(self, x, residual):
return residual + x
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(
self,
x,
encoder_padding_mask: Optional[Tensor],
attn_mask: Optional[Tensor] = None,
max_len=None, cu_seqlens=None,
**kwargs
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, seq_len)` where padding elements are indicated by ``1``.
attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,
where `tgt_len` is the length of output and `src_len` is the
length of input, though here both are equal to `seq_len`.
`attn_mask[tgt_i, src_j] = 1` means that when calculating the
embedding for `tgt_i`, we exclude (mask out) `src_j`. This is
useful for strided self-attention.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
# anything in original attn_mask = 1, becomes -1e8
# anything in original attn_mask = 0, becomes 0
# Note that we cannot use -inf here, because at some edge cases,
# the attention weight (before softmax) for some padded element in query
# will become -inf, which results in NaN in model parameters
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, _, _ = self.self_attn(
hidden_states=x,
attention_mask=encoder_padding_mask,
output_attentions=False,
max_len=max_len, cu_seqlens=cu_seqlens
)
# x = self.dropout_module(x)
# x = self.residual_connection(x, residual)
x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
if self.fused and x.is_cuda:
dropout_p = self.activation_dropout_module.p if self.training else 0.0
weights = [self.fc1.weight, self.fc2.weight]
biases = [self.fc1.bias, self.fc2.bias]
x = self.fused_function(dropout_p, False, x, *weights, *biases)
else:
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x
class TransformerDecoderLayerBase(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*cfg.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self, cfg, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
):
super().__init__()
self.embed_dim = cfg.decoder_embed_dim
self.dropout_module = nn.Dropout(cfg.dropout)
self.cross_self_attention = cfg.cross_self_attention
self.self_attn = self.build_self_attention(
self.embed_dim,
cfg,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.activation_fn = get_activation_fn(activation=cfg.activation_fn)
activation_dropout_p = cfg.activation_dropout
if activation_dropout_p == 0:
# for backwards compatibility with models that use cfg.relu_dropout
activation_dropout_p = cfg.relu_dropout or 0
self.activation_dropout_module = nn.Dropout(float(activation_dropout_p))
self.normalize_before = cfg.decoder_normalize_before
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, cfg)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
self.fc1 = self.build_fc1(
self.embed_dim,
cfg.decoder_ffn_embed_dim
)
self.fc2 = self.build_fc2(
cfg.decoder_ffn_embed_dim,
self.embed_dim
)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.need_attn = True
        self.checkpoint_activations = cfg.checkpoint_activations
# self.activation_fn_name = cfg.activation_fn
# self.fused = False
# self.fused_function = None
# if self.activation_fn_name == 'relu':
# from onmt.modules.mlp.mlp import mlp_relu_function
# if mlp_relu_function is not None:
# self.fused_function = mlp_relu_function
# self.fused = True
# elif self.activation_fn_name == 'gelu':
# from onmt.modules.mlp.mlp import mlp_gelu_function
# if mlp_gelu_function is not None:
# self.fused_function = mlp_gelu_function
# self.fused = True
def build_fc1(self, input_dim, output_dim):
return nn.Linear(input_dim, output_dim)
def build_fc2(self, input_dim, output_dim):
return nn.Linear(input_dim, output_dim)
def build_self_attention(
self, embed_dim, cfg, add_bias_kv=False, add_zero_attn=False
):
from pretrain_module.modeling_mbart import MBartAttention
return MBartAttention( # MBartAutoRegressiveSelfAttentionSLow(
embed_dim=embed_dim,
num_heads=cfg.decoder_attention_heads,
dropout=cfg.attention_dropout,
is_decoder=True,
)
# return MultiHeadAttention(
# embed_dim,
# cfg.decoder_attention_heads,
# dropout=cfg.attention_dropout,
# add_bias_kv=add_bias_kv,
# add_zero_attn=add_zero_attn,
# self_attention=not cfg.cross_self_attention
# )
def build_encoder_attention(self, embed_dim, cfg):
from pretrain_module.modeling_mbart import MBartCrossAttention
return MBartCrossAttention(
embed_dim,
cfg.decoder_attention_heads,
dropout=cfg.attention_dropout,
)
# return MultiHeadAttention(
# embed_dim,
# cfg.decoder_attention_heads,
# kdim=cfg.encoder_embed_dim,
# vdim=cfg.encoder_embed_dim,
# dropout=cfg.attention_dropout,
# encoder_decoder_attention=True
# )
def residual_connection(self, x, residual):
return residual + x
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
**kwargs
):
"""
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, attn, _ = self.self_attn(
hidden_states=x,
attention_mask=self_attn_mask,
output_attentions=False
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
x, attn, _ = self.encoder_attn(
hidden_states=x,
key_value_states=encoder_out,
attention_mask=encoder_padding_mask,
output_attentions=False,
# incremental=incremental, incremental_cache=incremental_cache,
# checkpointing=checkpointing_cross_attn,
# lang=lang, atb=atb
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x, attn, None
| 14,593
| 35.303483
| 95
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/deltalm/modules/activation_functions.py
|
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
import math
from typing import Dict, Optional, Tuple
import torch
def gelu_accurate(x):
if not hasattr(gelu_accurate, "_a"):
gelu_accurate._a = math.sqrt(2 / math.pi)
return (
0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
)
def gelu(x: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.gelu(x.float()).type_as(x)
| 507
| 27.222222
| 91
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/models/deltalm/modules/layer_drop.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
LayerDrop as described in https://arxiv.org/abs/1909.11556.
"""
import torch
import torch.nn as nn
class LayerDropModuleList(nn.ModuleList):
"""
A LayerDrop implementation based on :class:`torch.nn.ModuleList`.
We refresh the choice of which layers to drop every time we iterate
over the LayerDropModuleList instance. During evaluation we always
iterate over all layers.
Usage::
layers = LayerDropList(p=0.5, modules=[layer1, layer2, layer3])
for layer in layers: # this might iterate over layers 1 and 3
x = layer(x)
for layer in layers: # this might iterate over all layers
x = layer(x)
for layer in layers: # this might not iterate over any layers
x = layer(x)
Args:
p (float): probability of dropping out each layer
modules (iterable, optional): an iterable of modules to add
"""
def __init__(self, p, modules=None):
super().__init__(modules)
self.p = p
def __iter__(self):
dropout_probs = torch.empty(len(self)).uniform_()
for i, m in enumerate(super().__iter__()):
if not self.training or (dropout_probs[i] > self.p):
yield m
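# --- Small numerical sketch (added for illustration, not part of the original module) ---
# During training each layer survives independently with probability 1 - p, so with
# p = 0.5 and 3 layers roughly 1.5 layers are visited per pass on average; in eval
# mode every layer is always visited.
def _demo_layer_drop():
    layers = LayerDropModuleList(p=0.5, modules=[nn.Identity(), nn.Identity(), nn.Identity()])
    layers.eval()
    return sum(1 for _ in layers)   # 3: evaluation never drops layers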
| 1,408
| 31.022727
| 71
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/metrics/gleu.py
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: GLEU Score
#
# Copyright (C) 2001-2017 NLTK Project
# Authors:
# Contributors:
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
""" GLEU score implementation. """
from __future__ import division
from collections import Counter
from nltk.util import ngrams, everygrams
from nltk.compat import string_types
try:
from fractions import Fraction
Fraction(0, 1000, _normalize=False)
except TypeError:
from nltk.compat import Fraction
def sentence_gleu(reference, hypothesis, min_len=1, max_len=4):
"""
Calculates the sentence level GLEU (Google-BLEU) score described in
Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi,
Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey,
Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, Lukasz Kaiser,
Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens,
George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith,
Jason Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes,
Jeffrey Dean. (2016) Google’s Neural Machine Translation System:
Bridging the Gap between Human and Machine Translation.
eprint arXiv:1609.08144. https://arxiv.org/pdf/1609.08144v2.pdf
Retrieved on 27 Oct 2016.
From Wu et al. (2016):
"The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective."
Note: The initial implementation only allowed a single reference, but now
a list of references is required (which is consistent with
bleu_score.sentence_bleu()).
The infamous "the the the ... " example
>>> ref = 'the cat is on the mat'.split()
>>> hyp = 'the the the the the the the'.split()
>>> sentence_gleu([ref], hyp) # doctest: +ELLIPSIS
0.0909...
An example to evaluate normal machine translation outputs
>>> ref1 = str('It is a guide to action that ensures that the military '
... 'will forever heed Party commands').split()
>>> hyp1 = str('It is a guide to action which ensures that the military '
... 'always obeys the commands of the party').split()
>>> hyp2 = str('It is to insure the troops forever hearing the activity '
... 'guidebook that party direct').split()
>>> sentence_gleu([ref1], hyp1) # doctest: +ELLIPSIS
0.4393...
>>> sentence_gleu([ref1], hyp2) # doctest: +ELLIPSIS
0.1206...
:param references: a list of reference sentences
:type references: list(list(str))
:param hypothesis: a hypothesis sentence
:type hypothesis: list(str)
:param min_len: The minimum order of n-gram this function should extract.
:type min_len: int
:param max_len: The maximum order of n-gram this function should extract.
:type max_len: int
:return: the sentence level GLEU score.
:rtype: float
"""
hyp_ngrams = Counter(everygrams(hypothesis, min_len, max_len))
ref_ngrams = Counter(everygrams(reference, min_len, max_len))
tpfp = sum(hyp_ngrams.values())
tpfn = sum(ref_ngrams.values())
overlap_ngrams = ref_ngrams & hyp_ngrams
tp = sum(overlap_ngrams.values()) # True positives, i.e. numerator.
n_all = max(tpfp, tpfn) # denominator.
this_gleu = Fraction(tp, n_all, _normalize=False)
return (float(this_gleu),)
# While GLEU is defined as the minimum of precision and
# recall, we can reduce the number of division operations by one by
# instead finding the maximum of the denominators for the precision
# and recall formulae, since the numerators are the same:
# precision = tp / tpfp
# recall = tp / tpfn
# gleu_score = min(precision, recall) == tp / max(tpfp, tpfn)
#~ return corpus_gleu(
#~ [references],
#~ [hypothesis],
#~ min_len=min_len,
#~ max_len=max_len
#~ )
#~ def corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4):
"""
Calculate a single corpus-level GLEU score (aka. system-level GLEU) for all
the hypotheses and their respective references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average
precision), Wu et al. (2016) sum up the matching tokens and the max of
hypothesis and reference tokens for each sentence, then compute using the
aggregate values.
From Mike Schuster (via email):
"For the corpus, we just add up the two statistics n_match and
n_all = max(n_all_output, n_all_target) for all sentences, then
calculate gleu_score = n_match / n_all, so it is not just a mean of
the sentence gleu scores (in our case, longer sentences count more,
which I think makes sense as they are more difficult to translate)."
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> corpus_gleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
0.5673...
The example below show that corpus_gleu() is different from averaging
sentence_gleu() for hypotheses
>>> score1 = sentence_gleu([ref1a], hyp1)
>>> score2 = sentence_gleu([ref2a], hyp2)
>>> (score1 + score2) / 2 # doctest: +ELLIPSIS
0.6144...
:param list_of_references: a list of reference sentences, w.r.t. hypotheses
:type list_of_references: list(list(list(str)))
:param hypotheses: a list of hypothesis sentences
:type hypotheses: list(list(str))
:param min_len: The minimum order of n-gram this function should extract.
:type min_len: int
:param max_len: The maximum order of n-gram this function should extract.
:type max_len: int
:return: The corpus-level GLEU score.
:rtype: float
"""
# sanity check
#~ assert len(list_of_references) == len(hypotheses), "The number of hypotheses and their reference(s) should be the same"
# Keep tracks of the sum matches (numerator) and
# max-token-lengths (denominator) over all sentences.
#~ _gleu_score = Fraction(0, -1, _normalize=False)
#~
#~ for references, hypothesis in zip(list_of_references, hypotheses):
#~ hyp_ngrams = Counter(everygrams(hypothesis, min_len, max_len))
#~ tpfp = sum(hyp_ngrams.values()) # True positives + False positives.
#~
#~ highest_gleu_score = Fraction(0, -1, _normalize=False)
#~ for reference in references:
#~ ref_ngrams = Counter(everygrams(reference, min_len, max_len))
#~ tpfn = sum(ref_ngrams.values()) # True positives + False negatives.
#~
#~ overlap_ngrams = ref_ngrams & hyp_ngrams
#~ tp = sum(overlap_ngrams.values()) # True positives, i.e. numerator.
#~
#~ # While GLEU is defined as the minimum of precision and
#~ # recall, we can reduce the number of division operations by one by
#~ # instead finding the maximum of the denominators for the precision
#~ # and recall formulae, since the numerators are the same:
#~ # precision = tp / tpfp
#~ # recall = tp / tpfn
#~ # gleu_score = min(precision, recall) == tp / max(tpfp, tpfn)
#~ n_all = max(tpfp, tpfn) # denominator.
#~
#~ this_gleu = Fraction(tp, n_all, _normalize=False)
#~ print(this_gleu)
#~ print(highest_gleu_score)
#~
#~
#~ if (float(this_gleu) > float(highest_gleu_score)):
#~ highest_gleu_score = this_gleu
#~ print("KJEHKE")
#~
#~
#~ # use the reference yielding the highest score
#~ if highest_gleu_score and highest_gleu_score.denominator > 0:
#~ print(_gleu_score, highest_gleu_score)
#~ print(_gleu_score.numerator)
#~ print(highest_gleu_score.numerator)
#~ print(_gleu_score.numerator)
#~ _gleu_score.numerator += highest_gleu_score.numerator
#~ _gleu_score.denominator += highest_gleu_score.denominator
# corner cases:
# - no ngram matches (numerator == 0) --- just return zero.
# - empty corpus or empty references (denominator == -1) --- don't divide by zero !
#~ if _gleu_score.numerator & _gleu_score.denominator < 1:
#~ return float(_gleu_score)
#~ else:
#~ return float(_gleu_score)
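# --- Worked example (added for illustration, not part of the original file) ---
# Following the formula above, GLEU = matching n-grams / max(hyp n-grams, ref n-grams).
# Passing flat token lists as onmt/metrics/hit.py does: the hypothesis has 22 n-grams
# of order 1..4, the reference has 18, and only the unigram "the" matches twice,
# so GLEU = 2 / 22 ~= 0.0909.
def _demo_sentence_gleu():
    ref = "the cat is on the mat".split()
    hyp = "the the the the the the the".split()
    return sentence_gleu(ref, hyp)[0]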
| 10,546
| 44.461207
| 126
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/metrics/hit.py
|
from onmt.metrics.gleu import sentence_gleu
import math
# hit is the metrics for getting rare words copied
class HitMetrics(object):
def __init__(self, alpha=0.5):
self.alpha = alpha
def hit(self, reference, hypothesis):
        index = -1
        alpha = self.alpha
        # find the ". ; ." separator between the clean reference and the rare-word patterns
        for i in range(len(reference) - 3):
            if index < 0 and reference[i] == "." and reference[i + 1] == ";" and reference[i + 2] == ".":
                index = i
        pureRef = reference[:index] + [reference[-1]]
        refWords = reference[index + 3:-1]
        gleu = sentence_gleu(pureRef, hypothesis)[0]
        hit = calculateHits(refWords, hypothesis)
        combined_score = alpha * max(hit, 0) + (1.0 - alpha) * gleu
        return (combined_score, gleu, hit)
def calculateHits(reference, hypothesis):
    # each ";"-separated phrase of the reference must appear contiguously in the hypothesis
    phrases = " ".join(reference).split(";")
    hit = 0
    count = 0
    for p in phrases:
        pattern = p.strip().split()
        if len(pattern) > 0:
            count += 1
            for i in range(len(hypothesis)):
                j = 0
                while j < len(pattern) and i + j < len(hypothesis) and pattern[j] == hypothesis[i + j]:
                    j += 1
                if j == len(pattern):
                    hit += 1
                    break
    if count == 0:
        return -1
    else:
        return 1.0 * hit / count
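# --- Hedged usage sketch (added for illustration, not part of the original file) ---
# The code above expects the reference to carry the rare-word patterns after a
# ". ; ." separator, with ";" between patterns and the original final token kept at
# the end; this format is inferred from the slicing logic, not documented elsewhere.
def _demo_hit_metrics():
    metric = HitMetrics(alpha=0.5)
    reference = "the cat sat . ; . cat ; sat .".split()
    hypothesis = "the cat sat .".split()
    return metric.hit(reference, hypothesis)   # (combined_score, gleu, hit); here (1.0, 1.0, 1.0)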
| 1,396
| 27.510204
| 102
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/metrics/bleu.py
|
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BLEU metric implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import subprocess
import tempfile
import numpy as np
from os.path import isfile
from six.moves import urllib
def moses_multi_bleu(hypFileName, refFileName, lowercase=False):
"""Calculate the bleu score for hypotheses and references
using the MOSES ulti-bleu.perl script.
Args:
hypotheses: A numpy array of strings where each string is a single example.
references: A numpy array of strings where each string is a single example.
lowercase: If true, pass the "-lc" flag to the multi-bleu script
Returns:
The BLEU score as a float32 value.
"""
# Get MOSES multi-bleu script
eval_file = "/tmp/multi-bleu.perl"
if not isfile(eval_file):
multi_bleu_path, _ = urllib.request.urlretrieve(
"https://raw.githubusercontent.com/moses-smt/mosesdecoder/"
"master/scripts/generic/multi-bleu.perl", eval_file)
os.chmod(multi_bleu_path, 0o755)
multi_bleu_path = eval_file
# Calculate BLEU using multi-bleu script
with open(hypFileName, "r") as read_pred:
bleu_cmd = [multi_bleu_path]
if lowercase:
bleu_cmd += ["-lc"]
bleu_cmd += [refFileName]
try:
bleu_out = subprocess.check_output(
bleu_cmd, stdin=read_pred, stderr=subprocess.STDOUT)
bleu_out = bleu_out.decode("utf-8")
bleu_score = re.search(r"BLEU = (.+?),", bleu_out).group(1)
bleu_score = float(bleu_score)
except subprocess.CalledProcessError as error:
if error.output is not None:
print("multi-bleu.perl script returned non-zero exit code")
print(error.output)
bleu_score = np.float32(0.0)
# Close temp files
return np.float32(bleu_score)
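# --- Hedged usage sketch (added for illustration, not part of the original file) ---
# The two file names below are hypothetical; each file holds one detokenized sentence
# per line. Running this needs network access (to fetch multi-bleu.perl once) and a
# perl interpreter on PATH, so the call is only sketched, not executed at import time.
def _demo_moses_multi_bleu(hyp_path="hyp.txt", ref_path="ref.txt"):
    return moses_multi_bleu(hyp_path, ref_path, lowercase=True)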
| 2,481
| 29.268293
| 79
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/metrics/__init__.py
|
from onmt.metrics.bleu import *
from onmt.metrics.gleu import *
from onmt.metrics.sbleu import sentence_bleu
# For flake8 compatibility.
__all__ = []
| 151
| 20.714286
| 44
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/metrics/sbleu.py
|
import sys
import math
ngramLength = 4
smoothingConstant = 0.1
bpSmoothingConstant = 1.5
def getCounts(words):
counts = {}
for i in range(len(words)):
ngram = []
for j in range(ngramLength):
if(i+j < len(words)):
ngram.append(words[i+j])
if(" ".join(ngram) in counts):
counts[" ".join(ngram)] += 1
else:
counts[" ".join(ngram)] = 1
return counts
def getRefCounts(ref):
count = getCounts(ref)
length = len(ref)
return count, length
#~ file = open(filename)
#~
#~ line = file.readline()
#~
#~ counts = []
#~ length = []
#~
#~ while(line):
#~
#~ counts.append(getCounts(line.split()))
#~ length.append(len(line.split()))
#~ line = file.readline()
#~ return counts,length
def countMatches(hyp, ref):
counts = [0] * ngramLength
found = {}
for i in range(len(hyp)):
ngram = []
for j in range(ngramLength):
if(i+j < len(hyp)):
ngram.append(hyp[i+j])
if(" ".join(ngram) in ref and (" ".join(ngram) not in found or found[" ".join(ngram)] < ref[" ".join(ngram)])):
counts[j] += 1
if(" ".join(ngram) in found):
found[" ".join(ngram)] += 1;
else:
found[" ".join(ngram)] = 1;
return counts
def calcBLEU(counts,length,referenceLength):
result = 1;
for i in range(ngramLength):
if(length -i > 0):
#cannot calculte 4-gram precision for sentence length 3
result *= 1.0*(counts[i]+smoothingConstant)/(length-i+smoothingConstant)
result = pow(result,1.0/ngramLength);
if(length > referenceLength):
return result
else:
if(length == 0):
return math.exp(1.0-(referenceLength+bpSmoothingConstant)/1)*result
return math.exp(1.0-(referenceLength+bpSmoothingConstant)/length)*result
def calc(refCounts,refLength,hyp):
target = hyp
count = countMatches(target, refCounts)
s = calcBLEU(count, len(target), refLength)
return s
#~ file = open(filename)
#~
#~ out = open(outname,'w')
#~ line = file.readline()
#~
#~ bestScores = []
#~ firstScores = []
#~
#~ while(line):
#~ number = int(line.split("|||")[0])
#~ target = line.split("|||")[1].split()
#~ count = countMatches(target,refCounts[number])
#~ s = calcBLEU(count,len(target),refLength[number])
#~ print >>out,s
#~ if(number < len(bestScores)):
#~ if(bestScores[number] < s):
#~ bestScores[number] = s;
#~ else:
#~ firstScores.append(s)
#~ bestScores.append(s)
#~ line = file.readline()
#~
#~ avg = sum(firstScores)/len(firstScores)
#~ oracle = sum(bestScores)/len(bestScores)
#~ print "First hypothesis: ",avg
#~ print "Oracle score: ",oracle
# inputs are lists of words
def sentence_bleu(ref, hyp):
refCounts, refLength = getRefCounts(ref)
sbleu = calc(refCounts,refLength,hyp)
return (sbleu,)
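# --- Worked usage sketch (added for illustration, not part of the original file) ---
# Inputs are token lists; the result is a 1-tuple holding the smoothed sentence BLEU.
def _demo_sentence_bleu():
    ref = "the cat is on the mat".split()
    hyp = "the cat sat on the mat".split()
    (score,) = sentence_bleu(ref, hyp)
    return score   # between 0 and 1; higher means closer n-gram overlap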
| 3,215
| 26.487179
| 127
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/reversible_models/reversible.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.linear import FeedForward
from onmt.modules.attention import MultiHeadAttention
from torch.autograd.function import Function
import sys
from torch.utils.checkpoint import get_device_states, set_device_states
class ReversibleTransformerEncoder(nn.Module):
    def __init__(self, opt, death_rate=0.0):
        super().__init__()
        self.variational = opt.variational_dropout
        d_model = opt.model_size
        p = opt.dropout
        self.death_rate = death_rate
        self.dropout = p
        h = opt.n_heads
        attn_p = opt.attn_dropout
        n_layers = opt.layers
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.multihead = MultiHeadAttention(h, d_model, attn_p=attn_p, share=2)
ff_p = opt.dropout
self.feedforward = FeedForward(opt.model_size, opt.inner_size, ff_p, variational=self.variational)
| 1,099
| 34.483871
| 106
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/reversible_models/relative_transformers.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
# from onmt.modules.linear import FeedForward as position_wise_feed_forward
from onmt.modules.attention import MultiHeadAttention
# from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.layer_norm import LayerNorm
from torch.autograd.function import Function
import sys
from torch.utils.checkpoint import get_device_states, set_device_states
from onmt.modules.dropout import variational_dropout
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..modules.optimized.compat import custom_fwd, custom_bwd
try:
import apex.amp as amp
from apex.amp import half_function
except (ModuleNotFoundError, ImportError) as e:
amp = None
from ..modules.optimized.compat import half_function
def deterministic_dropout(input, p=0.5, training=True, seed=None):
if seed is not None:
torch.manual_seed(seed)
return nn.functional.dropout(input, p=p, training=training)
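# --- Tiny check (added for illustration, not part of the original module) ---
# Re-seeding before each call reproduces the same dropout mask, which is the property
# the reversible layers rely on when they replay the forward computation in backward.
def _demo_deterministic_dropout():
    x = torch.ones(4, 4)
    a = deterministic_dropout(x, p=0.5, training=True, seed=1234)
    b = deterministic_dropout(x, p=0.5, training=True, seed=1234)
    assert torch.equal(a, b)
    return a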
class RelativeSelfAttention(nn.Module):
def __init__(self, opt):
super().__init__()
# self.layer_norm = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.layer_norm = LayerNorm((opt.model_size,), elementwise_affine=True)
# self.attn = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=1)
# self.attn = RelPartialLearnableMultiHeadAttn(opt.n_heads, opt.model_size, opt.model_size // opt.n_heads,
# dropatt=opt.attn_dropout)
self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
self.attn = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, dropout=opt.attn_dropout,
learnable_pos=opt.learnable_pos)
self.variational = opt.variational_dropout
def forward(self, input, pos, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
q = self.layer_norm(input)
attn, coverage = self.attn(q, pos, attn_mask, incremental=incremental, incremental_cache=incremental_cache)
if not self.variational:
o = F.dropout(attn, p=self.residual_dropout, training=self.training, inplace=False)
else:
o = variational_dropout(attn, p=self.residual_dropout, inplace=False, training=self.training)
if cleaning:
del q, attn
return o, coverage
class FeedForward(nn.Module):
def __init__(self, opt):
super().__init__()
self.layer_norm = nn.LayerNorm((opt.model_size,), elementwise_affine=True)
self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
self.ffn_dropout = opt.ffn_dropout if opt.ffn_dropout >= 0 else opt.dropout
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
variational=opt.variational_dropout, glu=opt.ffn_glu,
activation=opt.ffn_activation)
self.variational = opt.variational_dropout
def forward(self, input, cleaning=False):
x_norm = self.layer_norm(input)
x_ff = self.feedforward(x_norm)
if not self.variational:
o = F.dropout(x_ff, p=self.residual_dropout, training=self.training, inplace=False)
else:
o = variational_dropout(x_ff, p=self.residual_dropout, inplace=False, training=self.training)
if cleaning:
del x_norm, x_ff
return o
class SourceAttention(nn.Module):
def __init__(self, opt):
super().__init__()
self.layer_norm = nn.LayerNorm((opt.model_size,), elementwise_affine=True)
self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
self.attn = EncdecMultiheadAttn(opt.n_heads, opt.model_size, attn_drop=opt.attn_dropout)
self.dropout = opt.attn_dropout
self.variational = opt.variational_dropout
def forward(self, input, context, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
q = self.layer_norm(input)
attn, coverage = self.attn(q, context, context, attn_mask, incremental, incremental_cache)
if not self.variational:
o = F.dropout(attn, p=self.residual_dropout, training=self.training, inplace=False)
else:
o = variational_dropout(attn, p=self.residual_dropout, inplace=False, training=self.training)
if cleaning:
del q, attn
return o, coverage
class ReversibleEncoderFunction(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, layers, hidden_states, pos, attn_mask):
# attn_output, hidden_states = hidden_states, hidden_states # torch.chunk(hidden_states, 2, dim=-1)
first_input, second_input = hidden_states, hidden_states
# this block should be run under torch.no_grad()?
with torch.no_grad():
for layer in layers:
# forward pass in the layer
first_input, second_input = layer(
first_input, second_input, pos, attn_mask
)
# attach params to ctx for backward
# why should we detach here? because Y1 Y2 were built within torch.no_grad()
# so cutting the backward from these variables seems unnecessary
# save_for_backward will release memory more efficiently
ctx.save_for_backward(first_input, second_input, pos)
ctx.layers = layers
ctx.attn_mask = attn_mask # just in case attn_mask is None
with torch.no_grad():
output = first_input + second_input
# The only memory footprint is the last layer outputs and the "output".
return output
# concatenate 2 revnet outputs:
# return torch.cat([attn_output, hidden_states], dim=-1)
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
# grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
        # the forward pass returned y1 + y2 as a single tensor, so both halves
        # receive the same incoming gradient
        first_grad_output, second_grad_output = grad_output, grad_output
# retrieve params from ctx
first_output, second_output, pos = ctx.saved_tensors
layers = ctx.layers
attn_mask = ctx.attn_mask
        for idx, layer in enumerate(layers[::-1]):
            # backprop through one layer: recover its inputs and their gradients,
            # then feed the recovered inputs into the next (earlier) layer
            first_output, second_output, first_grad_output, second_grad_output = layer.backward_pass(
                first_output, second_output, first_grad_output, second_grad_output, pos, attn_mask
            )
grad_hidden_states = first_grad_output + second_grad_output
        # gradients are only needed for the hidden states; the layers, the position
        # encodings and the attention mask get None
        return None, grad_hidden_states, None, None
@half_function
def reversible_encoder(layers, hidden_states, pos, attn_mask):
return ReversibleEncoderFunction.apply(layers, hidden_states, pos, attn_mask)
class ReversibleTransformerEncoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0):
super().__init__()
self.self_attn = RelativeSelfAttention(opt)
self.feedforward = FeedForward(opt)
self.death_rate = death_rate
self.forward_coin = True
def _init_attention_seed(self, *args):
"""
This function sets a new seed for the
attention layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
self.attn_cpu_state = torch.get_rng_state()
self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)
def _init_feedforward_seed(self, *args):
"""
This function sets a new seed for the
feed forward layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
self.ffn_cpu_state = torch.get_rng_state()
self.ffn_gpu_devices, self.ffn_gpu_states = get_device_states(*args)
def forward(self, x1, x2, pos, attn_mask=None):
"""
:param pos: position embeddings
:param x2:
:param x1:
:param attn_mask:
:return:
"""
# every forward pass we sample a different seed
# for dropout and save for forward fn in backward pass
# to have correct dropout
self._init_attention_seed(x2)
z1, coverage = self.self_attn(x2, pos, attn_mask, cleaning=True)
y1 = z1 + x1
self._init_feedforward_seed(y1)
z2 = self.feedforward(y1, cleaning=True)
y2 = z2 + x2
del x1, x2, z1, z2
"""return Y1 and Y2"""
return y1, y2
def backward_pass(self, y1, y2, dy1, dy2, pos, attn_mask=None):
"""
:param pos:
:param y1:
:param y2:
:param dy1:
:param dy2:
:param attn_mask:
:return:
"""
"""Implementation of the backward pass for reversible transformer encoder"""
with torch.enable_grad():
y1.requires_grad = True
with torch.random.fork_rng(devices=self.ffn_gpu_devices, enabled=True):
torch.set_rng_state(self.ffn_cpu_state)
set_device_states(self.ffn_gpu_devices, self.ffn_gpu_states)
z2 = self.feedforward(y1)
            # res_hidden_states.backward(grad_hidden_states, retain_graph=True)
torch.autograd.backward(z2, dy2)
with torch.no_grad():
# restore X2 = Y2 - G(Y1)
x2 = y2 - z2
del z2, y2
# DX1 = DY1 + Y1.grad
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
torch.set_rng_state(self.attn_cpu_state)
set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
                # RelativeSelfAttention.forward returns (output, coverage)
                z1, _ = self.self_attn(x2, pos, attn_mask)
z1.backward(dx1)
with torch.no_grad():
# restore X1 = Y1 - F(X2)
x1 = y1 - z1
del y1, z1
dx2 = dy2 + x2.grad
x2.grad = None
del dy2
x2 = x2.detach()
return x1, x2, dx1, dx2
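# --- Toy illustration (added for illustration, not part of the original module) ---
# The layer above is an additive coupling: y1 = x1 + F(x2), y2 = x2 + G(y1), so the
# inputs can be reconstructed exactly from the outputs (x2 = y2 - G(y1),
# x1 = y1 - F(x2)). backward_pass exploits this to avoid storing activations; the
# plain nn.Linear blocks below only stand in for the attention / feed-forward parts.
def _demo_additive_coupling():
    f, g = nn.Linear(8, 8), nn.Linear(8, 8)
    x1, x2 = torch.randn(4, 8), torch.randn(4, 8)
    with torch.no_grad():
        y1 = x1 + f(x2)
        y2 = x2 + g(y1)
        x2_rec = y2 - g(y1)
        x1_rec = y1 - f(x2_rec)
    assert torch.allclose(x1, x1_rec) and torch.allclose(x2, x2_rec)
    return y1, y2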
class ReversibleDecoderFunction(Function):
@staticmethod
def forward(ctx, layers, hidden_states, pos, context, tgt_mask, src_mask,
incremental=False, incremental_cache=None):
bsz, seq_len = hidden_states.shape[0], hidden_states.shape[1]
B = bsz * seq_len
idx = 0
attn_output, hidden_states = hidden_states, hidden_states
for layer in layers:
idx = idx + 1
# forward pass in the layer
attn_output, hidden_states, coverage, incremental_cache = layer(
attn_output, hidden_states, pos, context, tgt_mask, src_mask,
incremental=incremental, incremental_cache=incremental_cache
)
# attach params to ctx for backward
# why should we detach here? because Y1 Y2 were built within torch.no_grad()
# so cutting the backward from these variables seems unnecessary
# save_for_backward will release memory more efficiently
# detach() seems to be required especially for context ...
ctx.save_for_backward(attn_output, hidden_states, context, pos)
ctx.layers = layers
ctx.src_mask = src_mask
ctx.tgt_mask = tgt_mask
with torch.no_grad():
output = attn_output + hidden_states
        # sum the two revnet output streams into a single tensor
return output
@staticmethod
def backward(ctx, grad_hidden_states):
        # the forward pass returned a single combined tensor, so the one incoming
        # gradient is routed to both reversible streams
# grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
grad_attn_output = grad_hidden_states
# retrieve params from ctx
attn_output, hidden_states, context, pos = ctx.saved_tensors
layers = ctx.layers
src_mask = ctx.src_mask
tgt_mask = ctx.tgt_mask
grad_context = None # we need to sum up the gradients of the context manually
for idx, layer in enumerate(layers[::-1]):
"""Note: Here for each layer we detach the context once because we need to consider it
as a separate variable and then later accumulate the gradients"""
attn_output, hidden_states, grad_attn_output, grad_hidden_states, grad_context_ = layer.backward_pass(
attn_output, hidden_states, grad_attn_output, grad_hidden_states,
pos, context.detach(), tgt_mask, src_mask
)
if grad_context is None:
grad_context = grad_context_
            elif grad_context_ is not None:  # a skipped layer may return None for its context gradient
grad_context.add_(grad_context_)
del grad_context_
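        # the two streams were initialised from the same tensor in forward, so their
        # gradients sum into a single gradient for hidden_states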
grad_hidden_states = grad_attn_output + grad_hidden_states
        return None, grad_hidden_states, None, grad_context, None, None, None, None
@half_function
def reversible_decoder(layers, hidden_states, pos, context, tgt_mask, src_mask, incremental, incremental_cache):
return ReversibleDecoderFunction.apply(layers, hidden_states, pos, context,
tgt_mask, src_mask, incremental, incremental_cache)
class ReversibleTransformerDecoderLayer(nn.Module):
# def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, ignore_source=False,
# variational=False, death_rate=0.0):
def __init__(self, opt, death_rate=0.0):
super(ReversibleTransformerDecoderLayer, self).__init__()
self.ignore_source = opt.ignore_source
assert not self.ignore_source
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.dropout = opt.dropout
self.self_attention = RelativeSelfAttention(opt)
self.feed_forward_first = FeedForward(opt)
if not self.ignore_source:
self.src_attention = SourceAttention(opt)
self.feed_forward_second = FeedForward(opt)
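        # the four sub-modules implement the couplings used in forward():
        #   F = self_attention, G = feed_forward_first, H = src_attention, K = feed_forward_second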
def _init_src_attention_seed(self, *args):
"""
This function sets a new seed for the
attention layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
self.src_attn_cpu_state = torch.get_rng_state()
self.src_attn_gpu_devices, self.src_attn_gpu_states = get_device_states(*args)
def _init_attention_seed(self, *args):
"""
This function sets a new seed for the
attention layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
# randomize seeds
self.attn_cpu_state = torch.get_rng_state()
self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)
def _init_feedforward1_seed(self, *args):
"""
This function sets a new seed for the
feed forward layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
# randomize seeds
self.ffn1_cpu_state = torch.get_rng_state()
self.ffn1_gpu_devices, self.ffn1_gpu_states = get_device_states(*args)
def _init_feedforward2_seed(self, *args):
"""
This function sets a new seed for the
feed forward layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
# randomize seeds
self.ffn2_cpu_state = torch.get_rng_state()
self.ffn2_gpu_devices, self.ffn2_gpu_states = get_device_states(*args)
def forward(self, x1, x2, pos, context, mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=True):
"""
:param pos:
:param x1: X1
:param x2: X2
:param context:
:param mask_tgt:
:param mask_src:
:param incremental:
:param incremental_cache:
:param reuse_source:
:return:
"""
# if self.training:
# coin = (torch.rand(1)[0].item() >= self.death_rate)
#
# self.forward_coin = coin
with torch.no_grad():
# prepare the state for the first function (att > src->att)
self._init_attention_seed(x2)
f_x2, coverage, incremental_cache = self.self_attention(x2, pos, mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache,
cleaning=True)
z1 = f_x2 + x1
            self._init_feedforward1_seed(z1)
g_z1 = self.feed_forward_first(z1, cleaning=True)
z2 = x2 + g_z1
# print("self_attention", z.sum() / (z.size(0) * z.size(1)))
# if not self.ignore_source:
            self._init_src_attention_seed(z2)
h_z2, coverage, incremental_cache = self.src_attention(z2, context, mask_src,
incremental=incremental,
incremental_cache=incremental_cache,
cleaning=True)
y1 = z1 + h_z2
# prepare the state for the second function
self._init_feedforward2_seed(y1)
# print("y1", y1.sum() / (y1.size(0) * y1.size(1)))
k_y1 = self.feed_forward_second(y1, cleaning=True)
# if self.training and self.death_rate > 0:
# g_y1 = g_y1 / (1 - self.death_rate)
y2 = z2 + k_y1
"""return Y1 and Y2"""
return y1, y2, coverage, incremental_cache
def backward_pass(self, y1, y2, dy1, dy2, pos, context,
mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=False):
"""
:param pos:
:param y1
:param y2
        :param dy1: dL/dY1
:param dy2: dL/dY2
:param context:
:param mask_tgt:
:param mask_src:
:param incremental:
:param incremental_cache:
:param reuse_source:
:return:
"""
# if not self.forward_coin: # this layer was skipped, just return
# return y1, y2, dy1, dy2, None
# first block: recompute the ffn transition function
with torch.enable_grad():
y1.requires_grad = True
with torch.random.fork_rng(devices=self.ffn2_gpu_devices, enabled=True):
torch.set_rng_state(self.ffn2_cpu_state)
set_device_states(self.ffn2_gpu_devices, self.ffn2_gpu_states)
k_y1 = self.feed_forward_second(y1)
torch.autograd.backward(k_y1, dy2) # get the gradients dk/dy1
with torch.no_grad():
# restore z2 = Y2 - K(Y1)
z2 = y2 - k_y1
# Dz1 = DY1 + Y1.grad
dz1 = dy1 + y1.grad
del y2, k_y1, dy1
y1.grad = None
# second block
with torch.enable_grad():
z2.requires_grad = True
context.requires_grad = True
with torch.random.fork_rng(devices=self.src_attn_gpu_devices, enabled=True):
torch.set_rng_state(self.src_attn_cpu_state)
set_device_states(self.src_attn_gpu_devices, self.src_attn_gpu_states)
# if not self.ignore_source:
h_z2, _, _ = self.src_attention(z2, context, mask_src,
incremental=incremental,
incremental_cache=incremental_cache)
torch.autograd.backward(h_z2, dz1)
with torch.no_grad():
z1 = y1 - h_z2
del y1, h_z2
dz2 = dy2 + z2.grad
z2.grad = None
del dy2
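            # per-layer gradient w.r.t. the shared encoder context; it is summed
            # over layers in ReversibleDecoderFunction.backward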
grad_context = context.grad
del context.grad
# third block
with torch.enable_grad():
z1.requires_grad = True
with torch.random.fork_rng(devices=self.ffn1_gpu_devices, enabled=True):
torch.set_rng_state(self.ffn1_cpu_state)
set_device_states(self.ffn1_gpu_devices, self.ffn1_gpu_states)
                g_z1 = self.feed_forward_first(z1)
torch.autograd.backward(g_z1, dz2)
with torch.no_grad():
x2 = z2 - g_z1
del z2, g_z1
dx1 = dz1 + z1.grad
z1.grad = None
del dz1
# fourth block
with torch.enable_grad():
x2.requires_grad = True
with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
torch.set_rng_state(self.attn_cpu_state)
set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
f_x2, _, _ = self.self_attention(x2, pos, mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache)
torch.autograd.backward(f_x2, dx1)
with torch.no_grad():
x1 = z1 - f_x2
del z1, f_x2
dx2 = dz2 + x2.grad
x2.grad = None
del dz2
return x1, x2, dx1, dx2, grad_context