| repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses, 1 value) |
|---|---|---|---|---|---|---|
| NMTGMinor | NMTGMinor-master/onmt/reversible_models/transformers.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.linear import FeedForward as position_wise_feed_forward
from onmt.modules.attention import MultiHeadAttention
from torch.autograd.function import Function
import sys
from torch.utils.checkpoint import get_device_states, set_device_states
from onmt.modules.dropout import variational_dropout
def deterministic_dropout(input, p=0.5, training=True, seed=None):
if seed is not None:
torch.manual_seed(seed)
return nn.functional.dropout(input, p=p, training=training)
class SelfAttention(nn.Module):
def __init__(self, opt):
super().__init__()
# self.layer_norm = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.layer_norm = nn.LayerNorm((opt.model_size,), elementwise_affine=True)
self.attn = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=1)
self.dropout = opt.attn_dropout
self.variational = opt.variational_dropout
def forward(self, input, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
q = self.layer_norm(input)
attn, coverage, incremental_cache = self.attn(q, q, q, attn_mask,
incremental=incremental, incremental_cache=incremental_cache)
if not self.variational:
o = F.dropout(attn, p=self.dropout, training=self.training, inplace=False)
else:
o = variational_dropout(attn, p=self.dropout, inplace=False, training=self.training)
if cleaning:
del q, attn
return o, coverage, incremental_cache
class FeedForward(nn.Module):
def __init__(self, opt):
super().__init__()
self.layer_norm = nn.LayerNorm((opt.model_size, ), elementwise_affine=True)
# self.layer_norm = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.feedforward = position_wise_feed_forward(opt.model_size, opt.inner_size, opt.dropout,
variational=opt.variational_dropout)
self.dropout = opt.dropout
self.variational = opt.variational_dropout
def forward(self, input, cleaning=False):
x_norm = self.layer_norm(input)
x_ff = self.feedforward(x_norm)
if not self.variational:
o = F.dropout(x_ff, p=self.dropout, training=self.training, inplace=False)
else:
o = variational_dropout(x_ff, p=self.dropout, inplace=False, training=self.training)
if cleaning:
del x_norm, x_ff
return o
class SourceAttention(nn.Module):
def __init__(self, opt):
super().__init__()
self.layer_norm = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.attn = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=2)
self.dropout = opt.attn_dropout
self.variational = opt.variational_dropout
def forward(self, input, context, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
q = self.layer_norm(input)
attn, coverage, incremental_cache = self.attn(q, context, context, attn_mask, incremental, incremental_cache)
if not self.variational:
o = F.dropout(attn, p=self.dropout, training=self.training, inplace=False)
else:
o = variational_dropout(attn, p=self.dropout, inplace=False, training=self.training)
if cleaning:
del q, attn
return o, coverage, incremental_cache
class ReversibleEncoderFunction(Function):
@staticmethod
def forward(ctx, hidden_states, layers, attn_mask):
attn_output, hidden_states = torch.chunk(hidden_states, 2, dim=-1)
for layer in layers:
# forward pass in the layer
attn_output, hidden_states = layer(
attn_output, hidden_states, attn_mask
)
# attach params to ctx for backward
# Y1 and Y2 were built inside torch.no_grad(), so detaching them cuts no autograd graph;
# it only makes that explicit, and save_for_backward releases their memory more efficiently.
ctx.save_for_backward(attn_output.detach(), hidden_states.detach())
# ctx.save_for_backward(attn_output, hidden_states)
ctx.layers = layers
ctx.attn_mask = attn_mask
with torch.no_grad():
output = attn_output + hidden_states
return output
# concatenate 2 revnet outputs:
# return torch.cat([attn_output, hidden_states], dim=-1)
@staticmethod
def backward(ctx, grad_hidden_states):
# print(grad_hidden_states.sum())
# grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
grad_attn_output = grad_hidden_states
# retrieve params from ctx
attn_output, hidden_states = ctx.saved_tensors
layers = ctx.layers
attn_mask = ctx.attn_mask
for idx, layer in enumerate(layers[::-1]):
# backprop
attn_output, hidden_states, grad_attn_output, grad_hidden_states = layer.backward_pass(
attn_output, hidden_states, grad_attn_output, grad_hidden_states, attn_mask
)
grad_hidden_states = torch.cat([grad_attn_output, grad_hidden_states], dim=-1)
return grad_hidden_states, None, None
class ReversibleTransformerEncoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0):
super().__init__()
self.self_attn = SelfAttention(opt)
self.feedforward = FeedForward(opt)
self.death_rate = death_rate
self.forward_coin = True
def _init_attention_seed(self, *args):
"""
This function sets a new seed for the
attention layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
self.attn_cpu_state = torch.get_rng_state()
self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)
def _init_feedforward_seed(self, *args):
"""
This function sets a new seed for the
feed forward layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
self.ffn_cpu_state = torch.get_rng_state()
self.ffn_gpu_devices, self.ffn_gpu_states = get_device_states(*args)
def forward(self, x1, x2, attn_mask=None):
"""
:param x2:
:param x1:
:param attn_mask:
:return:
"""
with torch.no_grad():
# every forward pass we sample a different seed
# for dropout and save for forward fn in backward pass
# to have correct dropout
self._init_attention_seed(x2)
z1, _, _ = self.self_attn(x2, attn_mask, cleaning=True)
y1 = z1 + x1
self._init_feedforward_seed(y1)
z2 = self.feedforward(y1, cleaning=True)
y2 = z2 + x2
del x1, x2, z1, z2
"""return Y1 and Y2"""
return y1, y2
def backward_pass(self, y1, y2, dy1, dy2, attn_mask=None):
"""
:param y1:
:param y2:
:param dy1:
:param dy2:
:param attn_mask:
:return:
"""
"""Implementation of the backward pass for reversible transformer encoder"""
with torch.enable_grad():
y1.requires_grad = True
with torch.random.fork_rng(devices=self.ffn_gpu_devices, enabled=True):
torch.set_rng_state(self.ffn_cpu_state)
set_device_states(self.ffn_gpu_devices, self.ffn_gpu_states)
z2 = self.feedforward(y1)
# res_hidden_states.backward(grad_hidden_states, retain_graph=True)
torch.autograd.backward(z2, dy2)
with torch.no_grad():
# restore X2 = Y2 - G(Y1)
x2 = y2 - z2
del z2, y2
# DX1 = DY1 + Y1.grad
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
torch.set_rng_state(self.attn_cpu_state)
set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
z1, _, _ = self.self_attn(x2, attn_mask)
z1.backward(dx1)
with torch.no_grad():
# restore X1 = Y1 - F(X2)
x1 = y1 - z1
del y1, z1
dx2 = dy2 + x2.grad
x2.grad = None
del dy2
x2 = x2.detach()
return x1, x2, dx1, dx2
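For reference, the coupling implemented by forward() and inverted by backward_pass() above is the RevNet-style additive scheme Y1 = X1 + F(X2), Y2 = X2 + G(Y1). A minimal standalone sketch (not repository code; the Linear modules f and g are hypothetical stand-ins for the attention and feed-forward sub-layers, and dropout is omitted so the inversion is exact):

```python
import torch

def couple(x1, x2, f, g):
    y1 = x1 + f(x2)          # Y1 = X1 + F(X2)
    y2 = x2 + g(y1)          # Y2 = X2 + G(Y1)
    return y1, y2

def uncouple(y1, y2, f, g):
    x2 = y2 - g(y1)          # X2 = Y2 - G(Y1)
    x1 = y1 - f(x2)          # X1 = Y1 - F(X2)
    return x1, x2

f = torch.nn.Linear(8, 8)    # stand-in for the self-attention sub-layer F
g = torch.nn.Linear(8, 8)    # stand-in for the feed-forward sub-layer G
x1, x2 = torch.randn(4, 8), torch.randn(4, 8)
with torch.no_grad():
    y1, y2 = couple(x1, x2, f, g)
    r1, r2 = uncouple(y1, y2, f, g)
print(torch.allclose(r1, x1, atol=1e-6), torch.allclose(r2, x2, atol=1e-6))  # True True
```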
class ReversibleDecoderFunction(Function):
@staticmethod
def forward(ctx, hidden_states, context, layers, tgt_mask, src_mask,
incremental=False, incremental_cache=None):
bsz, seq_len = hidden_states.shape[0], hidden_states.shape[1]
B = bsz * seq_len
idx = 0
attn_output, hidden_states = torch.chunk(hidden_states, 2, dim=-1)
# print(attn_output.sum()/B, hidden_states.sum()/B)
for layer in layers:
idx = idx + 1
# forward pass in the layer
attn_output, hidden_states, coverage, incremental_cache = layer(
attn_output, hidden_states, context, tgt_mask, src_mask,
incremental=incremental, incremental_cache=incremental_cache
)
# attach params to ctx for backward
# Y1 and Y2 were built inside torch.no_grad(), so detaching them would not cut any graph;
# save_for_backward still releases their memory more efficiently than keeping plain references on ctx.
# The context is saved as-is here and detached per layer in backward() so its gradient can be accumulated manually.
ctx.save_for_backward(attn_output, hidden_states, context)
ctx.layers = layers
ctx.src_mask = src_mask
ctx.tgt_mask = tgt_mask
with torch.no_grad():
output = attn_output + hidden_states
# concatenate 2 revnet outputs:
return output
@staticmethod
def backward(ctx, grad_hidden_states):
# backward() receives one gradient per output of forward() and must return one gradient per input of forward()
# grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
grad_attn_output = grad_hidden_states
# retrieve params from ctx
attn_output, hidden_states, context = ctx.saved_tensors
layers = ctx.layers
src_mask = ctx.src_mask
tgt_mask = ctx.tgt_mask
grad_context = None # we need to sum up the gradients of the context manually
for idx, layer in enumerate(layers[::-1]):
# backprop
"""Note: Here for each layer we detach the context once because we need to consider it
as a separate variable and then later accumulate the gradients"""
attn_output, hidden_states, grad_attn_output, grad_hidden_states, grad_context_ = layer.backward_pass(
attn_output, hidden_states, grad_attn_output, grad_hidden_states,
context.detach(), tgt_mask, src_mask
)
# with torch.no_grad():
if grad_context is None:
grad_context = grad_context_
elif grad_context_ is not None: # prevent ignoring layer making this None
grad_context.add_(grad_context_)
del grad_context_
grad_hidden_states = torch.cat([grad_attn_output, grad_hidden_states], dim=-1)
return grad_hidden_states, grad_context, None, None, None, None, None
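The manual accumulation of grad_context above exists because the same encoder context feeds every decoder layer, so the total gradient w.r.t. the context is the sum of the per-layer contributions. A small standalone sketch of that equivalence (not repository code; hypothetical Linear layers stand in for the source attention):

```python
import torch

context = torch.randn(5, 7, requires_grad=True)
layers = [torch.nn.Linear(7, 7) for _ in range(3)]

# reference: let autograd accumulate over all uses of the shared context
loss = sum(layer(context).sum() for layer in layers)
loss.backward()
auto_grad = context.grad.clone()

# manual: detach the context per layer and add up the partial gradients
manual_grad = None
for layer in layers:
    ctx_i = context.detach().requires_grad_(True)
    layer(ctx_i).sum().backward()
    manual_grad = ctx_i.grad if manual_grad is None else manual_grad + ctx_i.grad

print(torch.allclose(auto_grad, manual_grad, atol=1e-6))  # True
```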
class ReversibleTransformerDecoderLayer(nn.Module):
# def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, ignore_source=False,
# variational=False, death_rate=0.0):
def __init__(self, opt, death_rate=0.0):
super(ReversibleTransformerDecoderLayer, self).__init__()
self.ignore_source = opt.ignore_source
assert not self.ignore_source
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.dropout = opt.dropout
self.self_attention = SelfAttention(opt)
self.feed_forward = FeedForward(opt)
if not self.ignore_source:
self.src_attention = SourceAttention(opt)
def _init_src_attention_seed(self, *args):
"""
This function sets a new seed for the
attention layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
self.src_attn_cpu_state = torch.get_rng_state()
self.src_attn_gpu_devices, self.src_attn_gpu_states = get_device_states(*args)
def _init_attention_seed(self, *args):
"""
This function sets a new seed for the
attention layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
# randomize seeds
self.attn_cpu_state = torch.get_rng_state()
self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)
def _init_feedforward_seed(self, *args):
"""
This function sets a new seed for the
feed forward layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
# randomize seeds
self.ffn_cpu_state = torch.get_rng_state()
self.ffn_gpu_devices, self.ffn_gpu_states = get_device_states(*args)
def forward(self, x1, x2, context, mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=True):
"""
:param x1: X1
:param x2: X2
:param context:
:param mask_tgt:
:param mask_src:
:param incremental:
:param incremental_cache:
:param reuse_source:
:return:
"""
# if self.training:
# coin = (torch.rand(1)[0].item() >= self.death_rate)
#
# self.forward_coin = coin
with torch.no_grad():
# print("x1", x1.sum() / (x1.size(0) * x2.size(1)))
# print("x2", x2.sum() / (x2.size(0) * x2.size(1)))
# prepare the state for the first function (att > src->att)
self._init_attention_seed(x2)
f_x2, coverage, incremental_cache = self.self_attention(x2, mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache,
cleaning=True)
z = f_x2
# print("self_attention", z.sum() / (z.size(0) * z.size(1)))
# if not self.ignore_source:
f_x2, coverage, incremental_cache = self.src_attention(f_x2, context, mask_src,
incremental=incremental,
incremental_cache=incremental_cache,
cleaning=True)
# print("src_attention", f_x2.sum() / (f_x2.size(0) * f_x2.size(1)))
f_x2 = f_x2 + z
del z
# if self.training and self.death_rate > 0:
# f_x2 = f_x2 / (1 - self.death_rate)
y1 = x1 + f_x2
# del f_x2, x1
# prepare the state for the second function
self._init_feedforward_seed(y1)
# print("y1", y1.sum() / (y1.size(0) * y1.size(1)))
g_y1 = self.feed_forward(y1, cleaning=True)
# if self.training and self.death_rate > 0:
# g_y1 = g_y1 / (1 - self.death_rate)
y2 = x2 + g_y1
# print("y2", y2.sum() / (y2.size(0) * y2.size(1)))
del g_y1, x2
"""return Y1 and Y2"""
return y1, y2, coverage, incremental_cache
def backward_pass(self, y1, y2, dy1, dy2, context,
mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=False):
"""
:param y1
:param y2
:param dy1: dL/dX2
:param dy2: dL/dY2
:param context:
:param mask_tgt:
:param mask_src:
:param incremental:
:param incremental_cache:
:param reuse_source:
:return:
"""
# if not self.forward_coin: # this layer was skipped, just return
# return y1, y2, dy1, dy2, None
# first block: recompute the ffn transition function
with torch.enable_grad():
y1.requires_grad = True
with torch.random.fork_rng(devices=self.ffn_gpu_devices, enabled=True):
torch.set_rng_state(self.ffn_cpu_state)
set_device_states(self.ffn_gpu_devices, self.ffn_gpu_states)
g_y1 = self.feed_forward(y1)
torch.autograd.backward(g_y1, dy2)
with torch.no_grad():
# restore X2 = Y2 - G(Y1)
x2 = y2 - g_y1
# DX1 = DY1 + Y1.grad
dx1 = dy1 + y1.grad
del y2, g_y1, dy1
y1.grad = None
# second block
with torch.enable_grad():
x2.requires_grad = True
context.requires_grad = True
with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
torch.set_rng_state(self.attn_cpu_state)
set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
f_x2, coverage, incremental_cache = self.self_attention(x2, mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache)
z = f_x2
# if not self.ignore_source:
f_x2, _, _ = self.src_attention(f_x2, context, mask_src,
incremental=incremental,
incremental_cache=incremental_cache)
f_x2 = f_x2 + z
torch.autograd.backward(f_x2, dx1)
with torch.no_grad():
# restore X1 = Y1 - F(X2)
x1 = y1 - f_x2
del y1, f_x2
dx2 = dy2 + x2.grad
x2.grad = None
del dy2
x2 = x2.detach()
grad_context = context.grad
del context.grad
# # third block
# with torch.enable_grad():
# x2.requires_grad = True
#
# with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
# torch.set_rng_state(self.attn_cpu_state)
# set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
#
# f_x2, _, _ = self.self_attention(x2, mask_tgt)
#
# if self.training and self.death_rate > 0:
# f_x2 = f_x2 / (1 - self.death_rate)
#
# torch.autograd.backward(f_x2, dz1)
#
# with torch.no_grad():
# # restore X1 = Y1 - F(X2)
# x1 = z1 - f_x2
#
# dx1 = dz1
# dx2 = dy2 + x2.grad
# del z1, f_x2
#
# x2.grad = None
# x2 = x2.detach()
return x1, x2, dx1, dx2, grad_context
| 20,232 | 35.001779 | 117 | py |
| NMTGMinor | NMTGMinor-master/onmt/reversible_models/transformers_testing2.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.linear import FeedForward as position_wise_feed_forward
from onmt.modules.attention import MultiHeadAttention
from torch.autograd.function import Function
import sys
from torch.utils.checkpoint import get_device_states, set_device_states
from onmt.modules.dropout import variational_dropout
def deterministic_dropout(input, p=0.5, training=True, seed=None):
if seed is not None:
torch.manual_seed(seed)
return nn.functional.dropout(input, p=p, training=training)
class SelfAttention(nn.Module):
def __init__(self, opt):
super().__init__()
self.layer_norm = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.attn = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=1)
self.dropout = opt.attn_dropout
self.variational = opt.variational_dropout
def forward(self, input, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
q = self.layer_norm(input)
attn, coverage, incremental_cache = self.attn(q, q, q, attn_mask,
incremental=incremental, incremental_cache=incremental_cache)
if not self.variational:
o = F.dropout(attn, p=self.dropout, training=self.training, inplace=False)
else:
o = variational_dropout(attn, p=self.dropout, inplace=False, training=self.training)
if cleaning:
del q, attn
return o, coverage, incremental_cache
class FeedForward(nn.Module):
def __init__(self, opt):
super().__init__()
self.layer_norm = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.feedforward = position_wise_feed_forward(opt.model_size, opt.inner_size, opt.dropout,
variational=opt.variational_dropout)
self.dropout = opt.dropout
self.variational = opt.variational_dropout
def forward(self, input, cleaning=False):
x_norm = self.layer_norm(input)
x_ff = self.feedforward(x_norm)
if not self.variational:
o = F.dropout(x_ff, p=self.dropout, training=self.training, inplace=False)
else:
o = variational_dropout(x_ff, p=self.dropout, inplace=False, training=self.training)
if cleaning:
del x_norm, x_ff
return o
class SourceAttention(nn.Module):
def __init__(self, opt):
super().__init__()
self.layer_norm = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.attn = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=2)
self.dropout = opt.attn_dropout
self.variational = opt.variational_dropout
def forward(self, input, context, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
q = self.layer_norm(input)
attn, coverage, incremental_cache = self.attn(q, context, context, attn_mask, incremental, incremental_cache)
if not self.variational:
o = F.dropout(attn, p=self.dropout, training=self.training, inplace=False)
else:
o = variational_dropout(attn, p=self.dropout, inplace=False, training=self.training)
if cleaning:
del q, attn
return o, coverage, incremental_cache
class ReversibleEncoderFunction(Function):
@staticmethod
def forward(ctx, hidden_states, layers, attn_mask):
attn_output, hidden_states = torch.chunk(hidden_states, 2, dim=-1)
for layer in layers:
# forward pass in the layer
attn_output, hidden_states = layer(
attn_output, hidden_states, attn_mask
)
# attach params to ctx for backward
# Y1 and Y2 were built inside torch.no_grad(), so detaching them cuts no autograd graph;
# it only makes that explicit, and save_for_backward releases their memory more efficiently.
ctx.save_for_backward(attn_output.detach(), hidden_states.detach())
# ctx.save_for_backward(attn_output, hidden_states)
ctx.layers = layers
ctx.attn_mask = attn_mask
with torch.no_grad():
output = attn_output + hidden_states
return output
# concatenate 2 revnet outputs:
# return torch.cat([attn_output, hidden_states], dim=-1)
@staticmethod
def backward(ctx, grad_hidden_states):
# print(grad_hidden_states.sum())
# grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
grad_attn_output = grad_hidden_states
# retrieve params from ctx
attn_output, hidden_states = ctx.saved_tensors
layers = ctx.layers
attn_mask = ctx.attn_mask
for idx, layer in enumerate(layers[::-1]):
# backprop
attn_output, hidden_states, grad_attn_output, grad_hidden_states = layer.backward_pass(
attn_output, hidden_states, grad_attn_output, grad_hidden_states, attn_mask
)
grad_hidden_states = torch.cat([grad_attn_output, grad_hidden_states], dim=-1)
return grad_hidden_states, None, None
class ReversibleTransformerEncoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0):
super().__init__()
self.self_attn = SelfAttention(opt)
self.feedforward = FeedForward(opt)
self.death_rate = death_rate
self.forward_coin = True
def _init_attention_seed(self, *args):
"""
This function sets a new seed for the
attention layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
self.attn_cpu_state = torch.get_rng_state()
self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)
def _init_feedforward_seed(self, *args):
"""
This function sets a new seed for the
feed forward layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
self.ffn_cpu_state = torch.get_rng_state()
self.ffn_gpu_devices, self.ffn_gpu_states = get_device_states(*args)
def forward(self, x1, x2, attn_mask=None):
"""
:param x2:
:param x1:
:param attn_mask:
:return:
"""
with torch.no_grad():
# every forward pass we sample a different seed
# for dropout and save for forward fn in backward pass
# to have correct dropout
coin = True
if self.training:
coin = (torch.rand(1)[0].item() >= self.death_rate)
self.forward_coin = coin
if coin:
self._init_attention_seed(x2)
z1, _, _ = self.self_attn(x2, attn_mask, cleaning=True)
if self.training and self.death_rate > 0:
z1 = z1 / (1 - self.death_rate)
y1 = z1 + x1
self._init_feedforward_seed(y1)
z2 = self.feedforward(y1, cleaning=True)
if self.training and self.death_rate > 0:
z2 = z2 / (1 - self.death_rate)
y2 = z2 + x2
del x1, x2, z1, z2
else:
y1 = x1
y2 = x2
"""return Y1 and Y2"""
return y1, y2
def backward_pass(self, y1, y2, dy1, dy2, attn_mask=None):
"""
:param y1:
:param y2:
:param dy1:
:param dy2:
:param attn_mask:
:return:
"""
"""Implementation of the backward pass for reversible transformer encoder"""
if not self.forward_coin: # this layer was skipped, just return
return y1, y2, dy1, dy2
with torch.enable_grad():
y1.requires_grad = True
with torch.random.fork_rng(devices=self.ffn_gpu_devices, enabled=True):
torch.set_rng_state(self.ffn_cpu_state)
set_device_states(self.ffn_gpu_devices, self.ffn_gpu_states)
z2 = self.feedforward(y1)
if self.training and self.death_rate > 0:
z2 = z2 / (1 - self.death_rate)
# res_hidden_states.backward(grad_hidden_states, retain_graph=True)
torch.autograd.backward(z2, dy2)
with torch.no_grad():
# restore X2 = Y2 - G(Y1)
x2 = y2 - z2
del z2, y2
# DX1 = DY1 + Y1.grad
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
torch.set_rng_state(self.attn_cpu_state)
set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
z1, _, _ = self.self_attn(x2, attn_mask)
if self.training and self.death_rate > 0:
z1 = z1 / (1 - self.death_rate)
z1.backward(dx1)
with torch.no_grad():
# restore X1 = Y1 - F(X2)
x1 = y1 - z1
del y1, z1
dx2 = dy2 + x2.grad
x2.grad = None
del dy2
x2 = x2.detach()
return x1, x2, dx1, dx2
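This variant adds LayerDrop on top of the reversible coupling: a layer survives the coin flip with probability 1 - death_rate, and surviving outputs are rescaled by 1 / (1 - death_rate) so the expected residual contribution is unchanged. A quick standalone check of that expectation (not repository code; values are illustrative):

```python
import torch

death_rate = 0.3
n = 1_000_000
survive = (torch.rand(n) >= death_rate).float()   # coin flips, as in forward()
scale = survive / (1 - death_rate)                # surviving layers are rescaled
print(scale.mean().item())                        # ~1.0: E[x + scale * F(x)] = x + F(x)
```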
class ReversibleDecoderFunction(Function):
@staticmethod
def forward(ctx, hidden_states, context, layers, tgt_mask, src_mask,
incremental=False, incremental_cache=None):
attn_output, hidden_states = torch.chunk(hidden_states, 2, dim=-1)
for layer in layers:
# forward pass in the layer
attn_output, hidden_states, coverage, incremental_cache = layer(
attn_output, hidden_states, context, tgt_mask, src_mask,
incremental=incremental, incremental_cache=incremental_cache
)
# attach params to ctx for backward
# Y1 and Y2 were built inside torch.no_grad(), so detaching them would not cut any graph;
# save_for_backward still releases their memory more efficiently than keeping plain references on ctx.
# The context is saved as-is here and detached per layer in backward() so its gradient can be accumulated manually.
ctx.save_for_backward(attn_output, hidden_states, context)
ctx.layers = layers
ctx.src_mask = src_mask
ctx.tgt_mask = tgt_mask
with torch.no_grad():
output = attn_output + hidden_states
# concatenate 2 revnet outputs:
return output
@staticmethod
def backward(ctx, grad_hidden_states):
# backward() receives one gradient per output of forward() and must return one gradient per input of forward()
# grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
grad_attn_output = grad_hidden_states
# retrieve params from ctx
attn_output, hidden_states, context = ctx.saved_tensors
layers = ctx.layers
src_mask = ctx.src_mask
tgt_mask = ctx.tgt_mask
grad_context = None # we need to sum up the gradients of the context manually
for idx, layer in enumerate(layers[::-1]):
# backprop
"""Note: Here for each layer we detach the context once because we need to consider it
as a separate variable and then later accumulate the gradients"""
attn_output, hidden_states, grad_attn_output, grad_hidden_states, grad_context_ = layer.backward_pass(
attn_output, hidden_states, grad_attn_output, grad_hidden_states,
context.detach(), tgt_mask, src_mask
)
# with torch.no_grad():
if grad_context is None:
grad_context = grad_context_
elif grad_context_ is not None: # prevent ignoring layer making this None
grad_context.add_(grad_context_)
del grad_context_
grad_hidden_states = torch.cat([grad_attn_output, grad_hidden_states], dim=-1)
return grad_hidden_states, grad_context, None, None, None, None, None
class ReversibleTransformerDecoderLayer(nn.Module):
# def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, ignore_source=False,
# variational=False, death_rate=0.0):
def __init__(self, opt, death_rate=0.0):
super(ReversibleTransformerDecoderLayer, self).__init__()
self.ignore_source = opt.ignore_source
assert not self.ignore_source
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.dropout = opt.dropout
self.self_attention = SelfAttention(opt)
self.feed_forward = FeedForward(opt)
if not self.ignore_source:
self.src_attention = SourceAttention(opt)
def _init_src_attention_seed(self, *args):
"""
This function sets a new seed for the
attention layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
self.src_attn_cpu_state = torch.get_rng_state()
self.src_attn_gpu_devices, self.src_attn_gpu_states = get_device_states(*args)
def _init_attention_seed(self, *args):
"""
This function sets a new seed for the
attention layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
# randomize seeds
self.attn_cpu_state = torch.get_rng_state()
self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)
def _init_feedforward_seed(self, *args):
"""
This function sets a new seed for the
feed forward layer to make dropout deterministic
for both forward calls: 1 normal forward
call and 1 forward call in backward
to recalculate activations.
"""
# randomize seeds
self.ffn_cpu_state = torch.get_rng_state()
self.ffn_gpu_devices, self.ffn_gpu_states = get_device_states(*args)
def forward(self, x1, x2, context, mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=True):
"""
:param x1: X1
:param x2: X2
:param context:
:param mask_tgt:
:param mask_src:
:param incremental:
:param incremental_cache:
:param reuse_source:
:return:
"""
coin = True
if self.training:
coin = (torch.rand(1)[0].item() >= self.death_rate)
self.forward_coin = coin
if coin:
with torch.no_grad():
# prepare the state for the first function (att > src->att)
self._init_attention_seed(x2)
f_x2, coverage, incremental_cache = self.self_attention(x2, mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache,
cleaning=True)
z = f_x2
# if not self.ignore_source:
f_x2, coverage, incremental_cache = self.src_attention(f_x2, context, mask_src,
incremental=incremental,
incremental_cache=incremental_cache,
cleaning=True)
f_x2 = f_x2 + z
del z
if self.training and self.death_rate > 0:
f_x2 = f_x2 / (1 - self.death_rate)
y1 = x1 + f_x2
del f_x2, x1
# prepare the state for the second function
self._init_feedforward_seed(y1)
g_y1 = self.feed_forward(y1, cleaning=True)
if self.training and self.death_rate > 0:
g_y1 = g_y1 / (1 - self.death_rate)
y2 = x2 + g_y1
del g_y1, x2
else:
y1, y2 = x1, x2
coverage = None
"""return Y1 and Y2"""
return y1, y2, coverage, incremental_cache
def backward_pass(self, y1, y2, dy1, dy2, context,
mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=False):
"""
:param y1
:param y2
:param dy1: dL/dX2
:param dy2: dL/dY2
:param context:
:param mask_tgt:
:param mask_src:
:param incremental:
:param incremental_cache:
:param reuse_source:
:return:
"""
if not self.forward_coin: # this layer was skipped, just return
return y1, y2, dy1, dy2, None
# first block: recompute the ffn transition function
with torch.enable_grad():
y1.requires_grad = True
with torch.random.fork_rng(devices=self.ffn_gpu_devices, enabled=True):
torch.set_rng_state(self.ffn_cpu_state)
set_device_states(self.ffn_gpu_devices, self.ffn_gpu_states)
g_y1 = self.feed_forward(y1)
if self.training and self.death_rate > 0:
g_y1 = g_y1 / (1 - self.death_rate)
torch.autograd.backward(g_y1, dy2)
with torch.no_grad():
# restore X2 = Y2 - G(Y1)
x2 = y2 - g_y1
# DX1 = DY1 + Y1.grad
dx1 = dy1 + y1.grad
del y2, g_y1, dy1
y1.grad = None
# second block
with torch.enable_grad():
x2.requires_grad = True
context.requires_grad = True
with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
torch.set_rng_state(self.attn_cpu_state)
set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
f_x2, coverage, incremental_cache = self.self_attention(x2, mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache)
z = f_x2
# if not self.ignore_source:
f_x2, _, _ = self.src_attention(f_x2, context, mask_src,
incremental=incremental,
incremental_cache=incremental_cache)
f_x2 = f_x2 + z
if self.training and self.death_rate > 0:
f_x2 = f_x2 / (1 - self.death_rate)
torch.autograd.backward(f_x2, dx1)
with torch.no_grad():
# restore X1 = Y1 - F(X2)
x1 = y1 - f_x2
del y1, f_x2
dx2 = dy2 + x2.grad
x2.grad = None
del dy2
x2 = x2.detach()
grad_context = context.grad
del context.grad
# # third block
# with torch.enable_grad():
# x2.requires_grad = True
#
# with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
# torch.set_rng_state(self.attn_cpu_state)
# set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
#
# f_x2, _, _ = self.self_attention(x2, mask_tgt)
#
# if self.training and self.death_rate > 0:
# f_x2 = f_x2 / (1 - self.death_rate)
#
# torch.autograd.backward(f_x2, dz1)
#
# with torch.no_grad():
# # restore X1 = Y1 - F(X2)
# x1 = z1 - f_x2
#
# dx1 = dz1
# dx2 = dy2 + x2.grad
# del z1, f_x2
#
# x2.grad = None
# x2 = x2.detach()
return x1, x2, dx1, dx2, grad_context
| 20,730 | 34.559177 | 117 | py |
| NMTGMinor | NMTGMinor-master/onmt/reversible_models/__init__.py |  | 0 | 0 | 0 | py |
| NMTGMinor | NMTGMinor-master/onmt/speech/Augmenter.py |
import math
import torch
from collections import defaultdict
import onmt
import random
class Augmenter(object):
"""
Implementation of the "Spec Augmentation" method
(Only vertical and horizontal masking)
"""
def __init__(self, F=8, mf=2, T=64, max_t=0.2, mt=2,
input_size=40, concat=4):
self.F = F
self.mf = mf
self.T = T
self.max_t = max_t
self.mt = mt
self.input_size = input_size
self.concat = concat
print("[INFO] Spec-Augmentation with input size %d F=%d, T=%d" % (self.input_size, F, T))
def augment(self, tensor):
feat_size = tensor.size(1)
original_len = tensor.size(0)
# reshape_size = feat_size / self.input_size
tensor = tensor.float()
# First we have to upsample the tensor (if it was downsampled during preprocessing)
# Copy to a new storage, because otherwise the original tensor would be zeroed permanently
tensor_ = tensor.view(-1, self.input_size).new(*tensor.size()).copy_(tensor)
for _ in range(self.mf):
# frequency masking (second dimension)
# 40 is the number of features (logmel)
f = int(random.uniform(0.0, self.F))
f_0 = int(random.uniform(0.0, 40 - f))
tensor_[:, f_0:f_0 + f].zero_()
for _ in range(self.mt):
# time masking (first dimension)
t = int(random.uniform(0.0, self.T))
t = min(t, int(self.max_t * original_len))
if original_len - t < 0:
continue
t_0 = int(random.uniform(0.0, original_len - t - 1))
tensor_[t_0: t_0 + t].zero_()
# reshaping back to downsampling
tensor__ = tensor_.view(original_len, feat_size)
return tensor__
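A short usage sketch of the Augmenter above on a fake log-mel utterance (not repository code; 100 frames x 40 features, tensor values are placeholders). After augment(), a few frequency bands (columns) and time spans (rows) are zeroed out:

```python
import torch

aug = Augmenter(F=8, mf=2, T=64, max_t=0.2, mt=2, input_size=40, concat=4)
features = torch.randn(100, 40)                     # (time, feature)
masked = aug.augment(features)
# shape is unchanged; a fraction of entries has been masked to zero
print(masked.shape, (masked == 0).float().mean().item())
```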
| 1,821 | 26.19403 | 97 | py |
| NMTGMinor | NMTGMinor-master/onmt/speech/ctc_loss.py |
from distutils.version import LooseVersion
import logging
import numpy as np
import six
import torch
import torch.nn.functional as F
import onmt
class CTC(torch.nn.Module):
def __init__(self, vocab_size, hidden_size, dropout_rate,
ctc_type="builtin", reduce=True,
padding_idx=-1, blank_idx=0):
super().__init__()
self.vocab_size = vocab_size
self.hidden_size = hidden_size
if padding_idx == -1:
self.padding_idx = onmt.constants.PAD
else:
self.padding_idx = padding_idx
if blank_idx == -1:
self.blank_idx = onmt.constants.TGT_PAD
else:
self.blank_idx = blank_idx
# why do we need dropout at ctc ?
self.dropout_rate = dropout_rate
# For PyTorch >= 1.7.0, the CTC loss is always the builtin implementation
self.ctc_type = (
ctc_type
if LooseVersion(torch.__version__) < LooseVersion("1.7.0")
else "builtin"
)
if ctc_type != self.ctc_type:
logging.warning(f"CTC was set to {self.ctc_type} due to PyTorch version.")
if self.ctc_type == "builtin":
reduction_type = "sum" if reduce else "none"
self.ctc_loss = torch.nn.CTCLoss(blank=onmt.constants.TGT_PAD, reduction=reduction_type, zero_infinity=True)
elif self.ctc_type == "warpctc":
import warpctc_pytorch as warp_ctc
self.ctc_loss = warp_ctc.CTCLoss(size_average=False, length_average=False)
else:
raise ValueError(
'ctc_type must be "builtin" or "warpctc": {}'.format(self.ctc_type)
)
self.ignore_id = -1
self.reduce = reduce
def compute_loss(self, logits, targets, ilen, olen):
"""
:param logits:
:param targets:
:param ilen:
:param olen:
:return:
"""
if self.ctc_type == "builtin":
log_probs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
# Use the deterministic CuDNN implementation of CTC loss to avoid
# [issue#17798](https://github.com/pytorch/pytorch/issues/17798)
with torch.backends.cudnn.flags(deterministic=True):
loss = self.ctc_loss(log_probs, targets, ilen, olen)
return loss
elif self.ctc_type == "warpctc":
return self.ctc_loss(logits, targets, ilen, olen)
else:
raise NotImplementedError
def forward(self, model_outputs, targets, **kwargs):
# context logits: T x B x V
# targets: T x B
logits = model_outputs['encoder_logits']
if 'wav2vec_padding_mask' in model_outputs:
source_mask = model_outputs['wav2vec_padding_mask'].long()
else:
source_mask = model_outputs['src_mask'].long()
# target mask should be T x B
target_mask = targets.ne(self.padding_idx)
target_lengths = target_mask.long().sum(0)
# source mask should be B x 1 x T or B x T
if source_mask.dim() == 3:
input_lengths = (1 - source_mask).squeeze(1).sum(1)
else:
input_lengths = (1 - source_mask).sum(1)
# print("MAX SOURCE LENGTH", logits.size(0), logits.size())
# print(input_lengths)
# print("MAX LENGTH", targets.size(0), targets.size())
# print(target_lengths)
if self.ctc_type == 'builtin':
# target is batch first
targets = targets.transpose(0, 1)
loss = self.compute_loss(logits, targets, input_lengths, target_lengths)
return loss
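The length bookkeeping above can be summarized in a standalone sketch: the source mask marks padded positions with 1, so input lengths are (1 - mask).sum(1), while target lengths count non-PAD tokens per sentence. The snippet below uses plain torch.nn.CTCLoss with blank=0 and made-up shapes, not this module's configuration:

```python
import torch
import torch.nn.functional as F

T_enc, B, V, PAD = 50, 2, 32, 0
logits = torch.randn(T_enc, B, V)                      # encoder logits: T x B x V
src_mask = torch.zeros(B, T_enc, dtype=torch.long)     # 1 = padded position
src_mask[1, 40:] = 1                                   # second utterance is shorter
targets = torch.randint(1, V, (B, 12))                 # batch-first targets, no PAD inside

input_lengths = (1 - src_mask).sum(1)                  # tensor([50, 40])
target_lengths = targets.ne(PAD).sum(1)                # tensor([12, 12])

log_probs = F.log_softmax(logits, dim=-1)
ctc = torch.nn.CTCLoss(blank=0, reduction="sum", zero_infinity=True)
loss = ctc(log_probs, targets, input_lengths, target_lengths)
print(loss.item())
```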
| 3,645 | 30.162393 | 120 | py |
| NMTGMinor | NMTGMinor-master/onmt/speech/__init__.py |  | 0 | 0 | 0 | py |
| NMTGMinor | NMTGMinor-master/onmt/data/mmap_indexed_dataset.py |
import os
import shutil
import struct
import numpy as np
import torch
import torch.utils.data
from functools import lru_cache
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
6: np.float32,
7: np.double,
8: np.uint16
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + '.idx'
def data_file_path(prefix_path):
return prefix_path + '.bin'
def _warmup_mmap_file(path):
with open(path, 'rb') as stream:
while stream.read(100 * 1024 * 1024):
pass
# class MMapIndexedDataset(torch.utils.data.Dataset):
class MMapIndexedDataset(object):
class Index(object):
_HDR_MAGIC = b'MMIDIDX\x00\x00'
@classmethod
def writer(cls, path, dtype):
class _Writer(object):
def __enter__(self):
self._file = open(path, 'wb')
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack('<Q', 1))
self._file.write(struct.pack('<B', code(dtype)))
return self
@staticmethod
def _get_pointers(sizes):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
return pointers
def write(self, sizes):
pointers = self._get_pointers(sizes)
self._file.write(struct.pack('<Q', len(sizes)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order='C'))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order='C'))
del pointers
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path):
with open(path, 'rb') as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = struct.unpack('<Q', stream.read(8))
assert (1,) == version
dtype_code, = struct.unpack('<B', stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack('<Q', stream.read(8))[0]
offset = stream.tell()
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len,
offset=offset + self._sizes.nbytes)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path):
self._path = path
self._index = self.Index(index_file_path(self._path))
_warmup_mmap_file(data_file_path(self._path))
self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
@lru_cache(maxsize=8)
def __getitem__(self, i):
ptr, size = self._index[i]
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
if self._index.dtype != np.int64:
np_array = np_array.astype(np.int64)
# return torch.from_numpy(np_array)
# to avoid the warning
return torch.from_numpy(np.array(np_array))
@property
def sizes(self):
return self._index.sizes
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return (
os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
)
class MMapIndexedDatasetBuilder(object):
def __init__(self, out_file, dtype=np.int32):
self._data_file = open(out_file, 'wb')
self._dtype = dtype
self._sizes = []
def add_item(self, tensor):
if isinstance(tensor, torch.Tensor):
np_array = np.array(tensor.numpy(), dtype=self._dtype)
else:
np_array = tensor.astype(self._dtype)
self._data_file.write(np_array.tobytes(order='C'))
self._sizes.append(np_array.size)
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
for size in index.sizes:
self._sizes.append(size)
# Concatenate data
with open(data_file_path(another_file), 'rb') as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes)
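A usage sketch of the builder and dataset defined above: write a few integer sequences to '<prefix>.bin', finalize the '<prefix>.idx' index, then read them back through the memory map (the prefix is an arbitrary placeholder):

```python
import numpy as np
import torch

prefix = "example_corpus"                                   # placeholder path prefix
builder = MMapIndexedDatasetBuilder(data_file_path(prefix), dtype=np.int32)
for seq in ([1, 2, 3, 4], [5, 6], [7, 8, 9]):
    builder.add_item(torch.tensor(seq, dtype=torch.int32))
builder.finalize(index_file_path(prefix))

dataset = MMapIndexedDataset(prefix)
print(len(dataset), dataset.sizes)                          # 3, [4 2 3]
print(dataset[1])                                           # tensor([5, 6])
```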
| 6,566 | 27.184549 | 105 | py |
| NMTGMinor | NMTGMinor-master/onmt/data/wav_dataset.py |
import torch
import torchaudio as taudio
from functools import lru_cache
from onmt.utils import safe_readaudio
import numpy as np
import soundfile
import math
import torchaudio
import os
# this function reads wav file based on the timestamp in seconds
def safe_readaudio_from_cache(file_, wav_path, start=0.0, end=0.0, sample_rate=16000):
offset = math.floor(sample_rate * start)
num_frames = -1 if end <= start else math.ceil(sample_rate * (end - start))
if file_ is not None:
dtype = "float32"
frames = file_._prepare_read(offset, None, num_frames)
waveform = file_.read(frames, dtype, always_2d=True)
sample_rate_ = file_.samplerate
tensor = torch.from_numpy(waveform)
tensor = tensor[:, 0].unsqueeze(1)
else:
# no cached SoundFile handle: fall back to reading the segment directly from disk
tensor = safe_readaudio(wav_path, start, end, sample_rate)
# select the first channel?
# tensor has size [length, num_channel] in which channel should be 1 for wav2vec
return tensor
class WavDataset(torch.utils.data.Dataset):
def __init__(self, wav_path_list, cache_size=0):
"""
:param scp_path_list: list of path to the ark matrices
"""
self.wav_path_list = wav_path_list
self._sizes = len(self.wav_path_list)
self._dtype = torch.float32
if cache_size > 0:
self.cache = dict()
self.usage = dict()
else:
self.cache = None
self.cache_size = cache_size
def flush_cache(self):
if self.cache is not None:
for wav_path in self.cache:
self.cache[wav_path].close()
self.cache[wav_path] = None
self.cache = dict()
self.usage = dict()
@property
def dtype(self):
# I'm not sure when this function is called
return self._dtype
@property
def sizes(self):
return self._sizes
def __len__(self):
return self._sizes
def __getitem__(self, i):
wav_info = self.wav_path_list[i]
# it should be a tuple (wav_file, start, end)
wav_path, start, end, sample_rate = wav_info
# many utterances share the same wav file -> we can keep the same open file object in memory
if self.cache is not None:
# take the object in cache if exists
if wav_path in self.cache:
file_ = self.cache[wav_path]
self.usage[wav_path] = self.usage[wav_path] + 1
else:
# read the audio file
# print(os.path.exists(wav_path), wav_path)
try:
file_ = soundfile.SoundFile(wav_path, 'r')
except RuntimeError as e:
print("Wavpath invalid:", wav_path, os.path.exists(wav_path))
raise e
if len(self.cache) > self.cache_size:
# remove 1 file from cache based on lowest usage, maybe?
min_key = min(self.usage, key=self.usage.get)
if min_key != wav_path:  # never evict the entry for the file we are about to use
self.cache[min_key].close()
self.cache.pop(min_key, None)
self.usage.pop(min_key, None)
# add the object to the cache
self.cache[wav_path] = file_
self.usage[wav_path] = 1
data = safe_readaudio_from_cache(file_, wav_path, start, end, sample_rate)
else:
file_ = None
data = safe_readaudio(wav_path, start, end, sample_rate)
return data
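A usage sketch of WavDataset: each entry addresses a segment of a wav file as (path, start, end, sample_rate), and with cache_size > 0 the open SoundFile handle is reused across utterances from the same file. The paths below are placeholders, not real files:

```python
# not repository code; paths are hypothetical
entries = [
    ("corpus/audio/utt0001.wav", 0.0, 3.2, 16000),   # segment of a wav file
    ("corpus/audio/utt0001.wav", 3.2, 7.5, 16000),   # same file, different segment
]
dataset = WavDataset(entries, cache_size=16)
waveform = dataset[0]    # per the comment above: FloatTensor of shape [num_samples, 1]
```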
| 3,544 | 30.371681 | 102 | py |
| NMTGMinor | NMTGMinor-master/onmt/data/multistream_dataset.py |
from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
import onmt
from onmt.speech.Augmenter import Augmenter
from onmt.modules.dropout import switchout
"""
Data management for stream-to-stream models
Two basic classes:
- Batch stores the input / output sequences, grouped into tensors with the same length (by padding)
- Dataset stores all of the data and
"""
class Stream(object):
# An object to manage the data within a stream
def __init__(self, src_data, tgt_data=None,
src_lang_data=None, tgt_lang_data=None,
src_type='text',
length_multiplier=1,
augmenter=None, upsampling=False,
**kwargs):
"""
:param src_data: list of source tensors
:param tgt_data: list of target tensors
:param src_lang_data: list of language features for the source (TB finished)
:param tgt_lang_data: list of language features for the target (TB finished)
:param src_type: text or audio
:param reshape_speech: the number of frames to be reshaped
:param augmenter: using augmentation for speech
:param merge: if the two sequences are going to be merged for Relative Transformer
"""
self.tensors = defaultdict(lambda: None)
self.has_target = False
self.src_type = src_type
# self.upsampling = upsampling
# self.feature_size = kwargs.get('feature_size', 40)
self.length_multiplier = length_multiplier
if src_data is not None:
self.tensors['source'], self.tensors['source_pos'], self.src_lengths = \
self.collate(src_data,
type=self.src_type,
augmenter=augmenter)
self.tensors['src_length'] = self.src_lengths
self.src_size = sum(self.src_lengths)
else:
self.src_size = 0
if tgt_data is not None:
target_full, target_pos, self.tgt_lengths = self.collate(tgt_data)
# self.tensors['target'] = target_full
# self.tensors['target_input'] = target_full[:-1]
# the last sentence has one element (eos) missing
# self.tgt_lengths[-1] = self.tgt_lengths[-1] - 1
# self.tensors['target_output'] = target_full[1:]
# self.tensors['target_pos'] = target_pos[:-1]
self.tensors['target_input'], self.tensors['target_output'], \
self.tensors['target_pos'], self.tgt_lengths = self.collate(tgt_data, target=True)
self.tensors['tgt_mask'] = self.tensors['target_output'].ne(onmt.constants.PAD)
self.has_target = True
self.tgt_size = sum([len(x) - 1 for x in tgt_data])
else:
self.tgt_size = 0
self.size = len(src_data) if src_data is not None else len(tgt_data)
if src_lang_data is not None:
self.tensors['source_lang'] = torch.cat(src_lang_data).long()
if tgt_lang_data is not None:
self.tensors['target_lang'] = torch.cat(tgt_lang_data).long()
def switchout(self, swrate, src_vocab_size, tgt_vocab_size):
# Switch out function ... currently works with only source text data
if self.src_type == 'text':
self.tensors['source'] = switchout(self.tensors['source'], src_vocab_size, swrate, transpose=True)
if self.has_target:
self.tensors['target'] = switchout(self.tensors['target'], tgt_vocab_size, swrate, transpose=True, offset=1)
target_full = self.tensors['target']
self.tensors['target_input'] = target_full[:-1]
self.tensors['target_output'] = target_full[1:]
self.tensors['tgt_mask'] = self.tensors['target_output'].ne(onmt.constants.PAD)
# down sampling the speech signal by simply concatenating n features (reshaping)
def downsample(self, data):
if self.reshape_speech == 0:
return data
else:
concat = self.reshape_speech
tensor_ = data.float() # adding float because of fp16 data storage
add = (concat - tensor_.size()[0] % concat) % concat
z = torch.FloatTensor(add, tensor_.size()[1]).zero_()
# adding an additional dimension as padding
tensor_ = torch.cat((tensor_, z), 0)
tensor_ = tensor_.reshape((int(tensor_.size()[0] / concat), tensor_.size()[1] * concat))
return tensor_
def augment_speech(self):
return
def collate(self, data, type="text", augmenter=None, target=False):
"""
Assembling the individual sequences into one single tensor, included padding
:param target:
:param data: the list of sequences in chronological order
:param type: text or audio
:param augmenter: for augmentation in audio models
:return:
data (list of Torch.Tensor) size 1 x T
"""
if type == "text":
if not target:
lengths = torch.LongTensor([x.size(0) for x in data])
positions = [torch.arange(length_) for length_ in lengths]
positions = torch.cat(positions)
# the last part is padded (so that the actual batch size divides by the multiplier)
# tensor_length = math.ceil(sum(lengths) / self.length_multiplier) * self.length_multiplier
tensor_length = torch.sum(lengths).item()
# create a placeholder for the data
tensor = data[0].new(tensor_length).fill_(onmt.constants.PAD)
offset = 0
for sample in data:
current_length = sample.size(0)
tensor.narrow(0, offset, current_length).copy_(sample)
offset += current_length
tensor = tensor.unsqueeze(1) # batch size is 1
return tensor, positions, lengths
else:
# because we take the last unit away
lengths = torch.LongTensor([x.size(0) - 1 for x in data])
positions = [torch.arange(length_) for length_ in lengths]
positions = torch.cat(positions)
tensor_length = torch.sum(lengths).item()
# create a placeholder for the data
input = data[0].new(tensor_length).fill_(onmt.constants.PAD)
# create a placeholder for the data
target = data[0].new(tensor_length).fill_(onmt.constants.PAD)
offset = 0
for sample in data:
current_length = sample.size(0) - 1
input.narrow(0, offset, current_length).copy_(sample[:-1])
target.narrow(0, offset, current_length).copy_(sample[1:])
offset += current_length
input = input.unsqueeze(1)
target = target.unsqueeze(1)
return input, target, positions, lengths
elif type == "audio":
raise NotImplementedError
#
# # First step: on-the-fly processing for the samples
# # Reshaping: either downsampling or upsampling
# # On the fly augmentation
# samples = []
#
# for i in range(len(data)):
# sample = data[i]
#
# if augmenter is not None:
# sample = augmenter.augment(sample)
#
# if self.upsampling:
# sample = sample.view(-1, self.feature_size)
#
# samples.append(sample)
#
# # compute the lengths afte on-the-fly processing
# lengths = [x.size(0) for x in samples]
#
# max_length = max(lengths)
#
# # allocate data for the batch speech
# feature_size = samples[0].size(1)
# batch_size = len(data)
#
# # feature size + 1 because the last dimension is created for padding
# tensor = data[0].float().new(batch_size, max_length, feature_size + 1).fill_(onmt.constants.PAD)
#
# for i in range(len(samples)):
# sample = samples[i]
#
# data_length = sample.size(0)
# offset = max_length - data_length if align_right else 0
#
# tensor[i].narrow(0, offset, data_length).narrow(1, 1, sample.size(1)).copy_(sample)
# # in padding dimension: 0 is not padded, 1 is padded
# tensor[i].narrow(0, offset, data_length).narrow(1, 0, 1).fill_(1)
#
# return tensor, None, lengths
# else:
# raise NotImplementedError
def get(self, name):
if name in self.tensors:
return self.tensors[name]
else:
return None
def cuda(self, fp16=False):
"""
Send the minibatch data into GPU. Old-fashioned without the 'device' control
:param fp16:
:return: None
"""
for key, tensor in self.tensors.items():
if isinstance(tensor, dict):
for k in tensor:
v = tensor[k]
tensor[k] = v.cuda()
elif tensor is not None:
if tensor.type() == "torch.FloatTensor" and fp16:
self.tensors[key] = tensor.half()
self.tensors[key] = self.tensors[key].cuda()
else:
continue
class StreamDataset(torch.utils.data.Dataset):
def __init__(self, src_data, tgt_data,
src_langs=None, tgt_langs=None,
batch_size_words=2048,
data_type="text", batch_size_sents=128,
multiplier=1, cleaning=False,
augment=False, debug=False,
**kwargs):
"""
:param src_data: List of tensors for the source side (1D for text, 2 or 3Ds for other modalities)
:param tgt_data: List of tensors (1D text) for the target side (already padded with <s> and </s>
:param src_langs: Source languages (list of one-tensors)
:param tgt_langs: Target Languages (list of one-tensors)
:param batch_size_words: Maximum number of words in the minibatch (MB can't have more than this)
:param data_type: Text or Audio
:param batch_size_sents: Maximum number of sequences in the minibatch (MB can't have more than this)
:param multiplier: The number of sequences must divide by this number (for fp16 when multiplier=8)
:param reshape_speech: Put N frames together to reduce the length (this might be done already in preprocessing)
:param augment: Speech Augmentation (currently only spec augmentation is implemented)
"""
"""
For alignment, the right-aligned data looks like:
P P P P D D D D
P P D D D D D D
P P P P P D D D
P P P D D D D D
This can affect positional encoding (whose implementation is not consistent w.r.t padding)
For models with absolute positional encoding, src and tgt should be aligned left (This is default)
For models with relative positional encoding, src should be right and tgt should be left
"""
self.src = src_data
self._type = data_type
self.upsampling = kwargs.get('upsampling', False)
self.debug = debug
# self.reshape_speech = reshape_speech
if tgt_data:
self.tgt = tgt_data
if src_data:
assert (len(self.src) == len(self.tgt))
else:
self.tgt = None
self.max_src_len = kwargs.get('max_src_len', None)
self.max_tgt_len = kwargs.get('max_tgt_len', 128)
if self.max_src_len is None:
if self._type == 'text':
self.max_src_len = 128
else:
self.max_src_len = 1024
# Remove the sentences that are empty
if cleaning:
cleaned_src = []
cleaned_tgt = []
n_removes = []
for i, (src_tensor, tgt_tensor) in enumerate(zip(self.src, self.tgt)):
src_size = src_tensor.size(0)
tgt_size = tgt_tensor.size(0)
if src_size < self.max_src_len and tgt_size < self.max_tgt_len:
cleaned_src.append(src_tensor)
cleaned_tgt.append(tgt_tensor)
else:
n_removes.append(i)
self.src = cleaned_src
self.tgt = cleaned_tgt
print("Removed %d sentences that are too long. " % len(n_removes))
# in stream dataset we don't sort data
self.src_langs = src_langs
self.tgt_langs = tgt_langs
if self.src_langs is not None and self.tgt_langs is not None:
assert (len(src_langs) == len(tgt_langs))
if cleaning:
n_samples = len(src_langs)
if len(self.src_langs) > 1:
self.src_langs = [self.src_langs[i] for i in range(n_samples) if i not in n_removes]
if len(self.tgt_langs) > 1:
self.tgt_langs = [self.tgt_langs[i] for i in range(n_samples) if i not in n_removes]
# In "bilingual" case, the src_langs only contains one single vector
# Which is broadcasted to batch_size
if len(src_langs) <= 1:
self.bilingual = True
else:
self.bilingual = False
self.fullSize = len(self.src) if self.src is not None else len(self.tgt)
# maximum number of tokens in a mb
self.batch_size_words = batch_size_words
# maximum sequences in a mb
self.batch_size_sents = batch_size_sents
# the actual batch size must divide by this multiplier (for fp16 it has to be 4 or 8)
self.multiplier = multiplier
# by default: count the amount of padding when we group mini-batches
self.pad_count = False
# group samples into mini-batches
self.streams = []
self.num_batches = 0
self.n_streams = 0
self.allocate_batch()
self.current_stream_index = 0
self.in_stream_index = 0
self.stream_order = None
if augment:
self.augmenter = Augmenter()
else:
self.augmenter = None
def size(self):
return self.fullSize
def switchout(self, batch):
pass
# This function allocates the mini-batches (grouping sentences with the same size)
def allocate_batch(self):
cur_stream = []
cur_batch = []
cur_batch_size = 0
cur_batch_sizes = []
def oversize_(cur_batch, sent_size):
if len(cur_batch) == 0:
return False
if len(cur_batch) >= self.batch_size_sents:
return True
if cur_batch_size + sent_size > self.batch_size_words:
return True
return False
i = 0
while i < self.fullSize:
src_size = self.src[i].size(0) if self.src is not None else 0
tgt_size = self.tgt[i].size(0) if self.tgt is not None else 0
if self.debug:
print(i, src_size, tgt_size)
if self.tgt is not None and self.src is not None:
sentence_length = self.tgt[i].size(0) + self.src[i].size(0) - 1
elif self.tgt is not None:
sentence_length = self.tgt[i].size(0) - 1
else:
sentence_length = self.src[i].size(0)
# first of document or meet a blank line:
if i == 0 or src_size == 0 or tgt_size == 2:
if len(cur_batch) > 0:
if self.debug:
print("Created a batch: ", cur_batch)
cur_stream.append(cur_batch)
if len(cur_stream) > 0:
self.streams.append(cur_stream)
cur_stream = []
cur_batch = []
cur_batch_size = 0
cur_batch_sizes = []
if src_size == 0 or tgt_size == 2: # blank line, move on
i = i + 1
continue
oversized = oversize_(cur_batch, sentence_length)
# if the current item makes the batch exceed max size
# then we create a new batch
if oversized:
# cut-off the current list to fit the multiplier
batch_ = cur_batch
cur_stream.append(batch_) # add this batch into the current stream
if self.debug:
print("Created a batch: ", batch_)
cur_batch = []
cur_batch_sizes = []
cur_batch_size = 0
cur_batch.append(i)
cur_batch_size += sentence_length
cur_batch_sizes.append(sentence_length)
i = i + 1
# catch the last batch
if len(cur_batch) > 0:
cur_stream.append(cur_batch)
# catch the last stream:
if len(cur_stream) > 0:
self.streams.append(cur_stream)
self.num_batches = sum([len(stream) for stream in self.streams])
self.n_streams = len(self.streams)
print("* Total %d streams collected." % self.n_streams)
def __len__(self):
return self.num_batches
def __getitem__(self, index):
"""
:param index: the index of the mini-batch in the list
:return: Batch
"""
# print("!!! Stream dataset cannot be accessed with getitem ...")
# raise NotImplementedError
stream_id, batch_id = index
n_batches = len(self.streams[stream_id])
assert stream_id < self.n_streams, "%d > %d" % (stream_id, self.n_streams)
assert batch_id < n_batches, "%d > %d" % (batch_id, n_batches)
# access the batch
batch_ids = self.streams[stream_id][batch_id]
if self.src:
src_data = [self.src[i] for i in batch_ids]
else:
src_data = None
if self.tgt:
tgt_data = [self.tgt[i] for i in batch_ids]
else:
tgt_data = None
src_lang_data = None
tgt_lang_data = None
if self.bilingual:
if self.src_langs is not None:
src_lang_data = [self.src_langs[0]] # should be a tensor [0]
if self.tgt_langs is not None:
tgt_lang_data = [self.tgt_langs[0]] # should be a tensor [1]
else:
if self.src_langs is not None:
src_lang_data = [self.src_langs[i] for i in batch_ids]
if self.tgt_langs is not None:
tgt_lang_data = [self.tgt_langs[i] for i in batch_ids]
batch = Stream(src_data, tgt_data=tgt_data,
src_lang_data=src_lang_data, tgt_lang_data=tgt_lang_data,
src_type=self._type,
augmenter=self.augmenter, upsampling=self.upsampling)
return batch
    # generate a new batch order (static)
def create_order(self, random=True):
self.current_stream_index = 0
self.in_stream_index = 0
if random:
self.stream_order = torch.randperm(len(self.streams))
else:
self.stream_order = torch.arange(len(self.streams)).long()
return self.stream_order
# return the next batch according to the iterator
def next(self, curriculum=False, reset=True, split_sizes=1):
# reset iterator if reach data size limit
if self.current_stream_index >= self.n_streams:
if reset:
self.current_stream_index = 0
self.in_stream_index = 0
else:
return None
current_stream_size = len(self.streams[self.stream_order[self.current_stream_index]])
#
# if curriculum or self.batchOrder is None:
# batch_index = self.cur_index
# else:
# batch_index = self.batchOrder[self.cur_index]
batch_index = [self.stream_order[self.current_stream_index], self.in_stream_index]
batch = self[batch_index]
#
# move the iterator one step
self.in_stream_index += 1
# if the current stream runs out of batch: move to a new stream
if self.in_stream_index >= current_stream_size:
self.current_stream_index += 1
self.in_stream_index = 0
return [batch]
def is_new_stream(self):
        # compare against 1 because this is called right after the batch at index 0 was returned
return self.in_stream_index == 1
def shuffle(self):
data = list(zip(self.src, self.tgt))
self.src, self.tgt = zip(*[data[i] for i in torch.randperm(len(data))])
def set_index(self, iteration):
        print("Jumping to an iteration is not implemented for the stream dataset. Use -reset_optim to start from the beginning.")
raise NotImplementedError
# assert (0 <= iteration < self.num_batches)
# self.cur_index = iteration
| 21,315
| 35.62543
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/whisper_audio.py
|
import os
from functools import lru_cache
from typing import Optional, Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000 samples in a 30-second chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000 frames in a mel spectrogram input
N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2  # the initial convolutions have stride 2
FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH) # 10ms per audio frame
TOKENS_PER_SECOND = exact_div(SAMPLE_RATE, N_SAMPLES_PER_TOKEN) # 20ms per audio token
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
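# Illustrative sketch (not part of the original file): a minimal usage example for
# load_audio. The file name below is a placeholder, and running it requires the
# ffmpeg CLI plus the ffmpeg-python package, as noted above.
def _example_load_audio(path="example.wav"):
    waveform = load_audio(path)                      # float32 mono waveform in [-1, 1]
    duration_seconds = waveform.shape[0] / SAMPLE_RATE
    return waveform, duration_seconds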
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(
dim=axis, index=torch.arange(length, device=array.device)
)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
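# Illustrative sketch (not part of the original file): how pad_or_trim behaves on a
# short NumPy waveform. The silent 1-second array is made up for the example.
def _example_pad_or_trim():
    waveform = np.zeros(SAMPLE_RATE, dtype=np.float32)       # 1 s of silence at 16 kHz
    padded = pad_or_trim(waveform)                           # zero-padded up to N_SAMPLES (30 s)
    trimmed = pad_or_trim(padded, length=SAMPLE_RATE)        # trimmed back to 1 s
    assert padded.shape[-1] == N_SAMPLES and trimmed.shape[-1] == SAMPLE_RATE
    return padded, trimmed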
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
    Load the mel filterbank matrix for projecting the STFT into a Mel spectrogram.
    This allows decoupling the librosa dependency; the filters were saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(
os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")
) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(
audio: Union[str, np.ndarray, torch.Tensor],
n_mels: int = N_MELS,
padding: int = 0,
device: Optional[Union[str, torch.device]] = None,
):
"""
    Compute the log-Mel spectrogram of the input audio.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to an audio file, or a NumPy array or Tensor containing the audio waveform at 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
padding: int
Number of zero samples to pad to the right
device: Optional[Union[str, torch.device]]
If given, the audio tensor is moved to this device before STFT
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
if device is not None:
audio = audio.to(device)
if padding > 0:
audio = F.pad(audio, (0, padding))
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[..., :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
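# Illustrative sketch (not part of the original file): producing Whisper-style input
# features end-to-end. A silent tensor is used instead of load_audio(path) so the
# example has no file/ffmpeg dependency; it still needs the bundled mel_filters.npz asset.
def _example_log_mel_spectrogram():
    audio = torch.zeros(N_SAMPLES)                   # 30 s of silence at 16 kHz
    mel = log_mel_spectrogram(pad_or_trim(audio))    # shape (N_MELS, N_FRAMES) = (80, 3000)
    return mel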
| 4,767
| 31.435374
| 99
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/batch_utils.py
|
import numpy as np
# from .fast_extensions import
def _is_oversized(cur_batch, new_sent_size, cur_batch_sizes, batch_size_words, batch_size_sents):
# cur_batch_size = sum(cur_batch_sizes)
if len(cur_batch) == 0:
return False
if len(cur_batch) >= batch_size_sents:
return True
if max(max(cur_batch_sizes), new_sent_size) * (len(cur_batch) + 1) > batch_size_words:
return True
return False
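# Illustrative sketch (not part of the original file): the check above budgets by
# max_length * (batch_size + 1) because the batch will be padded to its longest
# sentence. All numbers below are invented.
def _example_is_oversized():
    cur_batch = [0, 1, 2]                 # three sentences already in the batch
    cur_batch_sizes = [10, 12, 9]         # their token lengths
    # adding a 40-token sentence would cost max(12, 40) * 4 = 160 padded tokens > 128
    return _is_oversized(cur_batch, 40, cur_batch_sizes,
                         batch_size_words=128, batch_size_sents=32)   # -> True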
def allocate_batch_slow(indices, lengths,
src_sizes, tgt_sizes,
batch_size_words, batch_size_sents, batch_size_multiplier,
max_src_len, max_tgt_len,
min_src_len, min_tgt_len, cleaning=1):
batches = list()
batch = list()
cur_batch_size = 0
cur_batch_sizes = []
idx = 0
full_size = len(indices)
while idx < full_size:
i = indices[idx]
sent_length = lengths[i]
src_size = src_sizes[i] if src_sizes is not None else 0
tgt_size = tgt_sizes[i] if tgt_sizes is not None else 0
if cleaning == 1:
if not (min_src_len <= src_size < max_src_len and min_tgt_len <= tgt_size < max_tgt_len):
idx = idx + 1
continue
oversized = _is_oversized(batch, sent_length, cur_batch_sizes, batch_size_words, batch_size_sents)
if oversized:
current_size = len(batch)
scaled_size = max(
batch_size_multiplier * (current_size // batch_size_multiplier),
current_size % batch_size_multiplier)
batch_ = batch[:scaled_size]
batches.append(batch_) # add this batch into the batch list
batch = batch[scaled_size:] # reset the current batch
cur_batch_sizes = cur_batch_sizes[scaled_size:]
cur_batch_size = sum(cur_batch_sizes)
batch.append(i)
cur_batch_size += sent_length
cur_batch_sizes.append(sent_length)
idx = idx + 1
if len(batch) > 0:
batches.append(batch)
return batches
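# Illustrative sketch (not part of the original file): allocating toy batches with the
# pure-Python fallback. Sizes are invented; with these numbers the function returns
# [[0, 1], [2], [3], [4]].
def _example_allocate_batch_slow():
    lengths = [5, 7, 6, 30, 4]            # per-sample cost in tokens
    indices = list(range(len(lengths)))
    return allocate_batch_slow(indices, lengths,
                               src_sizes=lengths, tgt_sizes=[3, 4, 4, 20, 3],
                               batch_size_words=16, batch_size_sents=8,
                               batch_size_multiplier=1,
                               max_src_len=256, max_tgt_len=256,
                               min_src_len=1, min_tgt_len=1, cleaning=1)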
def _is_oversized_frames(cur_batch, new_size_frames, new_size_words,
cur_batch_size_frames, cur_batch_size_words,
batch_size_frames, batch_size_words, batch_size_sents,
cut_off_size, smallest_batch_size):
if len(cur_batch) == 0:
return False
if len(cur_batch) >= batch_size_sents:
return True
# check if the current batch is too long
if max(max(cur_batch_size_frames), new_size_frames) > cut_off_size:
if len(cur_batch) >= smallest_batch_size:
return True
    # try adding the new utterance and check whether it exceeds the frame limit
if max(max(cur_batch_size_frames), new_size_frames) * (len(cur_batch) + 1) > batch_size_frames:
return True
    # try adding the new sentence and check whether it exceeds the word limit
if max(max(cur_batch_size_words), new_size_words) * (len(cur_batch) + 1) > batch_size_words:
return True
return False
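# Illustrative sketch (not part of the original file): the speech variant budgets by both
# audio frames and target words, and cuts early once any utterance exceeds cut_off_size
# frames. All numbers below are invented.
def _example_is_oversized_frames():
    cur_batch = [0, 1]
    cur_frames, cur_words = [1200, 1500], [12, 15]
    # adding a 2000-frame / 20-word utterance costs 2000 * 3 = 6000 frames > 4000
    return _is_oversized_frames(cur_batch, 2000, 20,
                                cur_frames, cur_words,
                                batch_size_frames=4000, batch_size_words=512,
                                batch_size_sents=32,
                                cut_off_size=240000, smallest_batch_size=4)   # -> True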
def allocate_batch_unbalanced_slow(indices, lengths,
src_sizes, tgt_sizes,
batch_size_frames, batch_size_words,
batch_size_sents, batch_size_multiplier,
max_src_len, max_tgt_len,
min_src_len, min_tgt_len, cleaning=1,
cut_off_size=240000, smallest_batch_size=4):
batches = list()
batch = list()
cur_batch_size_words = []
cur_batch_size_frames = []
idx = 0
full_size = len(indices)
while idx < full_size:
i = indices[idx]
sent_length = lengths[i]
src_size = src_sizes[i] if src_sizes is not None else 0
tgt_size = tgt_sizes[i] if tgt_sizes is not None else 0
if cleaning == 1:
if not (min_src_len <= src_size < max_src_len and min_tgt_len <= tgt_size < max_tgt_len):
idx = idx + 1
continue
oversized = _is_oversized_frames(batch, src_size, tgt_size,
cur_batch_size_frames, cur_batch_size_words,
batch_size_frames, batch_size_words, batch_size_sents,
cut_off_size, smallest_batch_size)
if oversized:
            # trim the current batch so that its size is divisible by the batch-size multiplier
current_size = len(batch)
scaled_size = max(
batch_size_multiplier * (current_size // batch_size_multiplier),
current_size % batch_size_multiplier)
batch_ = batch[:scaled_size]
batches.append(batch_) # add this batch into the batch list
batch = batch[scaled_size:] # reset the current batch
cur_batch_size_words = cur_batch_size_words[scaled_size:]
cur_batch_size_frames = cur_batch_size_frames[scaled_size:]
batch.append(i)
cur_batch_size_words.append(tgt_size)
cur_batch_size_frames.append(src_size)
idx = idx + 1
if len(batch) > 0:
batches.append(batch)
return batches
def allocate_batch(indices, lengths,
src_sizes, tgt_sizes,
batch_size_words, batch_size_sents, batch_size_multiplier,
max_src_len, max_tgt_len,
min_src_len, min_tgt_len, cleaning=1):
try:
import pyximport
cython_available = True
except ModuleNotFoundError as e:
cython_available = False
if not cython_available or (tgt_sizes is None or src_sizes is None):
return allocate_batch_slow(indices, lengths, src_sizes, tgt_sizes,
batch_size_words, batch_size_sents, batch_size_multiplier,
max_src_len, max_tgt_len,
min_src_len, min_tgt_len, cleaning)
pyximport.install(setup_args={"include_dirs": np.get_include()},
inplace=True)
from .fast_extensions import fast_batch_allocate
cleaning = int(cleaning)
if isinstance(indices, list):
indices = np.asarray(indices)
# convert to np int64
return fast_batch_allocate(indices, lengths,
src_sizes, tgt_sizes,
batch_size_words, batch_size_sents, batch_size_multiplier,
max_src_len, max_tgt_len,
min_src_len, min_tgt_len, cleaning)
def allocate_batch_unbalanced(indices, lengths,
src_sizes, tgt_sizes,
batch_size_frames, batch_size_words,
batch_size_sents, batch_size_multiplier,
max_src_len, max_tgt_len,
min_src_len, min_tgt_len, cleaning=1,
cut_off_size=180000, smallest_batch_size=4):
try:
import pyximport
cython_available = True
except ModuleNotFoundError as e:
cython_available = False
if not cython_available or (tgt_sizes is None or src_sizes is None):
return allocate_batch_unbalanced_slow(indices, lengths, src_sizes, tgt_sizes,
batch_size_frames, batch_size_words,
batch_size_sents, batch_size_multiplier,
max_src_len, max_tgt_len,
min_src_len, min_tgt_len, cleaning,
cut_off_size, smallest_batch_size)
pyximport.install(setup_args={"include_dirs": np.get_include()},
inplace=True)
from .fast_extensions import fast_batch_allocate_unbalance
cleaning = int(cleaning)
if isinstance(indices, list):
indices = np.asarray(indices)
# convert to np int64
return fast_batch_allocate_unbalance(indices,
src_sizes, tgt_sizes,
batch_size_frames, batch_size_words,
batch_size_sents, batch_size_multiplier,
max_src_len, max_tgt_len,
min_src_len, min_tgt_len, cleaning,
cut_off_size, smallest_batch_size)
| 8,598
| 36.064655
| 106
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/data_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import contextlib
import itertools
import logging
import os
import sys
import types
import numpy as np
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
if len(addl_seeds) > 0:
seed = int(hash((seed, *addl_seeds)) % 1e6)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
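# Illustrative sketch (not part of the original file): numpy_seed makes a block of NumPy
# randomness reproducible (e.g. per-epoch batch shuffling) without disturbing the
# surrounding RNG state. The (seed, epoch) pair below is made up.
def _example_numpy_seed():
    batches = list(range(10))
    with numpy_seed(1234, 0):            # e.g. (base seed, epoch index)
        np.random.shuffle(batches)       # same permutation every time for these seeds
    return batches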
| 823
| 22.542857
| 77
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/stream_dataset.py
|
from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
import onmt
from onmt.speech.Augmenter import Augmenter
from onmt.modules.dropout import switchout
"""
Data management for stream-to-stream models
Two basic classes:
- Batch stores the input / output sequences, grouped into tensors with the same length (by padding)
- Dataset stores all of the data and groups it into streams of mini-batches
"""
class Stream(object):
# An object to manage the data within a stream
def __init__(self, src_data, tgt_data=None,
src_lang_data=None, tgt_lang_data=None,
src_type='text',
length_multiplier=1,
augmenter=None, upsampling=False,
**kwargs):
"""
:param src_data: list of source tensors
:param tgt_data: list of target tensors
:param src_lang_data: list of language features for the source (TB finished)
:param tgt_lang_data: list of language features for the target (TB finished)
:param src_type: text or audio
:param reshape_speech: the number of frames to be reshaped
:param augmenter: using augmentation for speech
:param merge: if the two sequences are going to be merged for Relative Transformer
"""
self.tensors = defaultdict(lambda: None)
self.has_target = False
self.src_type = src_type
# self.upsampling = upsampling
# self.feature_size = kwargs.get('feature_size', 40)
        self.length_multiplier = length_multiplier
if src_data is not None:
self.tensors['source'], self.tensors['source_pos'], self.src_lengths = \
self.collate(src_data,
type=self.src_type,
augmenter=augmenter)
self.tensors['src_length'] = self.src_lengths
self.src_size = sum(self.src_lengths)
else:
self.src_size = 0
if tgt_data is not None:
target_full, target_pos, self.tgt_lengths = self.collate(tgt_data)
# self.tensors['target'] = target_full
# self.tensors['target_input'] = target_full[:-1]
# the last sentence has one element (eos) missing
# self.tgt_lengths[-1] = self.tgt_lengths[-1] - 1
# self.tensors['target_output'] = target_full[1:]
# self.tensors['target_pos'] = target_pos[:-1]
self.tensors['target_input'], self.tensors['target_output'], \
self.tensors['target_pos'], self.tgt_lengths = self.collate(tgt_data, target=True)
self.tensors['tgt_mask'] = self.tensors['target_output'].ne(onmt.constants.PAD)
self.has_target = True
self.tgt_size = sum([len(x) - 1 for x in tgt_data])
else:
self.tgt_size = 0
self.size = len(src_data) if src_data is not None else len(tgt_data)
if src_lang_data is not None:
self.tensors['source_lang'] = torch.cat(src_lang_data).long()
if tgt_lang_data is not None:
self.tensors['target_lang'] = torch.cat(tgt_lang_data).long()
def switchout(self, swrate, src_vocab_size, tgt_vocab_size):
# Switch out function ... currently works with only source text data
if self.src_type == 'text':
self.tensors['source'] = switchout(self.tensors['source'], src_vocab_size, swrate, transpose=True)
if self.has_target:
self.tensors['target'] = switchout(self.tensors['target'], tgt_vocab_size, swrate, transpose=True, offset=1)
target_full = self.tensors['target']
self.tensors['target_input'] = target_full[:-1]
self.tensors['target_output'] = target_full[1:]
self.tensors['tgt_mask'] = self.tensors['target_output'].ne(onmt.constants.PAD)
# down sampling the speech signal by simply concatenating n features (reshaping)
def downsample(self, data):
if self.reshape_speech == 0:
return data
else:
concat = self.reshape_speech
tensor_ = data.float() # adding float because of fp16 data storage
add = (concat - tensor_.size()[0] % concat) % concat
z = torch.FloatTensor(add, tensor_.size()[1]).zero_()
# adding an additional dimension as padding
tensor_ = torch.cat((tensor_, z), 0)
tensor_ = tensor_.reshape((int(tensor_.size()[0] / concat), tensor_.size()[1] * concat))
return tensor_
def augment_speech(self):
return
def collate(self, data, type="text", augmenter=None, target=False):
"""
        Assembling the individual sequences into one single tensor, including padding
:param target:
:param data: the list of sequences in chronological order
:param type: text or audio
:param augmenter: for augmentation in audio models
:return:
data (list of Torch.Tensor) size 1 x T
"""
if type == "text":
if not target:
lengths = torch.LongTensor([x.size(0) for x in data])
positions = [torch.arange(length_) for length_ in lengths]
positions = torch.cat(positions)
                # the last part is padded (so that the actual batch size divides by the multiplier)
                # tensor_length = math.ceil(sum(lengths) / self.length_multiplier) * self.length_multiplier
tensor_length = torch.sum(lengths).item()
# create a placeholder for the data
tensor = data[0].new(tensor_length).fill_(onmt.constants.PAD)
offset = 0
for sample in data:
current_length = sample.size(0)
tensor.narrow(0, offset, current_length).copy_(sample)
offset += current_length
tensor = tensor.unsqueeze(1) # batch size is 1
return tensor, positions, lengths
else:
# because we take the last unit away
lengths = torch.LongTensor([x.size(0) - 1 for x in data])
positions = [torch.arange(length_) for length_ in lengths]
positions = torch.cat(positions)
tensor_length = torch.sum(lengths).item()
# create a placeholder for the data
input = data[0].new(tensor_length).fill_(onmt.constants.PAD)
# create a placeholder for the data
target = data[0].new(tensor_length).fill_(onmt.constants.PAD)
offset = 0
for sample in data:
current_length = sample.size(0) - 1
input.narrow(0, offset, current_length).copy_(sample[:-1])
target.narrow(0, offset, current_length).copy_(sample[1:])
offset += current_length
input = input.unsqueeze(1)
target = target.unsqueeze(1)
return input, target, positions, lengths
elif type == "audio":
raise NotImplementedError
#
# # First step: on-the-fly processing for the samples
# # Reshaping: either downsampling or upsampling
# # On the fly augmentation
# samples = []
#
# for i in range(len(data)):
# sample = data[i]
#
# if augmenter is not None:
# sample = augmenter.augment(sample)
#
# if self.upsampling:
# sample = sample.view(-1, self.feature_size)
#
# samples.append(sample)
#
# # compute the lengths afte on-the-fly processing
# lengths = [x.size(0) for x in samples]
#
# max_length = max(lengths)
#
# # allocate data for the batch speech
# feature_size = samples[0].size(1)
# batch_size = len(data)
#
# # feature size + 1 because the last dimension is created for padding
# tensor = data[0].float().new(batch_size, max_length, feature_size + 1).fill_(onmt.constants.PAD)
#
# for i in range(len(samples)):
# sample = samples[i]
#
# data_length = sample.size(0)
# offset = max_length - data_length if align_right else 0
#
# tensor[i].narrow(0, offset, data_length).narrow(1, 1, sample.size(1)).copy_(sample)
# # in padding dimension: 0 is not padded, 1 is padded
# tensor[i].narrow(0, offset, data_length).narrow(1, 0, 1).fill_(1)
#
# return tensor, None, lengths
# else:
# raise NotImplementedError
def get(self, name):
if name in self.tensors:
return self.tensors[name]
else:
return None
def cuda(self, fp16=False):
"""
        Send the minibatch data to the GPU. Old-fashioned version without the 'device' argument.
:param fp16:
:return: None
"""
for key, tensor in self.tensors.items():
if isinstance(tensor, dict):
for k in tensor:
v = tensor[k]
tensor[k] = v.cuda()
elif tensor is not None:
if tensor.type() == "torch.FloatTensor" and fp16:
self.tensors[key] = tensor.half()
self.tensors[key] = self.tensors[key].cuda()
else:
continue
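# Illustrative sketch (not part of the original file): packing two toy target sequences
# into a single Stream. The token ids are invented; onmt.constants.PAD is assumed to be
# defined, as it is used by Stream.collate above.
def _example_stream():
    tgt = [torch.LongTensor([2, 5, 6, 3]), torch.LongTensor([2, 7, 3])]   # <s> ... </s>
    stream = Stream(None, tgt_data=tgt)
    # unlike the padded Batch in dataset.py, the sequences are concatenated into one
    # (total_length x 1) tensor, shifted by one token for input/output
    return stream.get('target_input').shape, stream.get('target_output').shape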
class StreamDataset(torch.utils.data.Dataset):
def __init__(self, src_data, tgt_data,
src_langs=None, tgt_langs=None,
batch_size_words=2048,
data_type="text", batch_size_sents=128,
multiplier=1,
augment=False,
**kwargs):
"""
:param src_data: List of tensors for the source side (1D for text, 2 or 3Ds for other modalities)
        :param tgt_data: List of tensors (1D text) for the target side (already padded with <s> and </s>)
:param src_langs: Source languages (list of one-tensors)
:param tgt_langs: Target Languages (list of one-tensors)
:param batch_size_words: Maximum number of words in the minibatch (MB can't have more than this)
:param data_type: Text or Audio
:param batch_size_sents: Maximum number of sequences in the minibatch (MB can't have more than this)
:param multiplier: The number of sequences must divide by this number (for fp16 when multiplier=8)
:param reshape_speech: Put N frames together to reduce the length (this might be done already in preprocessing)
:param augment: Speech Augmentation (currently only spec augmentation is implemented)
"""
"""
For alignment, the right-aligned data looks like:
P P P P D D D D
P P D D D D D D
P P P P P D D D
P P P D D D D D
This can affect positional encoding (whose implementation is not consistent w.r.t padding)
For models with absolute positional encoding, src and tgt should be aligned left (This is default)
For models with relative positional encoding, src should be right and tgt should be left
"""
self.src = src_data
self._type = data_type
self.upsampling = kwargs.get('upsampling', False)
# self.reshape_speech = reshape_speech
if tgt_data:
self.tgt = tgt_data
if src_data:
assert (len(self.src) == len(self.tgt))
else:
self.tgt = None
# in stream dataset we don't sort data
self.src_langs = src_langs
self.tgt_langs = tgt_langs
if self.src_langs is not None and self.tgt_langs is not None:
assert (len(src_langs) == len(tgt_langs))
# In "bilingual" case, the src_langs only contains one single vector
# Which is broadcasted to batch_size
if len(src_langs) <= 1:
self.bilingual = True
else:
self.bilingual = False
self.fullSize = len(self.src) if self.src is not None else len(self.tgt)
# maximum number of tokens in a mb
self.batch_size_words = batch_size_words
# maximum sequences in a mb
self.batch_size_sents = batch_size_sents
# the actual batch size must divide by this multiplier (for fp16 it has to be 4 or 8)
self.multiplier = multiplier
# by default: count the amount of padding when we group mini-batches
self.pad_count = False
# group samples into mini-batches
self.batches = []
self.num_batches = 0
self.allocate_batch()
self.cur_index = 0
self.batchOrder = None
if augment:
self.augmenter = Augmenter()
else:
self.augmenter = None
def size(self):
return self.fullSize
def switchout(self, batch):
pass
# This function allocates the mini-batches (grouping sentences with the same size)
def allocate_batch(self):
cur_batch = []
cur_batch_size = 0
cur_batch_sizes = []
def oversize_(cur_batch, sent_size):
if len(cur_batch) == 0:
return False
if len(cur_batch) >= self.batch_size_sents:
return True
if cur_batch_size + sent_size > self.batch_size_words:
return True
return False
i = 0
while i < self.fullSize:
if self.tgt is not None and self.src is not None:
sentence_length = self.tgt[i].size(0) + self.src[i].size(0) - 1
elif self.tgt is not None:
sentence_length = self.tgt[i].size(0) - 1
else:
sentence_length = self.src[i].size(0)
oversized = oversize_(cur_batch, sentence_length)
# if the current item makes the batch exceed max size
# then we create a new batch
if oversized:
# cut-off the current list to fit the multiplier
current_size = len(cur_batch)
scaled_size = max(
self.multiplier * (current_size // self.multiplier),
current_size % self.multiplier)
batch_ = cur_batch[:scaled_size]
self.batches.append(batch_) # add this batch into the batch list
cur_batch = cur_batch[scaled_size:] # reset the current batch
cur_batch_sizes = cur_batch_sizes[scaled_size:]
cur_batch_size = sum(cur_batch_sizes)
cur_batch.append(i)
cur_batch_size += sentence_length
cur_batch_sizes.append(sentence_length)
i = i + 1
# catch the last batch
if len(cur_batch) > 0:
self.batches.append(cur_batch)
self.num_batches = len(self.batches)
def __len__(self):
return self.num_batches
def __getitem__(self, index):
"""
:param index: the index of the mini-batch in the list
:return: Batch
"""
assert index < self.num_batches, "%d > %d" % (index, self.num_batches)
batch_ids = self.batches[index]
if self.src:
src_data = [self.src[i] for i in batch_ids]
else:
src_data = None
if self.tgt:
tgt_data = [self.tgt[i] for i in batch_ids]
else:
tgt_data = None
src_lang_data = None
tgt_lang_data = None
if self.bilingual:
if self.src_langs is not None:
src_lang_data = [self.src_langs[0]] # should be a tensor [0]
if self.tgt_langs is not None:
tgt_lang_data = [self.tgt_langs[0]] # should be a tensor [1]
else:
if self.src_langs is not None:
src_lang_data = [self.src_langs[i] for i in batch_ids]
if self.tgt_langs is not None:
tgt_lang_data = [self.tgt_langs[i] for i in batch_ids]
batch = Stream(src_data, tgt_data=tgt_data,
src_lang_data=src_lang_data, tgt_lang_data=tgt_lang_data,
src_type=self._type,
augmenter=self.augmenter, upsampling=self.upsampling)
return batch
    # generate a new batch order (static)
def create_order(self, random=True):
# always generate in order of the data
self.batchOrder = torch.arange(self.num_batches).long()
self.cur_index = 0
return self.batchOrder
# return the next batch according to the iterator
def next(self, curriculum=False, reset=True, split_sizes=1):
# reset iterator if reach data size limit
if self.cur_index >= self.num_batches:
if reset:
self.cur_index = 0
else:
return None
if curriculum or self.batchOrder is None:
batch_index = self.cur_index
else:
batch_index = self.batchOrder[self.cur_index]
batch = self[batch_index]
# move the iterator one step
self.cur_index += 1
return [batch]
def shuffle(self):
data = list(zip(self.src, self.tgt))
self.src, self.tgt = zip(*[data[i] for i in torch.randperm(len(data))])
def set_index(self, iteration):
assert (0 <= iteration < self.num_batches)
self.cur_index = iteration
| 17,676
| 35.598344
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/dataset.py
|
from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
import onmt
from onmt.speech.Augmenter import Augmenter
from onmt.modules.dropout import switchout
import numpy as np
from .batch_utils import allocate_batch, allocate_batch_unbalanced
import dill
"""
Data management for sequence-to-sequence models
Two basic classes:
- Batch stores the input / output sequences, grouped into tensors with the same length (by padding)
- Dataset stores all of the data and groups it into mini-batches
"""
def merge_data(data, align_right=False, type='text', augmenter=None, upsampling=False,
feature_size=40, dataname="source", src_pad=1, tgt_pad=1 ):
"""
    Assembling the individual sequences into one single tensor, including padding
:param tgt_pad:
:param src_pad:
:param dataname:
:param feature_size:
:param upsampling:
:param data: the list of sequences
:param align_right: aligning the sequences w.r.t padding
:param type: text or audio
:param augmenter: for augmentation in audio models
:return:
"""
# initialize with batch_size * length
# TODO: rewrite this function in Cython
if type == "text":
lengths = [x.size(0) for x in data]
# positions = [torch.arange(length_) for length_ in lengths]
max_length = max(lengths)
# if max_length > 8:
# max_length = math.ceil(max_length / 8) * 8
if dataname == "source":
tensor = data[0].new(len(data), max_length).fill_(src_pad)
elif dataname == "target":
tensor = data[0].new(len(data), max_length).fill_(tgt_pad)
else:
print("Warning: check the dataname")
raise NotImplementedError
pos = None
for i in range(len(data)):
data_length = data[i].size(0)
offset = max_length - data_length if align_right else 0
tensor[i].narrow(0, offset, data_length).copy_(data[i])
return tensor, pos, lengths
elif type in ["audio", "scp"]:
# First step: on-the-fly processing for the samples
# Reshaping: either downsampling or upsampling
# On the fly augmentation
samples = []
for i in range(len(data)):
sample = data[i]
if augmenter is not None:
sample = augmenter.augment(sample)
if upsampling:
sample = sample.view(-1, feature_size)
samples.append(sample)
        # compute the lengths after on-the-fly processing
lengths = [x.size(0) for x in samples]
max_length = max(lengths)
# max_length = math.ceil(max_length / 8) * 8
# allocate data for the batch speech
feature_size = samples[0].size(1)
batch_size = len(data)
# feature size + 1 because the last dimension is created for padding
tensor = data[0].float().new(batch_size, max_length, feature_size + 1).fill_(0)
for i in range(len(samples)):
sample = samples[i]
data_length = sample.size(0)
offset = max_length - data_length if align_right else 0
tensor[i].narrow(0, offset, data_length).narrow(1, 1, sample.size(1)).copy_(sample)
# in padding dimension: 1 is not padded, 0 is padded
tensor[i].narrow(0, offset, data_length).narrow(1, 0, 1).fill_(1)
return tensor, None, lengths
elif type == 'wav':
samples = data
lengths = [x.size(0) for x in samples]
max_length = max(lengths)
# allocate data for the batch speech
feature_size = 1 # samples[0].size(1) # most likely 1
        assert feature_size == 1, "expecting feature size = 1 but got %.2f" % feature_size
batch_size = len(data)
# feature size + 1 because the last dimension is created for padding
tensor = data[0].float().new(batch_size, max_length, feature_size + 1).fill_(0)
for i in range(len(samples)):
sample = samples[i]
# normalize
data_length = sample.size(0)
offset = max_length - data_length if align_right else 0
channels = 1
tensor[i].narrow(0, offset, data_length).narrow(1, 1, channels).copy_(sample)
# in padding dimension: 1 is not padded, 0 is padded
tensor[i].narrow(0, offset, data_length).narrow(1, 0, 1).fill_(1)
return tensor, None, lengths
else:
raise NotImplementedError
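# Illustrative sketch (not part of the original file): padding two toy text sequences
# into one batch tensor. The pad index 1 is an assumption for the example; the real
# value comes from onmt.constants / the Dataset constructor.
def _example_merge_data():
    seqs = [torch.LongTensor([4, 5, 6]), torch.LongTensor([7, 8])]
    tensor, _, lengths = merge_data(seqs, type='text', dataname="source", src_pad=1)
    # tensor is 2 x 3; the shorter sequence is left-aligned and right-padded with src_pad
    return tensor, lengths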
def collate_fn(src_data, tgt_data,
src_lang_data, tgt_lang_data,
src_atbs_data, tgt_atbs_data,
src_align_right, tgt_align_right,
src_type='text',
augmenter=None, upsampling=False,
bilingual=False, vocab_mask=None,
past_src_data=None, src_pad="<blank>", tgt_pad="<blank>", feature_size=40):
tensors = dict()
if src_data is not None:
tensors['source'], tensors['source_pos'], src_lengths = merge_data(src_data, align_right=src_align_right,
type=src_type, augmenter=augmenter,
upsampling=upsampling, feature_size=feature_size,
dataname="source", src_pad=src_pad)
tensors['src_type'] = src_type
tensors['src_selfattn_mask'] = tensors['source'].eq(src_pad)
tensors['source'] = tensors['source'].transpose(0, 1).contiguous()
if tensors['source_pos'] is not None:
tensors['source_pos'] = tensors['source_pos'].transpose(0, 1)
tensors['src_lengths'] = torch.LongTensor(src_lengths)
tensors['src_size'] = sum(src_lengths)
if tgt_data is not None:
target_full, target_pos, tgt_lengths = merge_data(tgt_data, align_right=tgt_align_right,
dataname="target", tgt_pad=tgt_pad)
tensors['tgt_selfattn_mask'] = target_full.eq(tgt_pad)
target_full = target_full.t().contiguous() # transpose BxT to TxB
tensors['target'] = target_full
tensors['target_input'] = target_full[:-1]
tensors['target_input_selfattn_mask'] = tensors['target_input'].transpose(0, 1).eq(tgt_pad)
tensors['target_output'] = target_full[1:]
if target_pos is not None:
tensors['target_pos'] = target_pos.t().contiguous()[:-1]
tgt_size = sum([len(x) - 1 for x in tgt_data])
tensors['tgt_lengths'] = tgt_lengths
else:
tgt_size = 0
tensors['tgt_lengths'] = None
# merge data for the previous source
if past_src_data is not None:
tensors['past_source'], tensors['past_source_pos'], past_src_lengths = merge_data(past_src_data,
align_right=src_align_right,
type=src_type,
augmenter=augmenter,
upsampling=upsampling,
feature_size=feature_size,
dataname="source",
src_pad=src_pad)
tensors['past_source'] = tensors['past_source'].transpose(0, 1).contiguous()
if tensors['past_source_pos'] is not None:
tensors['past_source_pos'] = tensors['past_source_pos'].transpose(0, 1)
tensors['past_src_lengths'] = torch.LongTensor(past_src_lengths)
tensors['past_src_size'] = sum(past_src_lengths)
tensors['tgt_size'] = tgt_size
tensors['size'] = len(src_data) if src_data is not None else len(tgt_data)
if src_lang_data is not None:
tensors['source_lang'] = torch.cat(src_lang_data).long()
if tgt_lang_data is not None:
tensors['target_lang'] = torch.cat(tgt_lang_data).long()
if src_atbs_data is not None:
tensors['source_atbs'] = torch.cat(src_atbs_data).long()
if tgt_atbs_data is not None:
tensors['target_atbs'] = torch.cat(tgt_atbs_data).long()
tensors['vocab_mask'] = vocab_mask
return LightBatch(tensors)
def rewrap(light_batch):
"""
    Currently this light batch is used during data collection to avoid pickling errors.
    Afterwards it is converted to a Batch.
:param light_batch:
:return:
"""
return Batch(light_batch.tensors)
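# Illustrative sketch (not part of the original file): building a Batch from two toy
# sentence pairs. The pad index 1 and the token ids are assumptions for the example.
def _example_collate_fn():
    src = [torch.LongTensor([4, 5, 6]), torch.LongTensor([7, 8])]
    tgt = [torch.LongTensor([2, 9, 10, 3]), torch.LongTensor([2, 11, 3])]   # <s> ... </s>
    light_batch = collate_fn(src, tgt,
                             src_lang_data=None, tgt_lang_data=None,
                             src_atbs_data=None, tgt_atbs_data=None,
                             src_align_right=False, tgt_align_right=False,
                             src_pad=1, tgt_pad=1)
    return rewrap(light_batch)           # convert the picklable LightBatch into a Batch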
class Batch(object):
# An object to manage the data within a minibatch
def __init__(self, tensors):
self.tensors = defaultdict(lambda: None, tensors)
self.src_size = tensors['src_size']
self.tgt_size = tensors['tgt_size']
self.size = tensors['size']
self.src_lengths = tensors['src_lengths']
self.tgt_lengths = tensors['tgt_lengths']
self.has_target = True if self.tensors['target'] is not None else False
self.vocab_mask = tensors['vocab_mask']
def get(self, name):
if name in self.tensors:
return self.tensors[name]
else:
return None
def cuda(self, fp16=False, device=None):
"""
Send the minibatch data into GPU.
:param device: default = None (default CUDA device)
:param fp16:
:return: None
"""
for key, tensor in self.tensors.items():
if isinstance(tensor, dict):
for k in tensor:
if isinstance(k, torch.Tensor):
v = tensor[k]
tensor[k] = v.cuda(device=device)
elif tensor is not None:
if isinstance(tensor, torch.Tensor):
if tensor.type() == "torch.FloatTensor" and fp16:
self.tensors[key] = tensor.half()
self.tensors[key] = self.tensors[key].cuda(device=device)
else:
continue
def switchout(self, swrate, src_vocab_size, tgt_vocab_size):
# Switch out function ... currently works with only source text data
# if self.src_type == 'text':
if len(self.tensors['source'].shape) == 2:
self.tensors['source'] = switchout(self.tensors['source'], src_vocab_size, swrate, transpose=True)
if self.has_target:
self.tensors['target'] = switchout(self.tensors['target'], tgt_vocab_size, swrate, transpose=True, offset=1)
# target_full = self.tensors['target']
# self.tensors['target_input'] = target_full[:-1]
# self.tensors['target_output'] = target_full[1:]
# self.tensors['tgt_mask'] = self.tensors['target_output'].ne(onmt.constants.PAD)
# Masked Predictive Coding mask
# Randomly choose positions and set features to Zero
# For later reconstruction
def mask_mpc(self, p=0.5):
        # the audio has size [T x B x (F+1)]; the first feature channel is the padding indicator
# need to sample a mask
source = self.tensors['source']
with torch.no_grad():
source = source.narrow(2, 1, source.size(2) - 1)
# p drop -> 1 - p keeping probability
masked_positions = source.new(source.size(0), source.size(1)).bernoulli_(1 - p)
self.tensors['original_source'] = source.clone()
source.mul_(
masked_positions.unsqueeze(-1)) # in-place multiplication that will change the underlying storage
# remember the positions to be used later in losses
self.tensors['masked_positions'] = masked_positions
return
class LightBatch:
def __init__(self, tensors):
self.tensors = tensors
def pin_memory(self):
"""
Enable memory pinning
:return:
"""
for key, tensor in self.tensors.items():
if isinstance(tensor, dict):
for k in tensor:
v = tensor[k]
if isinstance(v, torch.Tensor):
tensor[k] = v.pin_memory()
elif tensor is not None:
if isinstance(tensor, torch.Tensor):
self.tensors[key] = self.tensors[key].pin_memory()
else:
continue
return self
class Dataset(torch.utils.data.Dataset):
def get_tgt_pad(self):
return self.tgt_pad
def get_batches(self):
return self.batches
def get_collater(self):
return self.collater
def get_size(self):
return self.num_batches
def __init__(self, src_data, tgt_data,
src_sizes=None, tgt_sizes=None,
src_langs=None, tgt_langs=None,
src_atbs=None, tgt_atbs=None,
batch_size_frames=1280000,
batch_size_words=16384,
data_type="text", batch_size_sents=128,
multiplier=1, sorting=False,
augment=False,
src_align_right=False, tgt_align_right=False,
verbose=False, cleaning=False, debug=False,
num_split=1,
sa_f=8, sa_t=64, input_size=40,
past_src_data=None,
past_src_data_sizes=None,
constants=None,
**kwargs):
"""
:param src_data: List of tensors for the source side (1D for text, 2 or 3Ds for other modalities)
        :param tgt_data: List of tensors (1D text) for the target side (already padded with <s> and </s>)
:param src_langs: Source languages (list of one-tensors)
:param tgt_langs: Target Languages (list of one-tensors)
:param batch_size_words: Maximum number of words in the minibatch (MB can't have more than this)
:param data_type: Text or Audio
:param batch_size_sents: Maximum number of sequences in the minibatch (MB can't have more than this)
:param multiplier: The number of sequences must divide by this number (for fp16 when multiplier=8)
:param reshape_speech: Put N frames together to reduce the length (this might be done already in preprocessing)
:param augment: Speech Augmentation (currently only spec augmentation is implemented)
"""
"""
For alignment, the right-aligned data looks like:
P P P P D D D D
P P D D D D D D
P P P P P D D D
P P P D D D D D
This can affect positional encoding (whose implementation is not consistent w.r.t padding)
For models with absolute positional encoding, src and tgt should be aligned left (This is default)
For models with relative positional encoding, src should be right and tgt should be left
"""
if constants is not None:
constants = dill.loads(constants)
self.tgt_pad = constants.TGT_PAD
self.src_pad = constants.SRC_PAD
else:
self.tgt_pad = onmt.constants.TGT_PAD
self.src_pad = onmt.constants.SRC_PAD
self.src = src_data
self.past_src = past_src_data
self._type = data_type
self.src_align_right = src_align_right
if self.src_align_right and verbose:
print("* Source sentences aligned to the right side.")
self.tgt_align_right = tgt_align_right
self.upsampling = kwargs.get('upsampling', False)
self.max_src_len = kwargs.get('max_src_len', None)
        self.max_tgt_len = kwargs.get('max_tgt_len', 256)
self.cleaning = int(cleaning)
self.debug = debug
self.num_split = num_split
self.vocab_mask = None
self.use_past_src = self.past_src is not None
self.min_tgt_len = kwargs.get('min_tgt_len', 3)
self.min_src_len = kwargs.get('min_src_len', 2)
self.batch_size_frames = batch_size_frames
cut_off_size = kwargs.get('cut_off_size', 200000)
smallest_batch_size = kwargs.get('smallest_batch_size', 4)
if self.max_src_len is None:
if self._type == 'text':
self.max_src_len = 256
elif self._type == 'wav':
self.max_src_len = 320000
else:
# for audio set this to 2048 frames
self.max_src_len = 4096 if not self.use_past_src else 8192
# self.reshape_speech = reshape_speech
if tgt_data:
self.tgt = tgt_data
else:
self.tgt = None
# Processing data sizes
if self.src is not None:
if src_sizes is not None:
if verbose:
print("Loading source size from binarized data ...")
src_sizes = np.asarray(src_sizes)
else:
if verbose:
print("Source size not available. Computing source size from data...")
src_sizes = np.asarray([data.size(0) for data in self.src])
else:
src_sizes = None
# add the past source size to source size (to balance out the encoder part during allocation)
if self.use_past_src:
if past_src_data_sizes is not None:
src_sizes += np.asarray(past_src_data_sizes)
else:
src_sizes += np.asarray([data.size(0) for data in self.past_src])
if self.tgt is not None:
if tgt_sizes is not None:
print("Loading target size from binarized data ...")
tgt_sizes = np.asarray(tgt_sizes)
else:
print("Target size not available. Computing target size from data...")
tgt_sizes = np.asarray([data.size(0) for data in self.tgt])
else:
tgt_sizes = None
# sort data to have efficient mini-batching during training
if sorting:
if self._type == 'text':
sorted_order = np.lexsort((src_sizes, tgt_sizes))
elif self._type in ['audio', 'wav']:
sorted_order = np.lexsort((tgt_sizes, src_sizes))
else:
sorted_order = np.arange(len(self.src))
self.order = None
# store data length in numpy for fast query
if self.tgt is not None and self.src is not None:
stacked_sizes = np.stack((src_sizes, tgt_sizes - 1), axis=0)
data_lengths = np.amax(stacked_sizes, axis=0)
elif self.src is None:
data_lengths = tgt_sizes
else:
data_lengths = src_sizes
# Processing language ids
self.src_langs = src_langs
self.tgt_langs = tgt_langs
if self.src_langs is not None and self.tgt_langs is not None:
assert (len(src_langs) == len(tgt_langs))
# Processing attributes
self.src_atbs = src_atbs
self.tgt_atbs = tgt_atbs
# In "bilingual" case, the src_langs only contains one single vector
# Which is broadcasted to batch_size
if len(src_langs) <= 1:
self.bilingual = True
if self.src_atbs is not None:
assert(len(src_atbs) <= 1), "For a bilingual dataset, expect attributes to be 'singular' too"
else:
self.bilingual = False
self.full_size = len(src_sizes)
# self.full_size = len(self.src) if self.src is not None else len(self.tgt)
# maximum number of tokens in a mb
self.batch_size_words = batch_size_words
# maximum sequences in a mb
self.batch_size_sents = batch_size_sents
# the actual batch size must divide by this multiplier (for fp16 it has to be 4 or 8)
self.multiplier = multiplier
# by default: count the amount of padding when we group mini-batches
self.pad_count = True
# group samples into mini-batches
# if verbose:
# print("* Allocating mini-batches ...")
if self._type in ['audio', 'wav']:
self.batches = allocate_batch_unbalanced(sorted_order, data_lengths,
src_sizes, tgt_sizes,
batch_size_frames, batch_size_words,
batch_size_sents, self.multiplier,
self.max_src_len, self.max_tgt_len,
self.min_src_len, self.min_tgt_len, self.cleaning,
cut_off_size, smallest_batch_size)
else:
self.batches = allocate_batch(sorted_order, data_lengths,
src_sizes, tgt_sizes,
batch_size_words, batch_size_sents, self.multiplier,
self.max_src_len, self.max_tgt_len,
self.min_src_len, self.min_tgt_len, self.cleaning)
        # one of the mini-batches near the end of the (size-sorted) list is likely the largest
        # (the very last ones can be remnants after grouping samples, with fewer elements than the max size)
self.largest_batch_id = len(self.batches) - 3
self.num_batches = len(self.batches)
self.batch_sizes = [len(x) for x in self.batches]
# if self.src_sizes is not None:
# self.batch_src_sizes = [max([self.src_sizes[x] for x in b]) for b in self.batches]
# else:
# self.batch_src_sizes = [0 for b in self.batches]
#
# if self.tgt_sizes is not None:
# self.batch_tgt_sizes = [max([self.tgt_sizes[x] for x in b]) for b in self.batches]
# else:
# self.batch_tgt_sizes = [0 for b in self.batches]
        print("Number of sentences before cleaning and sorting: %d" % len(src_sizes))
        print("Number of sentences after cleaning and sorting: %d" % sum(self.batch_sizes))
print("Number of batches after cleaning and sorting: %d" % self.num_batches)
self.cur_index = 0
self.batchOrder = None
self.input_size = input_size
if augment:
self.augmenter = Augmenter(F=sa_f, T=sa_t, input_size=input_size)
else:
self.augmenter = None
def flush_cache(self):
if hasattr(self.src, 'flush_cache'):
self.src.flush_cache()
def size(self):
return self.full_size
def switchout(self, batch):
pass
def set_epoch(self, epoch):
pass
def set_mask(self, vocab_mask):
self.vocab_mask = vocab_mask
def get_largest_batch(self, bsz=-1, src_size=-1, tgt_size=-1):
if bsz == -1 and src_size == -1 and tgt_size == -1:
return self.get_batch(self.largest_batch_id)
else:
raise NotImplementedError
# batch = None
# for i in range(self.num_batches):
#
# src_size_ = self.batch_src_sizes[i]
# tgt_size_ = self.batch_tgt_sizes[i]
# bsz_size_ = self.batch_sizes[i]
#
# get_batch = True
# if bsz > 0:
# if bsz_size_ != bsz:
# get_batch = False
#
# if src_size > 0:
# if src_size_ != src_size:
# get_batch = False
#
# if tgt_size > 0:
# if tgt_size_ != tgt_size:
# get_batch = False
#
# if get_batch:
# # print("Found batch satisfying the conditions bsz %d src_size %d tgt_size %d" % (bsz, src_size, tgt_size))
# return self.get_batch(i)
# print("Cannot find the batch satisfying those conditions")
return self.get_batch(self.largest_batch_id)
def __len__(self):
return self.num_batches
def __getitem__(self, index):
src_lang, tgt_lang = None, None
src_atb, tgt_atb = None, None
if self.bilingual:
if self.src_langs is not None:
src_lang = self.src_langs[0] # should be a tensor [0]
if self.tgt_langs is not None:
tgt_lang = self.tgt_langs[0] # should be a tensor [1]
if self.src_atbs is not None:
src_atb = self.src_atbs[0]
if self.tgt_atbs is not None:
tgt_atb = self.tgt_atbs[0]
else:
if self.src_langs is not None:
src_lang = self.src_langs[index]
if self.tgt_langs is not None:
tgt_lang = self.tgt_langs[index]
# if self.src_atbs is not None:
# src_atb = self.src_atbs[index]
# if self.tgt_atbs is not None:
# tgt_atb = self.tgt_atbs[index]
src_atb = None
tgt_atb = None
# move augmenter here?
if self.use_past_src:
past_src = self.past_src[index]
else:
past_src = None
sample = {
'src': self.src[index] if self.src is not None else None,
'tgt': self.tgt[index] if self.tgt is not None else None,
'src_lang': src_lang,
'tgt_lang': tgt_lang,
'src_atb': src_atb,
'tgt_atb': tgt_atb,
'past_src': past_src
}
return sample
def get_batch(self, index):
"""
This function is only used in when we need to access a batch directly from the dataset
(Without an external loader)
:param index: the index of the mini-batch in the list
:return: Batch
"""
assert index < self.num_batches, "%d > %d" % (index, self.num_batches)
batch_ids = self.batches[index]
if self.src:
src_data = [self.src[i] for i in batch_ids]
else:
src_data = None
if self.tgt:
tgt_data = [self.tgt[i] for i in batch_ids]
else:
tgt_data = None
src_lang_data = None
tgt_lang_data = None
src_atbs_data = None
tgt_atbs_data = None
if self.bilingual:
if self.src_langs is not None:
src_lang_data = [self.src_langs[0]] # should be a tensor [0]
if self.tgt_langs is not None:
tgt_lang_data = [self.tgt_langs[0]] # should be a tensor [1]
if self.src_atbs is not None:
src_atbs_data = [self.src_atbs[0]]
if self.tgt_atbs is not None:
tgt_atbs_data = [self.tgt_atbs[0]]
else:
if self.src_langs is not None:
src_lang_data = [self.src_langs[i] for i in batch_ids]
if self.tgt_langs is not None:
tgt_lang_data = [self.tgt_langs[i] for i in batch_ids]
# if self.src_atbs is not None:
# src_atbs_data = [self.src_atbs[i] for i in batch_ids]
# if self.tgt_atbs is not None:
# tgt_atbs_data = [self.tgt_atbs[i] for i in batch_ids]
src_atbs_data = None
tgt_atbs_data = None
if self.use_past_src:
past_src = [self.past_src[i] for i in batch_ids]
else:
past_src = None
batch = rewrap(collate_fn(src_data, tgt_data=tgt_data,
src_lang_data=src_lang_data, tgt_lang_data=tgt_lang_data,
src_atbs_data=src_atbs_data, tgt_atbs_data=tgt_atbs_data,
src_align_right=self.src_align_right, tgt_align_right=self.tgt_align_right,
src_type=self._type,
augmenter=self.augmenter, upsampling=self.upsampling, vocab_mask=self.vocab_mask,
past_src_data=past_src,
src_pad=self.src_pad,
tgt_pad=self.tgt_pad,
feature_size=self.input_size),
)
return batch
def collater(self, collected_samples):
"""
Merge a list of samples into a Batch
:param collected_samples: list of dicts (the output of the __getitem__)
:return: batch
"""
split_size = math.ceil(len(collected_samples) / self.num_split)
sample_list = [collected_samples[i:i + split_size]
for i in range(0, len(collected_samples), split_size)]
batches = list()
for samples in sample_list:
src_data, tgt_data = None, None
src_lang_data, tgt_lang_data = None, None
src_atbs_data, tgt_atbs_data = None, None
past_src_data = None
if self.src:
src_data = [sample['src'] for sample in samples]
if self.tgt:
tgt_data = [sample['tgt'] for sample in samples]
if self.bilingual:
if self.src_langs is not None:
src_lang_data = [self.src_langs[0]] # should be a tensor [0]
if self.tgt_langs is not None:
tgt_lang_data = [self.tgt_langs[0]] # should be a tensor [1]
if self.src_atbs is not None:
src_atbs_data = [self.src_atbs[0]]
if self.tgt_atbs is not None:
tgt_atbs_data = [self.tgt_atbs[0]]
else:
if self.src_langs is not None:
src_lang_data = [sample['src_lang'] for sample in samples] # should be a tensor [0]
if self.tgt_langs is not None:
tgt_lang_data = [sample['tgt_lang'] for sample in samples] # should be a tensor [1]
# if self.src_atbs is not None:
# src_atbs_data = [self.src_atbs[i] for i in batch_ids]
# if self.tgt_atbs is not None:
# tgt_atbs_data = [self.tgt_atbs[i] for i in batch_ids]
src_atbs_data = None
tgt_atbs_data = None
if self.use_past_src:
past_src_data = [sample['past_src'] for sample in samples]
batch = collate_fn(src_data, tgt_data=tgt_data,
src_lang_data=src_lang_data, tgt_lang_data=tgt_lang_data,
src_atbs_data=src_atbs_data, tgt_atbs_data=tgt_atbs_data,
src_align_right=self.src_align_right, tgt_align_right=self.tgt_align_right,
src_type=self._type,
augmenter=self.augmenter, upsampling=self.upsampling, vocab_mask=self.vocab_mask,
past_src_data=past_src_data, src_pad=self.src_pad, tgt_pad=self.tgt_pad,
feature_size=self.input_size)
batches.append(batch)
return batches
def full_size(self):
return self.full_size
    # generate a new batch order (static)
def create_order(self, random=True):
if random:
self.batchOrder = torch.randperm(self.num_batches)
else:
self.batchOrder = torch.arange(self.num_batches).long()
self.cur_index = 0
return self.batchOrder
# # return the next batch according to the iterator
# def next(self, curriculum=False, reset=True):
#
# # reset iterator if reach data size limit
# if self.cur_index >= self.num_batches:
# if reset:
# self.cur_index = 0
# else:
# return None
#
# if curriculum or self.batchOrder is None:
# batch_index = self.cur_index
# else:
# batch_index = self.batchOrder[self.cur_index]
#
# batch = self[batch_index]
#
# # move the iterator one step
# self.cur_index += 1
#
# return [batch]
#
# def shuffle(self):
# data = list(zip(self.src, self.tgt))
# self.src, self.tgt = zip(*[data[i] for i in torch.randperm(len(data))])
#
# def set_index(self, iteration):
#
# assert (0 <= iteration < self.num_batches)
# self.cur_index = iteration
| 32,665
| 38.499395
| 129
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/data_iterator.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
import random
import numpy as np
import torch
from onmt.data.dataset import rewrap
from onmt.data import data_utils
_sentinel = object()
class CountingIterator(object):
"""Wrapper around an iterable that maintains the iteration count.
Args:
iterable (iterable): iterable to wrap
start (int): starting iteration count. Note that this doesn't
actually advance the iterator.
total (int): override the iterator length returned by
``__len__``. This can be used to truncate *iterator*.
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, start=None, total=None, empty=False):
self.iterable = iterable
self.itr = iter(self)
self.empty = empty
if start is None:
self.n = getattr(iterable, 'n', 0)
else:
self.n = start
if total is None:
self.total = self.n + len(iterable)
else:
self.total = total
def __len__(self):
return self.total
def __iter__(self):
if self.empty:
return
for x in self.iterable:
if self.n >= self.total:
return
self.n += 1
yield x
def __next__(self):
if self.empty:
return None
return next(self.itr)
def has_next(self):
"""Whether the iterator has been exhausted."""
return self.n < len(self)
def skip(self, num_to_skip):
"""Fast-forward the iterator by skipping *num_to_skip* elements."""
next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
return self
def take(self, n):
"""
Truncates the iterator to n elements at most.
"""
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
if hasattr(self.iterable, "take"):
self.iterable.take(n)
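# Illustrative sketch (not part of the original file): CountingIterator simply tracks how
# many elements have been consumed from a plain iterable.
def _example_counting_iterator():
    itr = CountingIterator([10, 20, 30])
    first = next(itr)                            # 10
    return first, itr.n, itr.has_next()          # (10, 1, True)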
class EpochBatchIterating(object):
def __len__(self) -> int:
raise NotImplementedError
@property
def next_epoch_idx(self):
raise NotImplementedError
def next_epoch_itr(self, shuffle=True, pin_memory=False):
"""Return a new iterator over the dataset.
Args:
:param shuffle: (bool, optional): shuffle batches before returning the
iterator (default: True).
:param pin_memory: bool
"""
raise NotImplementedError
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
raise NotImplementedError
@property
def iterations_in_epoch(self) -> int:
"""The number of consumed batches in the current epoch."""
raise NotImplementedError
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
raise NotImplementedError
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
raise NotImplementedError
"""A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.
    Compared to :class:`torch.utils.data.DataLoader`, this iterator keeps track of the
    current epoch and of how many batches have been consumed, so that iteration can be
    saved to and restored from a state dict.
    Args:
        dataset (~torch.utils.data.Dataset): the dataset to iterate over
"""
class DataIterator(EpochBatchIterating):
def __init__(self, dataset, collate_fn, batch_sampler, seed=1, num_workers=0,
epoch=1, buffer_size=0, timeout=0, num_shards=1, shard_id=0, fill_value=None, split_even=True):
"""
:param dataset:
:param collate_fn:
:param batch_sampler:
:param seed:
:param num_workers:
:param epoch:
:param buffer_size:
:param timeout:
:param shard_id: equivalent with rank
:param num_shards: equivalent with world size
"""
# it can be torch.utils.data.Dataset or a proxy class used to share between the processes in the node
# assert isinstance(dataset, torch.utils.data.Dataset)
self.dataset = dataset
self.collate_fn = collate_fn
self.frozen_batches = tuple(batch_sampler) # ??
self.seed = seed
self.num_workers = num_workers
self.epoch = max(epoch, 1)
self.buffer_size = buffer_size
self.timeout = timeout
self.shard_id = shard_id
self.num_shards = num_shards
self.shuffle = True
self._cur_epoch_itr = None
self._next_epoch_itr = None
self._support_prefetch = False
self.fill_value = fill_value
self.split_even = split_even
def __len__(self):
        # number of mini-batches (rounded up to a multiple of num_shards when splitting evenly)
if self.split_even:
return math.ceil(len(self.frozen_batches) / self.num_shards) * self.num_shards
else:
return len(self.frozen_batches)
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called"""
if self._next_epoch_itr is not None:
return self.epoch
elif self._cur_epoch_itr is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(self, shuffle=True, pin_memory=False, split_even=False):
"""
Return a new iterator over the dataset
:param split_even:
:param pin_memory:
:param shuffle:
:return:
"""
self.epoch = self.next_epoch_idx
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
self._cur_epoch_itr = self._get_iterator_for_epoch(
self.epoch, shuffle, pin_memory=pin_memory)
self.dataset.set_epoch(self.epoch)
self.shuffle = shuffle
return self._cur_epoch_itr
def end_of_epoch(self) -> bool:
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
""" The number of consumed batches in the current epoch"""
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.n
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.n
return 0
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
return {
'epoch': self.epoch,
'iterations_in_epoch': self.iterations_in_epoch,
'shuffle': self.shuffle,
}
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
if state_dict is not None:
self.epoch = state_dict['epoch']
itr_pos = state_dict.get('iterations_in_epoch', 0)
if itr_pos > 0:
# fast-forward epoch iterator
self._next_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle=state_dict.get('shuffle', True),
offset=itr_pos,
)
if self._next_epoch_itr is None:
# we finished the epoch, increment epoch counter
self.epoch += 1
else:
self._next_epoch_itr = None
else:
self.epoch = 1
itr_pos = 0
self._next_epoch_itr = None
def _get_iterator_for_epoch(self, epoch, shuffle, offset=0, pin_memory=False):
def shuffle_batches(batches_, seed):
with data_utils.numpy_seed(seed):
np.random.shuffle(batches_)
return batches_
if self._support_prefetch:
raise NotImplementedError
if shuffle:
batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)
else:
batches = list(self.frozen_batches)
num_shards = self.num_shards
empty = False
# if split_even: pad the batch list with randomly repeated batches so that it
# divides evenly across the shards
if self.split_even:
if len(batches) > 0 and len(batches) % num_shards != 0:
for _ in range(num_shards - (len(batches) % num_shards)):
rand_id = random.randint(0, len(batches) - 1)
batches.append(batches[rand_id])
if len(batches) > 0:
batches = list(ShardedIterator(batches, num_shards, self.shard_id, fill_value=batches[0]))
else:
# the data is so small that this shard's iterator is completely empty
empty = True
#
# if offset > 0 and offset >= len(batches):
# return None
if self.num_workers > 0:
os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
# Create data loader
itr = torch.utils.data.DataLoader(
self.dataset,
collate_fn=self.collate_fn,
batch_sampler=batches[offset:],
num_workers=self.num_workers,
pin_memory=pin_memory,
timeout=self.timeout,
)
# Wrap with a BufferedIterator if needed
if self.buffer_size > 0:
itr = BufferedIterator(self.buffer_size, itr)
# Wrap with CountingIterator
itr = CountingIterator(itr, start=offset, empty=empty)
return itr
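# --- Hedged usage sketch (added for clarity; not in the original file) ------
# Typical driver loop for DataIterator. `dataset`, `collate_fn` and
# `batch_sampler` are placeholders: the dataset is assumed to expose
# set_epoch() and the batch sampler to be a list of lists of example indices.
#
#   itr = DataIterator(dataset, collate_fn, batch_sampler, num_workers=2)
#   epoch_itr = itr.next_epoch_itr(shuffle=True)
#   for samples in epoch_itr:
#       ...                          # one training step per mini-batch
#   state = itr.state_dict()         # save to resume later
#   itr.load_state_dict(state)       # fast-forwards via the offset logic above
# -----------------------------------------------------------------------------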
class ShardedIterator(CountingIterator):
"""A sharded wrapper around an iterable, padded to length.
Args:
iterable (iterable): iterable to wrap
num_shards (int): number of shards to split the iterable into
shard_id (int): which shard to iterator over
fill_value (Any, optional): padding value when the iterable doesn't
evenly divide *num_shards* (default: None).
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, num_shards, shard_id, fill_value=None):
if shard_id < 0 or shard_id >= num_shards:
raise ValueError('shard_id must be between 0 and num_shards - 1')
sharded_len = int(math.ceil(len(iterable) / float(num_shards)))
# e.g. 4 batches over 6 shards gives sharded_len = 1: shards 0-3 each get
# one batch, and the remaining shards get 0 batches.
n_full_gpus = math.floor(len(iterable) / float(sharded_len))
# if shard_id == (num_shards - 1):  # last shard takes the remaining
#     sharded_len = len(iterable) - sharded_len * (num_shards - 1)
if shard_id < n_full_gpus:
sharded_len = sharded_len
elif shard_id == n_full_gpus:  # the first shard after the full ones
sharded_len = len(iterable) - sharded_len * n_full_gpus
else:
sharded_len = 0
# # first islice takes a list of minibatch-ids from shard_id to max, every "num_shards"
# # next, zip_longest takes the zip between (0, 1, ... n) and
# # the minibatches (longest, fill the latter with [])
# # next, map will apply the function taking the minibatches to return the iterator
itr = map(
operator.itemgetter(1),
itertools.zip_longest(
range(sharded_len),
itertools.islice(iterable, shard_id, len(iterable), num_shards),
fillvalue=fill_value,
),
)
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, 'n', 0) / float(num_shards))),
total=sharded_len,
)
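# --- Hedged illustration (added for clarity; not in the original file) ------
# A minimal, standalone sketch of the islice-based sharding used above:
# shard `shard_id` receives every num_shards-th batch starting at shard_id,
# padded with `fill_value` up to the shard length. All names are local to
# this sketch.
def _demo_shard(batches, num_shards, shard_id, fill_value=None):
    sharded_len = int(math.ceil(len(batches) / float(num_shards)))
    return [
        pair[1]
        for pair in itertools.zip_longest(
            range(sharded_len),
            itertools.islice(batches, shard_id, len(batches), num_shards),
            fillvalue=fill_value,
        )
    ]
# _demo_shard(list(range(10)), num_shards=3, shard_id=1) -> [1, 4, 7, None]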
class BackgroundConsumer(Thread):
def __init__(self, queue, source, max_len):
Thread.__init__(self)
self._queue = queue
self._source = source
self._max_len = max_len
self.count = 0
def run(self):
try:
self._source_iter = iter(self._source)
for _ in range(len(self._source)):
item = next(self._source_iter)
self._queue.put(item)
# Stop if we reached the maximum length
self.count += 1
if self._max_len is not None and self.count >= self._max_len:
break
# Signal the consumer we are done.
self._queue.put(_sentinel)
except Exception as e:
self._queue.put(e)
del self._source_iter
class BufferedIterator(object):
def __init__(self, size, iterable):
self._queue = queue.Queue(size)
self._iterable = iterable
self.max_len = None
self._consumer = None
self.start_time = time.time()
self.warning_time = None
def _create_consumer(self):
self._consumer = BackgroundConsumer(
self._queue,
self._iterable,
self.max_len
)
self._consumer.daemon = True
self._consumer.start()
def __iter__(self):
return self
def __len__(self):
return len(self._iterable)
def take(self, n):
self.max_len = n
def __next__(self):
# Create consumer if not created yet
if self._consumer is None:
self._create_consumer()
# Notify the user if there is a data loading bottleneck
if self._queue.qsize() < max(1, self._queue.maxsize // 2):
if time.time() - self.start_time > 5 * 60:
if self.warning_time is None or time.time() - self.warning_time > 15 * 60:
# print(
# "Data loading buffer is empty or nearly empty (%d). This may "
# "indicate a data loading bottleneck, and increasing the "
# "number of workers (--num-workers) may help." % self._queue.qsize()
# )
self.warning_time = time.time()
# Get next example
item = self._queue.get(True)
if isinstance(item, Exception):
raise item
if item is _sentinel:
raise StopIteration()
return item
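# --- Hedged illustration (added for clarity; not in the original file) ------
# BufferedIterator prefetches items from any sized iterable on a background
# thread; the queue size bounds how far the producer can run ahead. The toy
# list below is for illustration only.
def _demo_buffered_iterator():
    buffered = BufferedIterator(size=2, iterable=[10, 20, 30])
    return list(buffered)   # -> [10, 20, 30], filled by the consumer thread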
| 14,073
| 31.354023
| 112
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/binarizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
import os
import sys
from onmt.utils import safe_readline, safe_readaudio
# from multiprocessing import Pool
import torch.multiprocessing as mp
import torch
import onmt
import numpy as np
from .audio_utils import ArkLoader
class SpeechBinarizer:
def __init__(self):
pass
    @staticmethod
    def binarize_h5_file(filename, output_format='raw',
                         prev_context=0, concat=4, stride=1, fp16=False):
        # NOTE: this code path is unfinished. It assumes an `h5` (h5py) import and
        # `input_format`, `index` and `src_file` variables that are never defined,
        # so it currently always ends in NotImplementedError.
        file_idx = -1
        if filename[-2:] == "h5":
            srcf = h5.File(filename, 'r')
        else:
            file_idx = 0
            srcf = h5.File(filename + "." + str(file_idx) + ".h5", 'r')
        while True:
            if input_format == "h5":
                if str(index) in srcf:
                    feature_vector = np.array(srcf[str(index)])
                elif file_idx != -1:
                    srcf.close()
                    file_idx += 1
                    srcf = h5.File(src_file + "." + str(file_idx) + ".h5", 'r')
                    feature_vector = np.array(srcf[str(index)])
                else:
                    print("No feature vector for index:", index, file=sys.stderr)
                    break
        raise NotImplementedError
@staticmethod
def binarize_file_single_thread(filename, ark_loader, offset=0, end=-1, worker_id=0,
input_format='scp', output_format='raw',
prev_context=0, concat=4, stride=1, fp16=False, sample_rate=16000, verbose=False):
# if output_format is scp, we only read the length for sorting
if output_format == 'scp':
assert input_format in ['kaldi', 'scp']
if output_format == 'wav':
input_format = 'wav'
# audio_data = iter(ReadHelper('scp:' + filename))
# data_file = open(filename)
# data_keys = list(data.keys())
# data_paths = list(data._dict.values())
result = dict()
data = list()
lengths = list()
index = 0
with open(filename, 'r', encoding='utf-8') as f:
f.seek(offset)
line = safe_readline(f)
while line:
if 0 < end < f.tell():
break
parts = line.split()
key = parts[0]
# this special case is for the "preceeding"
if key == 'NULL':
feature_vector = torch.zeros(0, 0)
lengths.append(feature_vector.size(0))
line = f.readline()
continue
if input_format in ['scp', 'kaldi']:
# an scp file has the format: uttid path:mem
path = parts[1]
# read numpy array from the ark here
feature_vector = ark_loader.load_mat(path)
if stride == 1:
feature_vector = torch.from_numpy(feature_vector)
else:
feature_vector = torch.from_numpy(feature_vector[0::stride])
if concat > 1:
add = (concat - feature_vector.size()[0] % concat) % concat
z = torch.FloatTensor(add, feature_vector.size()[1]).zero_()
feature_vector = torch.cat((feature_vector, z), 0)
feature_vector = feature_vector.reshape((int(feature_vector.size()[0] / concat),
feature_vector.size()[1] * concat))
if prev_context > 0:
print("Multiple ASR context isn't supported at the moment ")
raise NotImplementedError
if fp16 and output_format not in ['scp', 'scpmem']:
feature_vector = feature_vector.half()
if output_format not in ['scp', 'scpmem']:
data.append(feature_vector.numpy()) # convert to numpy for serialization
else:
data.append(path)
elif input_format == 'wav':
                # a wav input line should have the format: uttid wav_file start end
# in which the start and end (by second) can be 0 0
if len(parts) >= 4:
wavpath, start_time, end_time = parts[1], float(parts[2]), float(parts[3])
else:
wavpath = parts[1]
start_time = 0
end_time = -1
if verbose:
print("processing wav file ...", wavpath, start_time, end_time)
# feature_vector = safe_readaudio(wavpath, start_time, end_time, sample_rate=sample_rate)
feature_vector = ark_loader.load_wav(wavpath, start_time, end_time, sample_rate=sample_rate)
# store a tuple of data and information to load the wav again during training
data.append((wavpath, start_time, end_time, sample_rate))
length = feature_vector.size(0)
lengths.append(length)
# if verbose and length > 256000:
# print('length: ', length)
line = f.readline()
if (index + 1) % 100000 == 0:
print("[INFO] Thread %d Processed %d audio utterances." % (worker_id, index + 1))
index = index + 1
result['data'] = data
result['sizes'] = lengths
result['id'] = worker_id
result['total'] = len(lengths)
return result
@staticmethod
def binarize_file(filename, input_format='scp', output_format='raw',
prev_context=0, concat=4, stride=1, fp16=False, num_workers=1, verbose=False):
result = dict()
for i in range(num_workers):
result[i] = dict()
final_result = dict()
def merge_result(bin_result):
result[bin_result['id']]['data'] = bin_result['data']
result[bin_result['id']]['sizes'] = bin_result['sizes']
offsets = Binarizer.find_offsets(filename, num_workers)
ark_loaders = dict()
for i in range(num_workers):
if input_format in ['scp', 'kaldi']:
ark_loaders[i] = ArkLoader()
elif input_format in ['wav']:
from .audio_utils import WavLoader
ark_loaders[i] = WavLoader()
else:
ark_loaders[i] = None
if num_workers > 1:
pool = mp.Pool(processes=num_workers)
mp_results = []
for worker_id in range(num_workers):
mp_results.append(pool.apply_async(
SpeechBinarizer.binarize_file_single_thread,
args=(filename, ark_loaders[worker_id], offsets[worker_id], offsets[worker_id + 1], worker_id,
input_format, output_format, prev_context, concat, stride, fp16, 16000, verbose),
))
pool.close()
pool.join()
for r in mp_results:
merge_result(r.get())
else:
            sp_result = SpeechBinarizer.binarize_file_single_thread(filename, ark_loaders[0], offsets[0], offsets[1], 0,
                                                                    input_format=input_format, output_format=output_format,
                                                                    prev_context=prev_context, concat=concat,
                                                                    stride=stride, fp16=fp16, verbose=verbose)
merge_result(sp_result)
final_result['data'] = list()
final_result['sizes'] = list()
        # put the data into the list according to the worker indices
for idx in range(num_workers):
for j in range(len(result[idx]['data'])):
x = result[idx]['data'][j]
# if we store the numpy array, then convert to torch
# otherwise, x is the scp path to the matrix
if isinstance(x, np.ndarray):
x = torch.from_numpy(x)
final_result['data'].append(x)
final_result['sizes'] += result[idx]['sizes']
        # remember to close the ark/wav loaders when we are done
for i in range(num_workers):
if ark_loaders[i] is not None:
ark_loaders[i].close()
return final_result
class Binarizer:
def __init__(self):
pass
@staticmethod
def find_offsets(filename, num_chunks):
"""
:param filename: string
:param num_chunks: int
:return: a list of offsets (positions to start and stop reading)
"""
with open(filename, 'r', encoding='utf-8') as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_chunks
offsets = [0 for _ in range(num_chunks + 1)]
for i in range(1, num_chunks):
f.seek(chunk_size * i)
safe_readline(f)
offsets[i] = f.tell()
return offsets
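    # --- Hedged usage sketch (added for clarity; not in the original file) --
    # find_offsets cuts the file into num_chunks byte ranges on line
    # boundaries; worker i then reads the half-open range
    # [offsets[i], offsets[i + 1]). The final entry stays 0, which the worker
    # loops in this module treat as "read until EOF".
    #
    #   offsets = Binarizer.find_offsets("train.txt", num_chunks=4)
    #   # -> five byte positions; the middle ones fall on line boundaries
    # -------------------------------------------------------------------------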
@staticmethod
def binarize_file_single_thread(filename, tokenizer, vocab, worker_id=0, bos_word=None, eos_word=None,
offset=0, end=-1, data_type='int64', verbose=False,
external_tokenizer=[None, None], lang=None, target=False):
"""
This function should read in the lines, convert sentences to tensors
And then finalize into a dataset?
"""
result = dict()
unk_word = onmt.constants.UNK_WORD
data = list()
sizes = list()
count = 0
ext_tokenizer, external_tokenizer_name = external_tokenizer
with open(filename, 'r', encoding='utf-8') as f:
f.seek(offset)
# next(f) breaks f.tell(), hence readline() must be used
line = safe_readline(f)
n_bad_sentences = 0
while line:
if 0 < end < f.tell():
break
if ext_tokenizer is None:
tokenized_sent = tokenizer.tokenize(line)
binarized_line = vocab.convertToIdx(tokenized_sent, unk_word,
bos_word=bos_word, eos_word=eos_word, type=data_type)
# move to shared_memory to transfer between threads
# conversion to numpy is necessary because torch.Tensor is not serializable by the mprocess
data += [binarized_line.numpy()]
sizes += [len(tokenized_sent)]
else:
tensor = ext_tokenizer(line.strip())['input_ids']
# print(tensor)
# assert that the mbart50 tokenizer uses the correct language ID
if "mbart-large-50" in external_tokenizer_name.lower():
assert tensor[0] == vocab.convertToIdx([lang], None)[0], "The first token must be language ID"
pad_id = vocab.convertToIdx(["<pad>"], None)[0]
assert pad_id not in tensor, "Pad is not supposed to appear in the tensors."
elif "m2m" in external_tokenizer_name.lower():
lang_token = "__" + lang + "__"
assert tensor[0] == vocab.convertToIdx([lang_token], None)[0], \
"The first token must be language ID"
pad_id = vocab.convertToIdx(["<pad>"], None)[0]
assert pad_id not in tensor, "Pad is not supposed to appear in the tensors."
elif "deltalm" in external_tokenizer_name.lower():
if len(tensor) > 2:
if tensor[0] not in [0, 1, 2, 3]:
assert tensor[0] == vocab.convertToIdx([lang], None)[0], "The first token must be language ID"
pad_id = vocab.convertToIdx(["<pad>"], None)[0]
assert pad_id not in tensor, "Pad is not supposed to appear in the tensors."
if target and tensor[0] != tensor[-1]:
# for the target side and in the multilingual case it is <eos> <langid> X <eos>
tensor = [tensor[-1]] + tensor
elif "mbart50eu" in external_tokenizer_name.lower():
if len(tensor) > 2:
if tensor[0] not in [0, 1, 2, 3]:
_lang = _lang if lang != "eu" else "en_XX"
assert tensor[0] == vocab.convertToIdx([lang], None)[0], \
"The first token must be language ID, expecting %d get %d. Current language: %s" \
% (vocab.convertToIdx([lang], None)[0], tensor[0], ext_tokenizer.src_lang)
# pad_id = vocab.convertToIdx(["<pad>"], None)[0]
# assert pad_id not in tensor, "Pad is not supposed to appear in the tensors."
if len(tensor) <= 2:
n_bad_sentences += 1
# print("[Warning] empty sentence with %d tokens including <bos> <eos>" % len(tensor))
sizes += [len(tensor)]
_dtype = np.int32
if data_type == "int64":
_dtype = np.int64
elif data_type == "int16":
_dtype = np.int16
data += [np.asarray(tensor, dtype=_dtype)]
line = f.readline()
count += 1
if count % 100000 == 0:
if verbose:
print("[INFO] Thread %d processed %d lines." % (worker_id, count))
if verbose:
if n_bad_sentences > 0:
print("[Warning] %d empty sentence including <bos> <eos>" % n_bad_sentences)
print("[INFO] Thread %d Done." % worker_id)
result['data'] = data
result['sizes'] = sizes
result['id'] = worker_id
result['total'] = len(sizes)
return result
@staticmethod
def binarize_file(filename, vocab, tokenizer, bos_word=None, eos_word=None,
data_type='int64', num_workers=1, verbose=False, external_tokenizer="",
lang=None, lang_list=[], target=False):
if "mbart-large-50" in external_tokenizer.lower():
print("[INFO] Using the external %s tokenizer..." % external_tokenizer)
from transformers import MBart50TokenizerFast
try: # check if this tokenizer is saved locally or not
print("Looking for pre-downloaded tokenizer ...")
ext_tokenizer = torch.load("mbart-large-50.tokenizer.pt")
ext_tokenizer.src_lang = lang
if ext_tokenizer.src_lang != lang:
raise RuntimeError("The language %s does not exist in mBART50." % lang)
except FileNotFoundError as e:
print("Expected error: ", e, "Downloading tokenizer ...")
ext_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50")
ext_tokenizer.src_lang = lang
# ext_tokenizer.src_lang = lang
if ext_tokenizer.src_lang != lang:
raise RuntimeError("The language %s does not exist in mBART50." % lang)
torch.save(ext_tokenizer, "mbart-large-50.tokenizer.pt")
elif "m2m100" in external_tokenizer.lower():
print("[INFO] Using the external %s tokenizer..." % external_tokenizer)
from transformers import M2M100Tokenizer
ext_tokenizer = M2M100Tokenizer.from_pretrained(external_tokenizer, src_lang=lang)
ext_tokenizer.src_lang = lang
if ext_tokenizer.src_lang != lang:
raise RuntimeError("The language %s does not exist in M2M100." % lang)
elif "mbart50eu" in external_tokenizer.lower():
print("[INFO] Using the MBART50EU tokenizer...")
from transformers import MBart50TokenizerFast
# from pretrain_module.tokenization_mbart50eu import MBART50TokenizerEU
# src_lang = lang if lang != "eu" else "en_XX"
src_lang = "<s>"
ext_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50")
ext_tokenizer.src_lang = src_lang
elif "bart" in external_tokenizer.lower():
print("[INFO] Using the external BART tokenizer...")
from transformers import BartTokenizer
ext_tokenizer = BartTokenizer.from_pretrained(external_tokenizer)
elif "deltalm" in external_tokenizer.lower():
print("[INFO] Using the DeltaLM tokenizer...")
from pretrain_module.tokenization_deltalm import MultilingualDeltaLMTokenizer
ext_tokenizer = MultilingualDeltaLMTokenizer.from_pretrained("facebook/mbart-large-50", lang_list=lang_list,
src_lang=lang)
# from pretrain_module.tokenization_deltalm import DeltaLMTokenizer
# try: # check if this tokenizer is saved locally or not
# ext_tokenizer = torch.load("deltalm.tokenizer.pt")
# ext_tokenizer.src_lang = lang
# except FileNotFoundError:
# ext_tokenizer = DeltaLMTokenizer.from_pretrained("facebook/mbart-large-50", src_lang=lang)
elif "nllb" in external_tokenizer.lower():
from transformers import NllbTokenizer
from pretrain_module.tokenization_deltalm import DeltaLMTokenizer
try: # check if this tokenizer is saved locally or not
ext_tokenizer = torch.load("nllb.tokenizer.pt")
ext_tokenizer.src_lang = lang
except FileNotFoundError:
ext_tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang=lang)
torch.save(ext_tokenizer, "nllb.tokenizer.pt")
elif external_tokenizer is None or len(external_tokenizer) == 0:
ext_tokenizer = None
else:
raise NotImplementedError
ext_tokenizer = [ext_tokenizer, external_tokenizer]
result = dict()
for i in range(num_workers):
result[i] = dict()
final_result = dict()
def merge_result(bin_result):
result[bin_result['id']]['data'] = bin_result['data']
result[bin_result['id']]['sizes'] = bin_result['sizes']
offsets = Binarizer.find_offsets(filename, num_workers)
if num_workers > 1:
pool = mp.Pool(processes=num_workers)
mp_results = []
for worker_id in range(num_workers):
mp_results.append(pool.apply_async(
Binarizer.binarize_file_single_thread,
args=(filename, tokenizer, vocab, worker_id, bos_word, eos_word,
offsets[worker_id], offsets[worker_id + 1], data_type, verbose, ext_tokenizer, lang, target),
))
pool.close()
pool.join()
for r in mp_results:
merge_result(r.get())
else:
sp_result = Binarizer.binarize_file_single_thread(filename, tokenizer, vocab, 0, bos_word, eos_word,
offsets[0], offsets[1], data_type,
external_tokenizer=ext_tokenizer,
lang=lang, target=target)
merge_result(sp_result)
final_result['data'] = list()
final_result['sizes'] = list()
        # put the data into the list according to the worker indices
for idx in range(num_workers):
final_result['data'] += result[idx]['data']
final_result['sizes'] += result[idx]['sizes']
return final_result
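# --- Hedged usage sketch (added for clarity; not in the original file) ------
# Typical call from a preprocessing script. `vocab` is assumed to be an onmt
# dictionary exposing convertToIdx and `tokenizer` a Tokenizer instance; both
# are assumptions of this sketch, and the file path is a placeholder.
#
#   binarized = Binarizer.binarize_file("train.src", vocab, tokenizer,
#                                       bos_word=None, eos_word="</s>",
#                                       data_type='int64', num_workers=4,
#                                       verbose=True)
#   data, sizes = binarized['data'], binarized['sizes']
# -----------------------------------------------------------------------------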
| 20,533
| 40.906122
| 126
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/multi_dataset.py
|
from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
from .dataset import Dataset
from .mmap_indexed_dataset import MMapIndexedDataset
from .scp_dataset import SCPIndexDataset
| 242
| 21.090909
| 52
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/tokenizer.py
|
import onmt
def split_line_by_char(line, word_list=["<unk>"]):
chars = list()
words = line.strip().split()
for i, word in enumerate(words):
if word in word_list:
chars.append(word)
else:
for c in word:
chars.append(c)
if i < (len(words) - 1):
chars.append(' ')
return chars
class Tokenizer(object):
def __init__(self, input_type='word', lower=False):
self.input_type = input_type
self.lower = lower
def __call__(self, sentence):
return self.tokenize(sentence)
def tokenize(self, sentence):
if self.input_type == "word":
tokens = sentence.strip().split()
elif self.input_type == "char":
tokens = split_line_by_char(sentence)
else:
raise NotImplementedError("Input type not implemented")
return tokens
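# --- Hedged illustration (added for clarity; not in the original file) ------
# Word- vs. character-level tokenization of the same (made-up) sentence:
#   Tokenizer('word').tokenize("hello <unk> world") -> ['hello', '<unk>', 'world']
#   Tokenizer('char').tokenize("hi <unk>")          -> ['h', 'i', ' ', '<unk>']
# -----------------------------------------------------------------------------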
FAIRSEQ_LANGUAGE_CODES = ["ar_AR",
"cs_CZ",
"de_DE",
"en_XX",
"es_XX",
"et_EE",
"fi_FI",
"fr_XX",
"gu_IN",
"hi_IN",
"it_IT",
"ja_XX",
"kk_KZ",
"ko_KR",
"lt_LT",
"lv_LV",
"my_MM",
"ne_NP",
"nl_XX",
"ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR",
"he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL",
"ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK",
"xh_ZA", "gl_ES", "sl_SI"]
class HuggingFaceTokenizer(object):
def __init__(self, pretrained_tokenizer):
if pretrained_tokenizer == 'facebook/mbart-large-50':
from transformers import MBart50TokenizerFast
tokenizer_ = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX")
else:
raise NotImplementedError
self._tokenizer = tokenizer_
def tokenize(self, text, src_lang=None):
if src_lang is not None:
found = False
for lang in FAIRSEQ_LANGUAGE_CODES:
if lang[:2] == src_lang:
self._tokenizer.src_lang = lang
found = True
break
if not found:
print("Language code %s not found" % lang)
raise NotImplementedError
# add special tokens, etc
tensor = self._tokenizer(text)['input_ids']
# convert back to text
tokens = self._tokenizer.convert_ids_to_tokens(tensor, skip_special_tokens=False)
return tokens
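# --- Hedged usage sketch (added for clarity; not in the original file) ------
# The wrapper maps a two-letter language code onto the matching FAIRSEQ code
# before tokenizing (e.g. "de" -> "de_DE"). Instantiating it downloads the
# mBART-50 tokenizer, so this stays a sketch.
#
#   hf_tok = HuggingFaceTokenizer('facebook/mbart-large-50')
#   tokens = hf_tok.tokenize("Guten Morgen", src_lang="de")
# -----------------------------------------------------------------------------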
| 3,035
| 28.764706
| 115
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/multidata_iterator.py
|
import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
from .data_iterator import EpochBatchIterating, DataIterator
import numpy as np
import torch
class MultiEpochIterator(object):
# this class stores N epoch iterators for N datasets
# init is called at the beginning of the epoch
def __init__(self, iterators, round_robin=False):
"""
:param iterators: a list of CountingIterators
:param round_robin: if the data is sampled iteratively 1 to N or randomly
"""
self.iterators = iterators
self.round_robin = round_robin
self.n_iterators = len(iterators)
# self.total = sum([len(iterator) for iterator in self.iterators])
self.sizes = [len(iterator) for iterator in self.iterators]
self.total = sum(self.sizes)
self.itr = iter(self)
if self.round_robin:
self.itr_indices = torch.arange(self.n_iterators)
else:
# self.itr_indices = torch.randperm(self.n_iterators)
with torch.no_grad():
self.itr_indices = torch.Tensor(self.sizes).div(self.total)
self.idx = -1
self.n_yielded = 0
def iterations_in_epoch(self):
"""
:return: a list of iterations in epoch for each iterator
"""
return [iterator.n for iterator in self.iterators]
def load_iterations(self, iteration_in_epochs):
for iterator, iter_in_epoch in zip(self.iterators, iteration_in_epochs):
iterator.n = iter_in_epoch
def __len__(self):
return sum([len(iterator) for iterator in self.iterators])
def __iter__(self):
while True:
if self.n_yielded >= self.total:
return
if self.round_robin:
self.idx = self.idx + 1
if self.idx >= self.n_iterators:
self.idx = 0
cur_iterator = self.iterators[self.itr_indices[self.idx]]
# if the current iterator is not exhausted, then yield
# otherwise go to the next one
if cur_iterator.has_next():
self.n_yielded += 1
yield next(cur_iterator)
else:
continue
else:
# sample randomly from the iterators
# large datasets will be likely to generate more samples
# smaller datasets will be less likely
# but averaging-out, the model is more balanced than round-robin
sampled_itr = torch.multinomial(self.itr_indices, 1).unsqueeze(-1).item()
# if the current iterator is not exhausted, then yield
# otherwise resample
cur_iterator = self.iterators[sampled_itr]
if cur_iterator.has_next():
self.n_yielded += 1
yield next(cur_iterator)
else:
# zero-out that index to avoid sampling into the same empty iterator
with torch.no_grad():
self.itr_indices[sampled_itr].zero_()
continue
def __next__(self):
return next(self.itr)
def has_next(self):
return self.n_yielded < self.total
def skip(self, num_to_skip):
for iterator in self.iterators:
iterator.skip(num_to_skip)
def take(self, n):
"""
Truncates the iterator to n elements at most.
"""
for iterator in self.iterators:
iterator.take(n)
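# --- Hedged illustration (added for clarity; not in the original file) ------
# When round_robin is False, the next dataset is drawn in proportion to its
# size (len(iterator) / total), so larger corpora are sampled more often. The
# sizes below are made up for illustration.
def _demo_sampling_weights(sizes=(100, 300, 600)):
    total = sum(sizes)
    weights = torch.tensor(sizes, dtype=torch.float).div(total)
    return torch.multinomial(weights, 1).item()   # index of the sampled corpus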
class MultiDataIterator(EpochBatchIterating):
def next_epoch_itr(self, shuffle=True, pin_memory=False):
self.epoch = self.next_epoch_idx
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
self._cur_epoch_itr = self._get_iterator_for_epoch(
self.epoch, shuffle, pin_memory=pin_memory
)
for dataset in self.datasets:
dataset.set_epoch(self.epoch)
self.shuffle = shuffle
return self._cur_epoch_itr
    # each dataset gets its own DataIterator, which produces one epoch iterator;
    # this class wraps them all into a single MultiEpochIterator per epoch
def __init__(self, datasets, seed=1., num_workers=0, epoch=1, buffer_size=0,
timeout=0, round_robin=False, num_shards=1, shard_id=0, split_even=True, dataset_ids=None):
"""
:param datasets: list of Datasets
        :param seed: random seed used when shuffling batches each epoch
:param num_workers:
:param epoch:
:param buffer_size:
:param timeout:
:param round_robin:
:param num_shards:
:param shard_id:
:param split_even: Split the datasets evenly (otherwise adding samples)
:param dataset_ids: Selectively choose datasets involved
"""
self.datasets = datasets
self.data_iterators = list()
for i, dataset in enumerate(datasets):
if dataset_ids is not None and len(dataset_ids) > 0:
if i not in dataset_ids:
continue
self.data_iterators.append(DataIterator(dataset, dataset.get_collater(), dataset.get_batches(), seed=seed,
num_workers=num_workers, epoch=epoch, buffer_size=buffer_size,
timeout=timeout, num_shards=num_shards,
shard_id=shard_id, split_even=split_even))
self.shuffle = True
self._cur_epoch_itr = None
self._next_epoch_itr = None
self._support_prefetch = False
self.round_robin = round_robin
self.epoch = max(epoch, 1)
self.n_samples = sum([dataset.get_size() for dataset in self.datasets])
def __len__(self):
return sum([len(data_iterator) for data_iterator in self.data_iterators])
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called"""
if self._next_epoch_itr is not None:
return self.epoch
elif self._cur_epoch_itr is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def end_of_epoch(self) -> bool:
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
""" The number of consumed batches in the current epoch"""
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.iterations_in_epoch()
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.iterations_in_epoch()
return [0] * len(self.data_iterators)
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
return {
'epoch': self.epoch,
'iterations_in_epoch': self.iterations_in_epoch,
'shuffle': self.shuffle,
}
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
if state_dict is not None:
self.epoch = state_dict['epoch']
itr_pos = state_dict.get('iterations_in_epoch', [0] * len(self.data_iterators))
if sum(itr_pos) > 0:
# fast-forward epoch iterator
self._next_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle=state_dict.get('shuffle', True),
offsets=itr_pos
)
if self._next_epoch_itr is None:
# we finished the epoch, increment epoch counter
self.epoch += 1
else:
self._next_epoch_itr = None
else:
self.epoch = 1
itr_pos = list()
self._next_epoch_itr = None
def _get_iterator_for_epoch(self, epoch, shuffle=False, offsets=None, pin_memory=False):
epoch_iterators = list()
if offsets is not None and sum(offsets) >= self.n_samples:
return None
if offsets is None:
offsets = [0] * len(self.data_iterators)
# first, generate an iterator for each data iterator
for (data_iterator, offset) in zip(self.data_iterators, offsets):
epoch_iterator = data_iterator._get_iterator_for_epoch(epoch, shuffle, offset, pin_memory=pin_memory)
epoch_iterators.append(epoch_iterator)
        # next, wrap them in a multi-epoch iterator
epoch_iterator = MultiEpochIterator(epoch_iterators, round_robin=self.round_robin)
return epoch_iterator
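# --- Hedged usage sketch (added for clarity; not in the original file) ------
# Typical driver, assuming each dataset exposes get_collater(), get_batches(),
# get_size() and set_epoch() as used above (assumptions of this sketch).
#
#   multi_itr = MultiDataIterator(datasets, seed=1, num_workers=2,
#                                 num_shards=world_size, shard_id=rank)
#   epoch_itr = multi_itr.next_epoch_itr(shuffle=True)
#   for samples in epoch_itr:
#       ...   # batches come from the per-dataset iterators, mixed by size
# -----------------------------------------------------------------------------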
| 9,036
| 34.163424
| 118
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/data/scp_dataset.py
|
import torch
from kaldiio import load_mat
from functools import lru_cache
import numpy as np
from .audio_utils import _parse_arkpath, ArkLoader
import warnings
warnings.filterwarnings("ignore", message="The given NumPy array is not writeable ")
class SCPIndexDataset(torch.utils.data.Dataset):
"""
This dataset simply stores a list of paths to ark matrices
The __get__ function uses load_mat from kaldiio to read the ark matrices for retrieval
"""
def __init__(self, scp_path_list, concat=4, shared_object=None):
"""
:param scp_path_list: list of path to the ark matrices
"""
self.scp_path_list = scp_path_list
self._sizes = len(self.scp_path_list)
self._dtype = torch.float32
self.concat = concat
if shared_object is not None:
self.reader = shared_object.reader
else:
self.reader = ArkLoader()
@property
def dtype(self):
# I'm not sure when this function is called
return self._dtype
@property
def sizes(self):
return self._sizes
def __len__(self):
return self._sizes
@lru_cache(maxsize=8)
def __getitem__(self, i):
scp_path = self.scp_path_list[i]
mat = self.reader.load_mat(scp_path)
feature_vector = torch.from_numpy(mat)
concat = self.concat
if concat > 1:
add = (concat - feature_vector.size()[0] % concat) % concat
z = torch.FloatTensor(add, feature_vector.size()[1]).zero_()
feature_vector = torch.cat((feature_vector, z), 0)
feature_vector = feature_vector.reshape((int(feature_vector.size()[0] / concat),
feature_vector.size()[1] * concat))
return feature_vector
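# --- Hedged illustration (added for clarity; not in the original file) ------
# A standalone sketch of the frame-stacking arithmetic used in __getitem__:
# every `concat` consecutive frames are merged into one wider frame, with the
# tail zero-padded so the length divides evenly, turning a (T, d) matrix into
# (ceil(T / concat), d * concat). The shapes below are made up.
def _demo_frame_stacking(T=10, d=3, concat=4):
    feats = torch.randn(T, d)
    add = (concat - feats.size(0) % concat) % concat
    feats = torch.cat((feats, torch.zeros(add, d)), 0)
    return feats.reshape(feats.size(0) // concat, d * concat)   # -> (3, 12)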
| 1,877
| 29.786885
| 92
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/indexed_dataset.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import os
import struct
import numpy as np
import torch
import torch.utils.data
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
6: float,
7: np.double,
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
def index_file_path(prefix_path):
return prefix_path + '.idx'
def data_file_path(prefix_path):
return prefix_path + '.bin'
class IndexedDataset(torch.utils.data.Dataset):
"""Loader for TorchNet IndexedDataset"""
def __init__(self, path):
super().__init__()
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
assert magic == b'TNTIDX\x00\x00'
version = f.read(8)
assert struct.unpack('<Q', version) == (1,)
code, self.element_size = struct.unpack('<QQ', f.read(16))
self.dtype = dtypes[code]
self.size, self.s = struct.unpack('<QQ', f.read(16))
self.dim_offsets = read_longs(f, self.size + 1)
self.data_offsets = read_longs(f, self.size + 1)
self.sizes = read_longs(f, self.s)
self.read_data(path)
def read_data(self, path):
self.data_file = open(data_file_path(path), 'rb', buffering=0)
def check_index(self, i):
if i < 0 or i >= self.size:
raise IndexError('index out of range')
def __del__(self):
self.data_file.close()
def __getitem__(self, i):
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
item = torch.from_numpy(a).long()
return item
def __len__(self):
return self.size
@staticmethod
def exists(path):
return (
os.path.exists(index_file_path(path)) and
os.path.exists(data_file_path(path))
)
class IndexedInMemoryDataset(IndexedDataset):
"""Loader for TorchNet IndexedDataset, keeps all the data in memory"""
def read_data(self, path):
self.data_file = open(data_file_path(path), 'rb')
self.buffer = np.empty(self.data_offsets[-1], dtype=self.dtype)
self.data_file.readinto(self.buffer)
self.data_file.close()
def __del__(self):
pass
def __getitem__(self, i):
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
np.copyto(a, self.buffer[self.data_offsets[i]:self.data_offsets[i + 1]])
return torch.from_numpy(a).long()
class IndexedDatasetBuilder(object):
element_sizes = {
np.uint8: 1,
np.int8: 1,
np.int16: 2,
np.int32: 4,
np.int64: 8,
float: 4,
np.double: 8
}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, 'wb')
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
def add_item(self, tensor):
        # write the raw tensor bytes; offsets are tracked in units of element_size
        bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, 'wb')
index.write(b'TNTIDX\x00\x00')
index.write(struct.pack('<Q', 1))
index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
index.close()
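# --- Hedged usage sketch (added for clarity; not in the original file) ------
# Building a .bin/.idx pair and reading it back; the paths and tensors are
# placeholders.
#
#   builder = IndexedDatasetBuilder('train.bin', dtype=np.int32)
#   for tensor in tensors:               # 1-D LongTensors of token ids
#       builder.add_item(tensor)
#   builder.finalize('train.idx')
#   ds = IndexedDataset('train')         # reads train.idx and train.bin
#   first_item = ds[0]
# -----------------------------------------------------------------------------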
| 4,543
| 28.128205
| 84
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/audio_utils.py
|
import numpy as np
from contextlib import contextmanager
import io
from io import TextIOBase
import os
import subprocess
import sys
import warnings
from functools import partial
from io import BytesIO
from io import StringIO
import re
import struct
import sys
import warnings
import soundfile
import math
import torch
from .kaldiio.compression_header import GlobalHeader
from .kaldiio.compression_header import PerColHeader
from .kaldiio.utils import default_encoding
from .kaldiio.utils import LazyLoader
from .kaldiio.utils import MultiFileDescriptor
from .kaldiio.utils import open_like_kaldi
from .kaldiio.utils import open_or_fd
from .kaldiio.utils import seekable
from .kaldiio.wavio import read_wav
from .kaldiio.wavio import write_wav
PY3 = sys.version_info[0] == 3
if PY3:
from collections.abc import Mapping
binary_type = bytes
string_types = str,
else:
from collections import Mapping
binary_type = str
string_types = basestring, # noqa: F821
# load scp function
# audio downsampling function
def _parse_arkpath(ark_name):
"""Parse arkpath
Args:
ark_name (str):
Returns:
Tuple[str, int, Optional[Tuple[slice, ...]]]
Examples:
>>> _parse_arkpath('a.ark')
'a.ark', None, None
>>> _parse_arkpath('a.ark:12')
'a.ark', 12, None
>>> _parse_arkpath('a.ark:12[3:4]')
'a.ark', 12, (slice(3, 4, None),)
>>> _parse_arkpath('cat "fo:o.ark" |')
'cat "fo:o.ark" |', None, None
"""
if ark_name.rstrip()[-1] == '|' or ark_name.rstrip()[0] == '|':
# Something like: "| cat foo" or "cat bar|" shouldn't be parsed
return ark_name, None, None
slices = None
if ':' in ark_name:
fname, offset = ark_name.split(':', 1)
if '[' in offset and ']' in offset:
offset, Range = offset.split('[')
# Range = [3:6, 10:30]
Range = Range.replace(']', '').strip()
slices = _convert_to_slice(Range)
offset = int(offset)
else:
fname = ark_name
offset = None
return fname, offset, slices
def read_int32vector(fd, endian='<', return_size=False):
assert fd.read(2) == b'\0B'
assert fd.read(1) == b'\4'
length = struct.unpack(endian + 'i', fd.read(4))[0]
array = np.empty(length, dtype=np.int32)
for i in range(length):
assert fd.read(1) == b'\4'
array[i] = struct.unpack(endian + 'i', fd.read(4))[0]
if return_size:
return array, (length + 1) * 5 + 2
else:
return array
def read_matrix_or_vector(fd, endian='<', return_size=False):
"""Call from load_kaldi_file
Args:
fd (file):
endian (str):
return_size (bool):
"""
size = 0
assert fd.read(2) == b'\0B'
size += 2
Type = str(read_token(fd))
size += len(Type) + 1
# CompressedMatrix
if 'CM' == Type:
# Read GlobalHeader
global_header = GlobalHeader.read(fd, Type, endian)
size += global_header.size
per_col_header = PerColHeader.read(fd, global_header)
size += per_col_header.size
# Read data
buf = fd.read(global_header.rows * global_header.cols)
size += global_header.rows * global_header.cols
array = np.frombuffer(buf, dtype=np.dtype(endian + 'u1'))
array = array.reshape((global_header.cols, global_header.rows))
# Decompress
array = per_col_header.char_to_float(array)
array = array.T
elif 'CM2' == Type:
# Read GlobalHeader
global_header = GlobalHeader.read(fd, Type, endian)
size += global_header.size
# Read matrix
buf = fd.read(2 * global_header.rows * global_header.cols)
array = np.frombuffer(buf, dtype=np.dtype(endian + 'u2'))
array = array.reshape((global_header.rows, global_header.cols))
# Decompress
array = global_header.uint_to_float(array)
elif 'CM3' == Type:
# Read GlobalHeader
global_header = GlobalHeader.read(fd, Type, endian)
size += global_header.size
# Read matrix
buf = fd.read(global_header.rows * global_header.cols)
array = np.frombuffer(buf, dtype=np.dtype(endian + 'u1'))
array = array.reshape((global_header.rows, global_header.cols))
# Decompress
array = global_header.uint_to_float(array)
else:
if Type == 'FM' or Type == 'FV':
dtype = endian + 'f'
bytes_per_sample = 4
elif Type == 'HM':
dtype = endian + 'e'
bytes_per_sample = 2
elif Type == 'DM' or Type == 'DV':
dtype = endian + 'd'
bytes_per_sample = 8
else:
raise ValueError(
'Unexpected format: "{}". Now FM, FV, DM, DV, '
'CM, CM2, CM3 are supported.'.format(Type))
assert fd.read(1) == b'\4'
size += 1
rows = struct.unpack(endian + 'i', fd.read(4))[0]
size += 4
dim = rows
if 'M' in Type: # As matrix
assert fd.read(1) == b'\4'
size += 1
cols = struct.unpack(endian + 'i', fd.read(4))[0]
size += 4
dim = rows * cols
buf = fd.read(dim * bytes_per_sample)
size += dim * bytes_per_sample
array = np.frombuffer(buf, dtype=np.dtype(dtype))
if 'M' in Type: # As matrix
array = np.reshape(array, (rows, cols))
if return_size:
return array, size
else:
return array
def read_ascii_mat(fd, return_size=False):
"""Call from load_kaldi_file
Args:
fd (file): binary mode
return_size (bool):
"""
string = []
size = 0
# Find '[' char
while True:
b = fd.read(1)
try:
char = b.decode(encoding=default_encoding)
except UnicodeDecodeError:
raise ValueError('File format is wrong?')
size += 1
if char == ' ' or char == '\n':
continue
elif char == '[':
hasparent = True
break
else:
string.append(char)
hasparent = False
break
# Read data
ndmin = 1
while True:
char = fd.read(1).decode(encoding=default_encoding)
size += 1
if hasparent:
if char == ']':
char = fd.read(1).decode(encoding=default_encoding)
size += 1
assert char == '\n' or char == ''
break
elif char == '\n':
ndmin = 2
elif char == '':
raise ValueError(
'There are no corresponding bracket \']\' with \'[\'')
else:
if char == '\n' or char == '':
break
string.append(char)
string = ''.join(string)
assert len(string) != 0
# Examine dtype
match = re.match(r' *([^ \n]+) *', string)
if match is None:
dtype = np.float32
else:
ma = match.group(0)
        # If the first element is an integer, treat it as an integer array
try:
float(ma)
except ValueError:
raise RuntimeError(
ma + 'is not a digit\nFile format is wrong?')
if '.' in ma:
dtype = np.float32
else:
dtype = np.int32
array = np.loadtxt(StringIO(string), dtype=dtype, ndmin=ndmin)
if return_size:
return array, size
else:
return array
def read_token(fd):
"""Read token
Args:
fd (file):
"""
token = []
# Keep the loop until finding ' ' or end of char
while True:
c = fd.read(1)
if c == b' ' or c == b'':
break
token.append(c)
if len(token) == 0: # End of file
return None
decoded = b''.join(token).decode(encoding=default_encoding)
return decoded
def read_kaldi(fd, endian='<', return_size=False):
"""Load kaldi
Args:
fd (file): Binary mode file object. Cannot input string
endian (str):
return_size (bool):
"""
assert endian in ('<', '>'), endian
binary_flag = fd.read(4)
assert isinstance(binary_flag, binary_type), type(binary_flag)
if seekable(fd):
fd.seek(-4, 1)
else:
fd = MultiFileDescriptor(BytesIO(binary_flag), fd)
if binary_flag[:4] == b'RIFF':
# array: Tuple[int, np.ndarray]
array, size = read_wav(fd, return_size=True)
# Load as binary
elif binary_flag[:2] == b'\0B':
if binary_flag[2:3] == b'\4': # This is int32Vector
array, size = read_int32vector(fd, endian, return_size=True)
else:
array, size = read_matrix_or_vector(fd, endian, return_size=True)
# Load as ascii
else:
array, size = read_ascii_mat(fd, return_size=True)
if return_size:
return array, size
else:
return array
class ArkLoader(object):
def __init__(self, fastest=True):
self.current_ark = None
self.reader = None
self.readers = dict()
self.fastest = fastest
def load_mat(self, ark_name, endian='<', as_bytes=False):
assert endian in ('<', '>'), endian
ark, offset, slices = _parse_arkpath(ark_name)
if not self.fastest:
if self.current_ark != ark:
if self.reader is not None:
self.reader.close()
self.reader = open_like_kaldi(ark, 'rb')
self.current_ark = ark
return self.read_mat(self.reader, offset, slices, endian=endian, as_bytes=as_bytes)
else:
if ark not in self.readers:
self.readers[ark] = open_like_kaldi(ark, 'rb')
fd = self.readers[ark]
return self.read_mat(fd, offset, slices, endian=endian, as_bytes=as_bytes)
def read_mat(self, fd, offset, slices, endian='<', as_bytes=False):
if offset is not None:
fd.seek(offset)
if not as_bytes:
array = read_kaldi(fd, endian)
else:
array = fd.read()
if slices is not None:
if isinstance(array, (tuple, list)):
array = (array[0], array[1][slices])
else:
array = array[slices]
return array
def close(self):
if self.reader is not None:
self.reader.close()
for k in self.readers:
self.readers[k].close()
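# --- Hedged usage sketch (added for clarity; not in the original file) ------
# ArkLoader keeps one open descriptor per ark file, so repeated lookups into
# the same ark avoid reopening it. The path below is a placeholder in the
# usual Kaldi "file.ark:offset" form found in scp files.
#
#   loader = ArkLoader()
#   mat = loader.load_mat('feats.ark:1234')   # numpy array, frames x dims
#   loader.close()
# -----------------------------------------------------------------------------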
def safe_readaudio_from_cache(file_, start=0.0, end=0.0, sample_rate=16000):
offset = math.floor(sample_rate * start)
num_frames = -1 if end <= start else math.ceil(sample_rate * (end - start))
dtype = "float32"
frames = file_._prepare_read(offset, None, num_frames)
waveform = file_.read(frames, dtype, always_2d=True)
sample_rate_ = file_.samplerate
tensor = torch.from_numpy(waveform)
tensor = tensor[:, 0].unsqueeze(1)
return tensor
class WavLoader(object):
def __init__(self, cache_size=512):
"""
:param scp_path_list: list of path to the ark matrices
"""
if cache_size > 0:
self.cache = dict()
self.usage = dict()
else:
self.cache = None
self.cache_size = cache_size
def load_wav(self, wav_path, start, end, sample_rate=16000):
# take the object in cache if exists
if wav_path in self.cache:
file_ = self.cache[wav_path]
self.usage[wav_path] = self.usage[wav_path] + 1
else:
# read the audio file
# print(os.path.exists(wav_path), wav_path)
file_ = soundfile.SoundFile(wav_path, 'r')
if len(self.cache) > self.cache_size:
                # evict the least-used file from the cache to bound the number of open handles
min_key = min(self.usage, key=self.usage.get)
if min_key != wav_path: # don't close the current file
self.cache[min_key].close()
self.cache.pop(min_key, None)
self.usage.pop(min_key, None)
# add the object to the cache
self.cache[wav_path] = file_
self.usage[wav_path] = 1
data = safe_readaudio_from_cache(file_, start, end, sample_rate)
return data
def close(self):
for wav_path in self.cache:
self.cache[wav_path].close()
| 12,439
| 27.863109
| 95
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/lm_dataset.py
|
from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
import onmt
from onmt.data.dataset import Dataset
class LanguageModelBatch(object):
def __init__(self, data, target, lang, **kwargs):
self.data = data
self.target = target
self.lang = lang
self.tensors = defaultdict(lambda: None)
self.tensors['target_input'] = data
self.tensors['target_output'] = target
self.tensors['target_lang'] = lang
self.tgt_size = target.numel()
self.src_size = 0
self.size = target.size(1)
def get(self, name):
if name in self.tensors:
return self.tensors[name]
else:
return None
def cuda(self, fp16=False):
"""
Send the minibatch data into GPU. Old-fashioned without the 'device' control
:param fp16:
:return: None
"""
for key, tensor in self.tensors.items():
if isinstance(tensor, dict):
for k in tensor:
v = tensor[k]
tensor[k] = v.cuda()
elif tensor is not None:
if tensor.type() == "torch.FloatTensor" and fp16:
self.tensors[key] = tensor.half()
self.tensors[key] = self.tensors[key].cuda()
else:
continue
class LanguageModelDataset(Dataset):
def __init__(self, data, langs, batch_size_sents=128, batch_size_words=9999,
seq_length=64, **kwargs):
# concatenate all sentences in the data to get a stream
if len(langs) <= 1:
self.single_language = True
else:
self.single_language = False
if not self.single_language:
self.langs = [torch.Tensor([data[i].size(0)]).fill_(langs[i]) for i in range(len(langs))]
else:
self.langs = langs
self.langs = torch.cat(self.langs, dim=0).long()
self.data = torch.cat(data, dim=0).long()
self.batch_size_sents = batch_size_sents
self.batch_size_words = batch_size_words
self.seq_length = seq_length
self.bptt = seq_length
full_length = sum([x.size(0) for x in data])
# group samples into mini batches
self.num_batches = 0
self.batches = []
self.allocate_batch()
self.fullSize = self.num_batches
self.cur_index = 0
self.batchOrder = None
def allocate_batch(self):
self.n_step = self.data.size(0) // self.batch_size_sents
self.data = self.data.narrow(0, 0, self.n_step * self.batch_size_sents)
# Evenly divide the data across the bsz batches.
self.data = self.data.view(self.batch_size_sents, -1).t().contiguous()
# self.num_steps = nbatch - 1
# self.num_batches = (self.n_step + self.seq_length - 1) // self.seq_length
self.batches = []
for i in range(0, self.data.size(0) - 1, self.bptt):
bptt = self.seq_length
seq_len = min(bptt, self.data.size(0) - 1 - i)
end_idx = i + seq_len
beg_idx = max(0, i)
data = self.data[beg_idx:end_idx]
target = self.data[i + 1:i + 1 + seq_len]
if self.single_language:
lang = self.langs
else:
lang = self.langs[beg_idx:end_idx]
self.batches.append((data, target, lang))
self.num_batches = len(self.batches)
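    # --- Hedged illustration (added for clarity; not in the original file) --
    # allocate_batch trims the token stream to a multiple of batch_size_sents,
    # reshapes it into (steps, batch_size_sents), and then slices bptt-long
    # chunks where the target is the input shifted by one token:
    #
    #   stream = torch.arange(23)                 # made-up token ids
    #   bsz, bptt = 4, 3
    #   n_step = stream.size(0) // bsz            # 5
    #   stream = stream.narrow(0, 0, n_step * bsz).view(bsz, -1).t()   # (5, 4)
    #   data, target = stream[0:bptt], stream[1:1 + bptt]              # one chunk
    # -------------------------------------------------------------------------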
    # generate a new batch order (static)
def create_order(self, random=False):
# For language model order shouldn't be random
self.batchOrder = torch.arange(self.num_batches).long()
self.cur_index = 0
return self.batchOrder
# return the next batch according to the iterator
# for language model
def next(self, curriculum=True, reset=True, split_sizes=1):
# reset iterator if reach data size limit
# if self.cur_index >= self.num_batches:
# if reset:
# self.cur_index = 0
# else:
# return None
#
# batch_index = self.cur_index
#
# seq_len = self.seq_length
#
# top_index = min(batch_index + seq_len, self.data.size(0) - 1)
#
# batch = LMBatch(self.data[batch_index:top_index], target=self.data[batch_index + 1:top_index + 1])
#
# # move the iterator one step
# self.cur_index += seq_len
if self.cur_index >= self.num_batches:
if reset:
self.cur_index = 0
else:
return None
data, target, lang = self.batches[self.cur_index]
batch = LanguageModelBatch(data, target, lang)
self.cur_index += 1
return [batch]
| 4,860
| 28.822086
| 108
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/kaldiio/utils.py
|
from __future__ import unicode_literals
from contextlib import contextmanager
import io
from io import TextIOBase
import os
import subprocess
import sys
import warnings
PY3 = sys.version_info[0] == 3
if PY3:
from collections.abc import MutableMapping
string_types = str,
text_type = str
else:
from collections import MutableMapping
string_types = basestring, # noqa: F821
text_type = unicode # noqa: F821
default_encoding = 'utf-8'
"""
"utf-8" is used not depending on the environements variable,
e.g. LC_ALL, PYTHONIOENCODING, or PYTHONUTF8.
# Note: About the encoding of Python
- filesystem encoding
sys.getfilesystemencoding().
Used for file path and command line arguments.
The default value depends on the locale of your unix system.
If Python>=3.7,
- preferred encoding
locale.getpreferredencoding(). Used for open().
The default value depends on the locale of your unix system.
- stdout and stdin
If PYTHONIOENCODING is set, then it's used,
else if in a terminal, same as filesystem encoding
else same as preferred encoding
- default encoding
The default encoding for str.encode() or bytes.decode().
If Python2, it's ascii, if Python3, it's utf-8.
"""
if PY3:
def my_popen(cmd, mode='r', buffering=-1):
"""Originated from python os module
Extend for supporting mode == 'rb' and 'wb'
Args:
cmd (str):
mode (str):
buffering (int):
"""
if isinstance(cmd, text_type):
cmd = cmd.encode(default_encoding)
if buffering == 0 or buffering is None:
raise ValueError('popen() does not support unbuffered streams')
if mode == 'r':
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout,
encoding=default_encoding),
proc)
elif mode == 'rb':
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(proc.stdout, proc)
elif mode == 'w':
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin,
encoding=default_encoding),
proc)
elif mode == 'wb':
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(proc.stdin, proc)
else:
raise TypeError('Unsupported mode == {}'.format(mode))
else:
my_popen = os.popen
class _wrap_close(object):
"""Originated from python os module
A proxy for a file whose close waits for the process
"""
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if os.name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
class _stdstream_wrap(object):
def __init__(self, fd):
self.fd = fd
def __enter__(self):
return self.fd
def __exit__(self, *args):
# Never close
pass
def close(self):
# Never close
pass
def __getattr__(self, name):
return getattr(self.fd, name)
def __iter__(self):
return iter(self.fd)
def open_like_kaldi(name, mode='r'):
"""Open a file like kaldi io
Args:
name (str or file):
mode (str):
"""
# If file descriptor
if not isinstance(name, string_types):
if PY3 and 'b' in mode and isinstance(name, TextIOBase):
return name.buffer
else:
return name
    # Kaldi-style trailing pipe, e.g. "cat foo.ark |"
if name.strip().endswith('|'):
cmd = name.strip()[:-1].encode(default_encoding)
return my_popen(cmd, mode)
    # Kaldi-style leading pipe, e.g. "| gzip -c > foo.gz"
elif name.strip().startswith('|'):
cmd = name.strip()[1:].encode(default_encoding)
return my_popen(cmd, mode)
# If read mode
elif name == '-' and 'r' in mode:
if PY3:
if mode == 'rb':
return _stdstream_wrap(sys.stdin.buffer)
else:
return _stdstream_wrap(
io.TextIOWrapper(sys.stdin.buffer,
encoding=default_encoding))
else:
return _stdstream_wrap(sys.stdin)
# If write mode
elif name == '-' and ('w' in mode or 'a' in mode):
if PY3:
if (mode == 'wb' or mode == 'ab'):
return _stdstream_wrap(sys.stdout.buffer)
else:
return _stdstream_wrap(
io.TextIOWrapper(sys.stdout.buffer,
encoding=default_encoding))
else:
return _stdstream_wrap(sys.stdout)
else:
encoding = None if 'b' in mode else default_encoding
return io.open(name, mode, encoding=encoding)
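# --- Hedged usage sketch (added for clarity; not in the original file) ------
# open_like_kaldi accepts plain paths, '-' for stdin/stdout and kaldi-style
# pipe commands; the commands below are placeholders.
#
#   fd = open_like_kaldi('feats.ark', 'rb')                   # regular file
#   fd = open_like_kaldi('gunzip -c feats.ark.gz |', 'rb')    # read from a command
#   fd = open_like_kaldi('| gzip -c > feats.ark.gz', 'wb')    # write into a command
#   fd = open_like_kaldi('-', 'rb')                           # stdin
# -----------------------------------------------------------------------------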
@contextmanager
def open_or_fd(fname, mode):
# If fname is a file name
if isinstance(fname, string_types):
encoding = None if 'b' in mode else default_encoding
f = io.open(fname, mode, encoding=encoding)
# If fname is a file descriptor
else:
if PY3 and 'b' in mode and isinstance(fname, TextIOBase):
f = fname.buffer
else:
f = fname
yield f
if isinstance(fname, string_types):
f.close()
class MultiFileDescriptor(object):
"""What is this class?
First of all, I want to load all format kaldi files
only by using read_kaldi function, and I want to load it
from file and file descriptor including standard input stream.
To judge its file format it is required to make the
file descriptor read and seek(to return original position).
    However, stdin is not seekable, so I created this class.
    This class joins multiple file descriptors
    and I assume this class is used as follows,
>>> string = fd.read(size)
>>> # To check format from string
>>> _fd = StringIO(string)
>>> newfd = MultiFileDescriptor(_fd, fd)
"""
def __init__(self, *fds):
self.fds = fds
if self.seekable():
self.init_pos = [f.tell() for f in self.fds]
else:
self.init_pos = None
def seek(self, offset, from_what=0):
if not self.seekable():
if PY3:
raise OSError
else:
raise IOError
if offset != 0:
raise NotImplementedError('offset={}'.format(offset))
if from_what == 1:
offset += self.tell()
from_what = 0
if from_what == 0:
for idx, f in enumerate(self.fds):
pos = self.init_pos[idx]
f.seek(pos + offset, 0)
offset -= (f.tell() - pos)
else:
raise NotImplementedError('from_what={}'.format(from_what))
def seekable(self):
return all(seekable(f) for f in self.fds)
def tell(self):
if not self.seekable():
if PY3:
raise OSError
else:
raise IOError
return sum(f.tell() - self.init_pos[idx]
for idx, f in enumerate(self.fds))
def read(self, size=-1):
remain = size
string = None
for f in self.fds:
if string is None:
string = f.read(remain)
else:
string += f.read(remain)
remain = size - len(string)
if remain == 0:
break
elif remain < 0:
remain = -1
return string
def parse_specifier(specifier):
"""A utility to parse "specifier"
Args:
specifier (str):
Returns:
parsed_dict (OrderedDict):
Like {'ark': 'file.ark', 'scp': 'file.scp'}
>>> d = parse_specifier('ark,t,scp:file.ark,file.scp')
>>> print(d['ark,t'])
file.ark
"""
sp = specifier.split(':', 1)
if len(sp) != 2:
if ':' not in specifier:
raise ValueError('The output file must be specified with '
'kaldi-specifier style,'
' e.g. ark,scp:out.ark,out.scp, but you gave as '
'{}'.format(specifier))
types, files = sp
types = types.split(',')
if 'ark' not in types and 'scp' not in types:
raise ValueError(
'One of/both ark and scp is required: '
'e.g. ark,scp:out.ark,out.scp: '
'{}'.format(specifier))
elif 'ark' in types and 'scp' in types:
if ',' not in files:
raise ValueError(
'You specified both ark and scp, '
'but a file path is given: '
'e.g. ark,scp:out.ark,out.scp: {}'.format(specifier))
files = files.split(',', 1)
else:
files = [files]
spec_dict = {'ark': None,
'scp': None,
't': False, # text
'o': False, # once
'p': False, # permissive
'f': False, # flush
's': False, # sorted
'cs': False, # called-sorted
}
for t in types:
if t not in spec_dict:
            raise ValueError('Unknown option {} (in {})'.format(t, types))
if t in ('scp', 'ark'):
if spec_dict[t] is not None:
raise ValueError('You specified {} twice'.format(t))
spec_dict[t] = files.pop(0)
else:
spec_dict[t] = True
return spec_dict
class LazyLoader(MutableMapping):
"""Don't use this class directly"""
def __init__(self, loader):
self._dict = {}
self._loader = loader
def __repr__(self):
return 'LazyLoader [{} keys]'.format(len(self))
def __getitem__(self, key):
ark_name = self._dict[key]
try:
return self._loader(ark_name)
except Exception:
warnings.warn(
'An error happens at loading "{}"'.format(ark_name))
raise
def __setitem__(self, key, value):
self._dict[key] = value
def __delitem__(self, key):
del self._dict[key]
def __iter__(self):
return self._dict.__iter__()
def __len__(self):
return len(self._dict)
def __contains__(self, item):
return item in self._dict
def seekable(f):
if hasattr(f, 'seekable'):
return f.seekable()
# For Py2
else:
if hasattr(f, 'tell'):
try:
f.tell()
except (IOError, OSError):
return False
else:
return True
else:
return False
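# Illustrative note: seekable(io.BytesIO(b'data')) is True, whereas a piped standard
# input stream is generally not seekable, which is the case MultiFileDescriptor above
# is designed to work around.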
| 11,715
| 27.645477
| 78
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/kaldiio/compression_header.py
|
from __future__ import unicode_literals
import struct
import numpy as np
kAutomaticMethod = 1
kSpeechFeature = 2
kTwoByteAuto = 3
kTwoByteSignedInteger = 4
kOneByteAuto = 5
kOneByteUnsignedInteger = 6
kOneByteZeroOne = 7
class GlobalHeader(object):
"""This is a imitation class of the structure "GlobalHeader" """
def __init__(self, type, min_value, range, rows, cols, endian='<'):
if type in ('CM', 'CM2'):
c = 65535.
elif type == 'CM3':
c = 255.
else:
raise RuntimeError('Not supported type={}'.format(type))
self.type = type
self.c = c
self.min_value = min_value
self.range = range
self.rows = rows
self.cols = cols
self.endian = endian
@property
def size(self):
return 17 + len(self.type)
@staticmethod
def read(fd, type='CM', endian='<'):
min_value = struct.unpack(endian + 'f', fd.read(4))[0]
range = struct.unpack(endian + 'f', fd.read(4))[0]
rows = struct.unpack(endian + 'i', fd.read(4))[0]
cols = struct.unpack(endian + 'i', fd.read(4))[0]
return GlobalHeader(type, min_value, range, rows, cols, endian)
def write(self, fd, endian=None):
if endian is None:
endian = self.endian
fd.write(self.type.encode() + b' ')
fd.write(struct.pack(endian + 'f', self.min_value))
fd.write(struct.pack(endian + 'f', self.range))
fd.write(struct.pack(endian + 'i', self.rows))
fd.write(struct.pack(endian + 'i', self.cols))
return self.size
@staticmethod
def compute(array, compression_method, endian='<'):
if compression_method == kAutomaticMethod:
if array.shape[0] > 8:
compression_method = kSpeechFeature
else:
compression_method = kTwoByteAuto
if compression_method == kSpeechFeature:
matrix_type = 'CM'
elif compression_method == kTwoByteAuto or \
compression_method == kTwoByteSignedInteger:
matrix_type = 'CM2'
elif compression_method == kOneByteAuto or \
compression_method == kOneByteUnsignedInteger or \
compression_method == kOneByteZeroOne:
matrix_type = 'CM3'
else:
raise ValueError(
'Unknown compression_method: {}'.format(compression_method))
if compression_method == kSpeechFeature or \
compression_method == kTwoByteAuto or \
compression_method == kOneByteAuto:
min_value = array.min()
max_value = array.max()
if min_value == max_value:
max_value = min_value + (1. + abs(min_value))
range_ = max_value - min_value
elif compression_method == kTwoByteSignedInteger:
min_value = -32768.
range_ = 65535.
elif compression_method == kOneByteUnsignedInteger:
min_value = 0.
range_ = 255.
elif compression_method == kOneByteZeroOne:
min_value = 0.
range_ = 1.
else:
raise ValueError(
'Unknown compression_method: {}'.format(compression_method))
return GlobalHeader(
matrix_type, min_value, range_, array.shape[0], array.shape[1],
endian)
def float_to_uint(self, array):
if self.c == 65535.:
dtype = np.dtype(self.endian + 'u2')
else:
dtype = np.dtype(self.endian + 'u1')
# + 0.499 is to round to closest int
array = ((array - self.min_value) / self.range * self.c + 0.499)
return array.astype(np.dtype(dtype))
def uint_to_float(self, array):
array = array.astype(np.float32)
return self.min_value + array * self.range / self.c
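    # A worked example of the linear quantization above (header values are hypothetical):
    # with min_value=0.0, range=1.0 and type 'CM' (c=65535.), float_to_uint maps 0.5 to
    # int(0.5 * 65535. + 0.499) = 32767, and uint_to_float(32767) gives back
    # 32767 * 1.0 / 65535. ~= 0.49999, i.e. a small quantization error.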
class PerColHeader(object):
"""This is a imitation class of the structure "PerColHeader" """
def __init__(self, p0, p25, p75, p100, endian='<'):
# p means percentile
self.p0 = p0
self.p25 = p25
self.p75 = p75
self.p100 = p100
self.endian = endian
@property
def size(self):
return 8 * self.p0.shape[0]
@staticmethod
def read(fd, global_header):
endian = global_header.endian
# Read PerColHeader
size_of_percolheader = 8
buf = fd.read(size_of_percolheader * global_header.cols)
header_array = np.frombuffer(buf, dtype=np.dtype(endian + 'u2'))
header_array = np.asarray(header_array, np.float32)
# Decompress header
header_array = global_header.uint_to_float(header_array)
header_array = header_array.reshape(-1, 4, 1)
return PerColHeader(header_array[:, 0], header_array[:, 1],
header_array[:, 2], header_array[:, 3],
endian)
def write(self, fd, global_header, endian=None):
if endian is None:
endian = self.endian
header_array = np.concatenate(
[self.p0, self.p25, self.p75, self.p100], axis=1)
header_array = global_header.float_to_uint(header_array)
header_array = header_array.astype(np.dtype(endian + 'u2'))
byte_str = header_array.tobytes()
fd.write(byte_str)
return len(byte_str)
@staticmethod
def compute(array, global_header):
quarter_nr = array.shape[0] // 4
if array.shape[0] >= 5:
srows = np.partition(
array,
[0, quarter_nr, 3 * quarter_nr, array.shape[0] - 1], axis=0)
p0 = srows[0]
p25 = srows[quarter_nr]
p75 = srows[3 * quarter_nr]
p100 = srows[array.shape[0] - 1]
else:
srows = np.sort(array, axis=0)
p0 = srows[0]
if array.shape[0] > 1:
p25 = srows[1]
else:
p25 = p0 + 1
if array.shape[0] > 2:
p75 = srows[2]
else:
p75 = p25 + 1
if array.shape[0] > 3:
p100 = srows[3]
else:
p100 = p75 + 1
p0 = global_header.float_to_uint(p0)
p25 = global_header.float_to_uint(p25)
p75 = global_header.float_to_uint(p75)
p100 = global_header.float_to_uint(p100)
p0 = np.minimum(p0, 65532)
p25 = np.minimum(np.maximum(p25, p0 + 1), 65533)
p75 = np.minimum(np.maximum(p75, p25 + 1), 65534)
p100 = np.maximum(p100, p75 + 1)
p0 = global_header.uint_to_float(p0)
p25 = global_header.uint_to_float(p25)
p75 = global_header.uint_to_float(p75)
p100 = global_header.uint_to_float(p100)
p0 = p0[:, None]
p25 = p25[:, None]
p75 = p75[:, None]
p100 = p100[:, None]
return PerColHeader(p0, p25, p75, p100, global_header.endian)
def float_to_char(self, array):
p0, p25, p75, p100 = self.p0, self.p25, self.p75, self.p100
ma1 = array < p25
ma3 = array >= p75
ma2 = ~ma1 * ~ma3
# +0.5 round to the closest int
tmp = (array - p0) / (p25 - p0) * 64. + 0.5
tmp = np.where(tmp < 0., 0., np.where(tmp > 64., 64., tmp))
tmp2 = ((array - p25) / (p75 - p25) * 128. + 64.5)
tmp2 = np.where(tmp2 < 64., 64., np.where(tmp2 > 192., 192., tmp2))
tmp3 = ((array - p75) / (p100 - p75) * 63. + 192.5)
tmp3 = np.where(tmp3 < 192., 192., np.where(tmp3 > 255., 255., tmp3))
array = np.where(ma1, tmp, np.where(ma2, tmp2, tmp3))
return array.astype(np.dtype(self.endian + 'u1'))
def char_to_float(self, array):
array = array.astype(np.float32)
p0, p25, p75, p100 = self.p0, self.p25, self.p75, self.p100
ma1 = array <= 64
ma3 = array > 192
ma2 = ~ma1 * ~ma3 # 192 >= array > 64
return np.where(
ma1, p0 + (p25 - p0) * array * (1 / 64.),
np.where(ma2, p25 + (p75 - p25) * (array - 64.) * (1 / 128.),
p75 + (p100 - p75) * (array - 192.) * (1 / 63.)))
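    # Summary of the scheme above: each float is encoded into a single byte using three
    # linear segments anchored at the per-column percentiles: [p0, p25] maps to codes
    # 0..64, [p25, p75] to codes 64..192 and [p75, p100] to codes 192..255;
    # char_to_float inverts the same piecewise-linear mapping.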
| 8,165
| 34.04721
| 77
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/kaldiio/wavio.py
|
from __future__ import unicode_literals
import numpy as np
import kaldiio.python_wave as wave
def read_wav(fd, return_size=False):
wd = wave.open(fd)
rate = wd.getframerate()
nchannels = wd.getnchannels()
nbytes = wd.getsampwidth()
if nbytes == 1:
# 8bit-PCM is unsigned
dtype = 'uint8'
elif nbytes == 2:
dtype = 'int16'
else:
        raise ValueError('bytes_per_sample must be 1 or 2, but got {}'.format(nbytes))
data = wd.readframes(wd.getnframes())
size = 44 + len(data)
array = np.frombuffer(data, dtype=np.dtype(dtype))
if nchannels > 1:
array = array.reshape(-1, nchannels)
if return_size:
return (rate, array), size
else:
return rate, array
def write_wav(fd, rate, array):
if array.dtype == np.uint8:
sampwidth = 1
elif array.dtype == np.int16:
sampwidth = 2
else:
raise ValueError('Not Supported dtype {}'.format(array.dtype))
if array.ndim == 2:
nchannels = array.shape[1]
elif array.ndim == 1:
nchannels = 1
else:
        raise ValueError(
            'Unsupported number of dimensions: expected 1 or 2, but got {}'.format(array.ndim))
w = wave.Wave_write(fd)
w.setnchannels(nchannels)
w.setsampwidth(sampwidth)
w.setframerate(rate)
data = array.tobytes()
w.writeframes(data)
return 44 + len(data)
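# A minimal usage sketch (illustrative only; the file paths are hypothetical):
#     with open('utt1.wav', 'rb') as f_in:
#         rate, samples = read_wav(f_in)
#     with open('utt1_copy.wav', 'wb') as f_out:
#         write_wav(f_out, rate, samples)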
| 1,370
| 23.927273
| 77
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/data/kaldiio/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/data/kaldiio/io.py
|
import random
import struct
import numpy as np
import os
def write_ark(ark, dic, scp=None, append=False):
# Write ark
mode = 'ab' if append else 'wb'
pos_list = []
with open(ark, mode) as fd:
pos = fd.tell() if append else 0
for key in dic:
encode_key = (key + ' ').encode()
fd.write(encode_key)
pos += len(encode_key)
pos_list.append(pos)
data = dic[key]
pos += write_array(fd, data)
# Write scp
if scp is not None:
mode = 'a' if append else 'w'
with open(scp, mode) as fd:
for key, position in zip(dic, pos_list):
fd.write(key + ' ' + ark + ':' + str(position) + os.linesep)
def write_ark_file(ark_file, scp_file, dic, scp=None):
pos_lst, len_lst = [], []
pos = ark_file.tell()
for key in dic:
encode_key = (key + ' ').encode()
ark_file.write(encode_key)
pos += len(encode_key)
pos_lst.append(pos)
data = dic[key]
len_lst.append(len(data))
pos += write_array(ark_file, data)
ark = ark_file.name
for key, ps, ln in zip(dic, pos_lst, len_lst):
scp_file.write(key + ' ' + ark + ':' + str(ps) + ' ' + str(ln) + os.linesep)
def write_array(fd, array):
size = 0
assert isinstance(array, np.ndarray), type(array)
fd.write(b'\0B')
size += 2
dt = array.dtype
if dt == np.float32 or dt == np.float16:
atype = b'FM ' if dt == np.float32 else b'HM '
if len(array.shape) == 2:
fd.write(atype)
size += 3
fd.write(b'\4')
size += 1
fd.write(struct.pack('<i', len(array))) # Rows
size += 4
fd.write(b'\4')
size += 1
fd.write(struct.pack('<i', array.shape[1])) # Cols
size += 4
fd.write(array.tobytes())
size += array.nbytes
else:
raise ValueError('Unsupported array type: {}'.format(dt))
return size
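# A minimal usage sketch (illustrative only; keys, paths and shapes are hypothetical):
#     feats = {'utt1': np.random.randn(100, 40).astype(np.float32),
#              'utt2': np.random.randn(80, 40).astype(np.float32)}
#     write_ark('feats.ark', feats, scp='feats.scp')
# Note that write_array above only handles 2-D float32/float16 matrices.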
| 2,028
| 27.577465
| 84
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/train_utils/classify_trainer.py
|
from __future__ import division
import datetime
import gc
import inspect
import math
import os
import re
import time
import torch
import copy
import sys
import contextlib
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.multidata_iterator import MultiDataIterator
from onmt.data.dataset import rewrap
from onmt.model_factory import build_classifier, optimize_model, init_model_parameters
from onmt.modules.loss import ClassifierLoss
from onmt.train_utils.stats import Logger
from onmt.utils import checkpoint_paths, normalize_gradients
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP_model
from torch.cuda.amp import autocast
import warnings
# ignore the pytorch -> numpy conversion warnings
warnings.filterwarnings("ignore", category=UserWarning)
def prepare_sample(batch, device=None):
"""
Put minibatch on the corresponding GPU
:param batch:
:param device:
:return:
"""
if isinstance(batch, list):
batch = batch[0]
batch = rewrap(batch)
batch.cuda(fp16=False, device=device)
return batch
def generate_data_iterator(dataset, rank, world_size, seed,
num_workers=1, epoch=1., buffer_size=0):
# check if dataset is a list:
if isinstance(dataset, list):
# this is a multidataset
data_iterator = MultiDataIterator(dataset, seed=seed, num_workers=num_workers,
epoch=epoch, buffer_size=buffer_size,
num_shards=world_size, shard_id=rank)
else:
data_iterator = DataIterator(dataset, dataset.collater, dataset.batches, seed=seed,
num_workers=num_workers, epoch=epoch, buffer_size=buffer_size,
num_shards=world_size, shard_id=rank)
return data_iterator
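# A minimal usage sketch (illustrative; the argument values are hypothetical and mirror
# how train_epoch below calls this helper):
#     itr = generate_data_iterator(train_data, rank=0, world_size=1, seed=1234,
#                                  num_workers=2, epoch=1, buffer_size=16)
#     epoch_itr = itr.next_epoch_itr(True, pin_memory=False)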
def zero_tensor(device=None):
if device is None:
return torch.Tensor([0]).cuda()
else:
return torch.Tensor([0]).to(device)
class ClassifierTrainer(object):
def __init__(self, device, train_data, valid_data, dicts, opt, setup_optimizer=True):
"""
:param model:
:param device: int (GPU id)
:param loss_function:
:param train_data:
:param valid_data:
:param dicts:
:param opt:
"""
self.device = device
opt.node_rank = 0
opt.nodes = 1
self.world_size = len(opt.gpus)
# in the case of single node distributed, it should equal self.device
self.rank = self.device
# make a group to later use with self.all_reduce
self.group = dist.group.WORLD
self.print("[INFO] Training Options:", opt)
if self.world_size > 1:
dist.init_process_group(backend='nccl', init_method='env://', world_size=self.world_size, rank=self.rank)
self.model = None
if self.rank == 0:
self.train_data = train_data
self.valid_data = valid_data
else:
# Do we really need to deepcopy the data instances (which could cause memory leak easily)
self.train_data = copy.deepcopy(train_data)
self.valid_data = copy.deepcopy(valid_data)
self.dicts = dicts
self.opt = opt
self.cuda = (len(opt.gpus) >= 1 and opt.gpus[0] >= 0)
assert self.cuda, "[ERROR] Training is only available on GPUs."
self.start_time = 0
# setting up models and others
torch.manual_seed(self.opt.seed)
if self.is_main():
print("[INFO] Building models .... ", flush=True)
model = build_classifier(opt, dicts)
loss_function = ClassifierLoss(opt.model_size, dicts['tgt'].size(), label_smoothing=opt.label_smoothing)
# This function replaces modules with the more optimized counterparts so that it can run faster
# Currently exp with LayerNorm
# if not opt.memory_profiling:
# # distributed is required to convert BatchNorm to SyncBatchNorm for DDP
optimize_model(model, distributed=(self.world_size > 1))
if 'wav2vec2' not in opt.model:
init_model_parameters(model, opt)
self.model = model
self.loss_function = loss_function
self.grad_scaler = torch.cuda.amp.GradScaler()
if opt.mpc:
from onmt.modules.loss import MPCLoss
self.mpc_loss = MPCLoss()
if opt.load_from:
checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)
self.model.load_state_dict(checkpoint['model'])
if 'scaler' in checkpoint and checkpoint['scaler'] is not None:
self.grad_scaler.load_state_dict(checkpoint['scaler'])
if self.cuda:
torch.cuda.set_device(self.device)
self.model = self.model.cuda(device=self.device)
if setup_optimizer:
self.optim = onmt.Optim(opt)
self.optim.set_parameters(self.model.parameters())
if self.is_main():
print("[INFO] Optimizer: ", self.optim.optimizer)
if opt.load_from:
if 'optim' in checkpoint and checkpoint['optim'] is not None and not opt.reset_optim:
self.optim.load_state_dict(checkpoint['optim'])
if self.world_size > 1:
# find_unused_parameters may be required for dropped layer (parameters that are not connected to
# any particular graph)
find_unused_parameters = False if opt.death_rate == 0.0 else True
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.rank],
output_device=self.rank,
find_unused_parameters=find_unused_parameters)
print("[INFO] Process %d ready." % self.rank, flush=True)
def is_main(self):
return self.rank == 0
def all_reduce(self, tensor, **kwargs):
if self.world_size > 1:
dist.all_reduce(tensor, **kwargs)
# otherwise, do nothing
return
def print(self, *content, flush=False):
"""
A helper function to print only on the main process
:param flush:
:param content:
:return:
"""
if self.is_main():
print(*content, flush=flush)
else:
return
# def load_encoder_weight(self, checkpoint_file):
#
# print("Loading pretrained Encoder Weights from %s" % checkpoint_file, flush=True)
# checkpoint = torch.load(checkpoint_file, map_location=lambda storage, loc: storage)
#
# pretrained_model = build_model(checkpoint['opt'], checkpoint['dicts'])
# pretrained_model.load_state_dict(checkpoint['model'])
#
# model = self.model.module if self.world_size > 1 else self.model
#
# model.load_encoder_weights(pretrained_model)
#
# return
#
# def load_decoder_weight(self, checkpoint_file):
#
# self.print("Loading pretrained models from %s" % checkpoint_file)
# checkpoint = torch.load(checkpoint_file, map_location=lambda storage, loc: storage)
# chkpoint_dict = checkpoint['dicts']
#
# pretrained_model = build_model(checkpoint['opt'], chkpoint_dict)
# pretrained_model.load_state_dict(checkpoint['model'])
#
# self.print("Loading pretrained decoder weights ...")
# # first we have to remove the embeddings which probably have difference size ...
# pretrained_word_emb = pretrained_model.decoder.word_lut
# pretrained_model.decoder.word_lut = None
# pretrained_lang_emb = pretrained_model.decoder.language_embeddings
# pretrained_model.decoder.language_embeddings = None
#
# # actually we assume that two decoders have the same language embeddings...
# untrained_word_emb = self.model.decoder.word_lut
# self.model.decoder.word_lut = None
# untrained_lang_emb = self.model.decoder.language_embeddings
# self.model.decoder.language_embeddings = None
#
# decoder_state_dict = pretrained_model.decoder.state_dict()
# self.model.decoder.load_state_dict(decoder_state_dict)
#
# # now we load the embeddings ....
# n_copies = 0
# for token in self.dicts['tgt'].labelToIdx:
#
# untrained_id = self.dicts['tgt'].labelToIdx[token]
#
# if token in chkpoint_dict['tgt'].labelToIdx:
# pretrained_id = chkpoint_dict['tgt'].labelToIdx[token]
# untrained_word_emb.weight.data[untrained_id].copy_(pretrained_word_emb.weight.data[pretrained_id])
#
# self.model.generator[0].linear.bias.data[untrained_id].copy_(pretrained_model
# .generator[0].linear.bias.data[
# pretrained_id])
# n_copies += 1
#
# self.print("Copied embedding for %d words" % n_copies)
# self.model.decoder.word_lut = untrained_word_emb
#
# # now we load the language embeddings ...
# if pretrained_lang_emb and untrained_lang_emb and 'langs' in chkpoint_dict:
# for lang in self.dicts['langs']:
#
# untrained_id = self.dicts['langs'][lang]
# if lang in chkpoint_dict['langs']:
# pretrained_id = chkpoint_dict['langs'][lang]
# untrained_lang_emb.weight.data[untrained_id].copy_(pretrained_lang_emb.weight.data[pretrained_id])
#
# self.model.decoder.language_embeddings = untrained_lang_emb
def warm_up(self):
return
# """
# Warmup the memory allocator, by attempting to fit the largest batch
# :return:
# """
#
# # if self.opt.memory_profiling:
# # from pytorch_memlab import MemReporter
# # reporter = MemReporter()
# #
# batch = self.train_data[0].get_largest_batch() if isinstance(self.train_data, list) \
# else self.train_data.get_largest_batch()
# opt = self.opt
#
# if self.cuda:
# batch.cuda(fp16=False)
#
# self.model.train()
# self.loss_function.train()
# self.model.zero_grad()
# oom = False
#
# if self.opt.memory_profiling:
# self.print("Input size: ")
# self.print(batch.size, batch.src_size, batch.tgt_size)
#
# if opt.streaming:
# streaming_state = self.model.init_stream()
# else:
# streaming_state = None
#
# try:
# with autocast():
# targets = batch.get('target_output')
# tgt_mask = None
# outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
# zero_encoder=opt.zero_encoder,
# mirror=opt.mirror_loss, streaming_state=streaming_state,
# nce=opt.nce)
#
# outputs['tgt_mask'] = tgt_mask
#
# loss_dict = self.loss_function(outputs, targets, model=self.model)
# loss_data = loss_dict['data']
# loss = loss_dict['loss'] # a little trick to avoid gradient overflow with fp16
# full_loss = loss
#
# if opt.ctc_loss > 0.0:
# ctc_loss = self.ctc_loss_function(outputs, targets)
# ctc_loss_data = ctc_loss.item()
# full_loss = full_loss + opt.ctc_loss * ctc_loss
#
# if opt.mirror_loss:
# rev_loss = loss_dict['rev_loss']
# mirror_loss = loss_dict['mirror_loss']
# full_loss = full_loss + rev_loss + mirror_loss
#
# # reconstruction loss
# if opt.reconstruct:
# rec_loss = loss_dict['rec_loss']
# rec_loss = rec_loss
# full_loss = full_loss + rec_loss
#
# if opt.lfv_multilingual:
# lid_logits = outputs['lid_logits']
# lid_labels = batch.get('target_lang')
# lid_loss_function = self.loss_function.get_loss_function('lid_loss')
# lid_loss = lid_loss_function(lid_logits, lid_labels)
# full_loss = full_loss + lid_loss
#
# optimizer = self.optim.optimizer
#
# if self.opt.memory_profiling:
# reporter.report(verbose=True)
#
# # for obj in gc.get_objects():
# # try:
# # if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
# # # print(varname(obj))
# # # we can rule out parameter cost later
# # # if 'parameter' not in type(obj):
# # # if len(obj.shape) == 3:
# # # if not isinstance(obj, torch.nn.parameter.Parameter):
# # # tensor = obj
# # # numel = tensor.
# # print(type(obj), obj.type(), obj.size())
# # except:
# # pass
#
# # print("Memory profiling complete.")
# # print(torch.cuda.memory_summary())
# # exit()
#
# self.grad_scaler.scale(full_loss).backward()
# # if self.cuda:
# # with amp.scale_loss(full_loss, optimizer) as scaled_loss:
# # scaled_loss.backward()
# # else:
# # loss.div_(batch.tgt_size).backward()
#
# if self.opt.memory_profiling:
# print('========= after backward =========')
# reporter.report(verbose=True)
#
# self.model.zero_grad()
# self.optim.zero_grad()
# # self.optim.step()
# # self.optim.reset()
#
# except RuntimeError as e:
# if 'out of memory' in str(e):
# oom = True
# else:
# raise e
#
# if oom:
# print("[INFO] Warning: out-of-memory in warming up. "
# "This is due to the largest batch is too big for the GPU.",
# flush=True)
# else:
# self.print("[INFO] Warming up successfully.", flush=True)
#
# if self.opt.memory_profiling:
# if hasattr(torch.cuda, 'memory_summary'):
# print(torch.cuda.memory_summary())
# exit()
# maybe save by accuracy?
def save(self, epoch, valid_ppl, itr=None):
opt = self.opt
model = self.model
dicts = self.dicts
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model_state_dict = self.model.module.state_dict()
else:
model_state_dict = self.model.state_dict()
optim_state_dict = self.optim.state_dict()
if itr:
itr_state_dict = itr.state_dict()
else:
itr_state_dict = None
# drop a checkpoint
checkpoint = {
'model': model_state_dict,
'dicts': dicts,
'opt': opt,
'epoch': epoch,
'itr': itr_state_dict,
'optim': optim_state_dict,
'scaler': self.grad_scaler.state_dict()
}
file_name = '%s_ppl_%.6f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)
print('Writing to %s' % file_name)
torch.save(checkpoint, file_name)
# check the save directory here
checkpoint_dir = os.path.dirname(opt.save_model)
existed_save_files = checkpoint_paths(checkpoint_dir)
for save_file in existed_save_files[opt.keep_save_files:]:
print(" * Deleting old save file %s ...." % save_file)
os.remove(save_file)
def eval(self, data):
self.print("[INFO] Running evaluation...", flush=True)
opt = self.opt
rank = self.rank
world_size = self.world_size
# the data iterator creates an epoch iterator
data_iterator = generate_data_iterator(data, rank, world_size, seed=self.opt.seed,
num_workers=1, epoch=1, buffer_size=opt.buffer_size)
epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)
data_size = len(epoch_iterator)
i = 0
self.model.eval()
self.loss_function.eval()
# self.model.module.reset_states()
total_loss = zero_tensor()
total_words = zero_tensor()
total_correct = zero_tensor()
with torch.no_grad():
while not data_iterator.end_of_epoch():
samples = next(epoch_iterator)
if samples:
with autocast():
batch = prepare_sample(samples, device=self.device)
targets = batch.get('target')
# tgt_mask = targets.ne(onmt.constants.PAD)
outputs = self.model(batch)
loss_dict = self.loss_function(outputs, targets, model=self.model, eval=True)
loss_data = loss_dict['data']
numel = loss_dict['numel']
n_correct = loss_dict['n_correct']
total_loss.add_(loss_data)
total_words.add_(numel)
total_correct.add_(n_correct)
i = i + 1
# allreduce the total loss and total words from other processes
self.all_reduce(total_loss, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(total_words, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(total_correct, op=dist.ReduceOp.SUM, group=self.group)
self.model.train()
self.loss_function.train()
accuracy = total_correct.item() / total_words.item()
loss = total_loss / total_words
output = {'loss': loss, 'accuracy': accuracy}
return output
def train_epoch(self, epoch, resume=False, itr_progress=None):
global rec_ppl
opt = self.opt
train_data = self.train_data
streaming = opt.streaming
# Clear the gradients of the model
self.model.zero_grad()
# self.model.module.reset_states()
dataset = train_data
data_iterator = generate_data_iterator(dataset, self.rank, self.world_size,
seed=self.opt.seed, num_workers=opt.num_workers,
epoch=epoch, buffer_size=opt.buffer_size)
# TODO: fix resume which is currently buggy
if resume:
data_iterator.load_state_dict(itr_progress)
epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
total_tokens, total_loss, total_words = zero_tensor(), zero_tensor(), zero_tensor()
total_correct = zero_tensor()
report_mpc_loss, report_mpc_numel = zero_tensor(), zero_tensor()
report_loss, report_tgt_words = zero_tensor(), zero_tensor()
report_correct = zero_tensor()
report_src_words = zero_tensor()
report_rec_loss, report_rev_loss, report_mirror_loss = zero_tensor(), zero_tensor(), zero_tensor()
start = time.time()
n_samples = len(data_iterator)
counter = 0
num_accumulated_words = zero_tensor()
num_accumulated_sents = zero_tensor()
grad_div = 1
i = data_iterator.iterations_in_epoch if not isinstance(train_data, list) else epoch_iterator.n_yielded
i = i * self.world_size
numel = 0
while not data_iterator.end_of_epoch():
# this batch generator is not very clean atm
# TODO: move everything to the multiGPU trainer
samples = next(epoch_iterator)
batch = prepare_sample(samples, device=self.device)
# TODO: dealing with oom during distributed training
oom = zero_tensor()
# outputs is a dictionary containing keys/values necessary for loss function
# can be flexibly controlled within models for easier extensibility
counter = counter + 1
# reduction_disabled = False if counter >= opt.update_frequency or i == (n_samples - 1) else True
reduce = True if counter >= opt.update_frequency or i == (n_samples - 1) else False
def maybe_no_sync():
if not reduce and isinstance(self.model, DDP_model):
return self.model.no_sync()
else:
                    # when we don't reach the updating step, the no_sync() branch above
                    # disables the backward grad sync to improve speed; otherwise fall back
                    # to a dummy context manager so gradients are reduced as usual
                    return contextlib.ExitStack()  # dummy contextmanager
with maybe_no_sync():
with autocast():
targets = batch.get('target')
# tgt_mask = targets.ne(onmt.constants.PAD)
outputs = self.model(batch)
batch_size = batch.size
# outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model)
loss_data = loss_dict['data']
loss = loss_dict['loss'] # a little trick to avoid gradient overflow with fp16
numel = loss_dict['numel']
n_correct = loss_dict['n_correct']
full_loss = loss
# # Todo: MPC loss
if self.opt.mpc:
mpc_loss_dict = self.mpc_loss(outputs)
mpc_loss_data = mpc_loss_dict['data']
mpc_loss = mpc_loss_dict['loss']
mpc_numel = mpc_loss_dict['numel']
# mpc_loss_data = 0
# mpc_numel = 0
full_loss = full_loss + 0.0001 * mpc_loss
else:
mpc_loss_data = 0
mpc_numel = 0
# grad scaler has to be done outside of the autocast
# this line basically equals full_loss.mul_(some_scale).backward()
# which means the grad scaler doesn't internally change
self.grad_scaler.scale(full_loss).backward()
del outputs
batch_size = batch.size
src_size = batch.src_size
tgt_size = numel
num_accumulated_words.add_(numel)
num_accumulated_sents.add_(batch_size)
# We only update the parameters after getting gradients from n mini-batches
update_flag = False
if counter >= opt.update_frequency:
update_flag = True
elif i == n_samples - 1: # update for the last minibatch
update_flag = True
if update_flag:
# accumulated gradient case, in this case the update frequency
# self.all_reduce(num_accumulated_words, op=dist.ReduceOp.SUM, group=self.group)
grad_denom = 1.0 / grad_div
if self.opt.normalize_gradient:
grad_denom = num_accumulated_words.item() * grad_denom
else:
grad_denom = 1
# the gradient is scaled by world size, so in order to match the model without multiGPU
# we rescale the model parameters w.r.t the world size
grad_denom = grad_denom / self.world_size
# When we accumulate the gradients, each gradient is already normalized by a constant grad_scaler
normalize_gradients(self.model.parameters(), grad_denom)
# Update the parameters.
if self.opt.max_grad_norm > 0:
self.grad_scaler.unscale_(self.optim.optimizer)
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.opt.max_grad_norm)
self.optim.step(scaler=self.grad_scaler)
self.grad_scaler.update()
self.optim.zero_grad()
self.model.zero_grad()
counter = 0
num_accumulated_words.zero_()
num_accumulated_sents.zero_()
num_updates = self.optim._step
if opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every:
valid_output = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_output['loss'], 100))
if self.is_main():
print('Validation perplexity: %g' % valid_ppl)
print('Validation accuracy: %g' % valid_output['accuracy'])
ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
self.save(ep, 1 - valid_output['accuracy'], itr=data_iterator)
num_words = tgt_size
report_loss.add_(loss_data)
report_correct.add_(n_correct)
report_tgt_words.add_(numel)
report_src_words.add_(src_size)
total_loss.add_(loss_data)
total_words.add_(num_words)
report_mpc_loss.add_(mpc_loss_data)
report_mpc_numel.add_(mpc_numel)
# total_tokens += batch.get('target_output').nelement()
# total_non_pads += batch.get('target_output').ne(onmt.constants.PAD).sum().item()
# batch_efficiency = total_non_pads / total_tokens
# control the index a little bit to ensure the log is always printed
if i == 0 or ((i + 1) % opt.log_interval < self.world_size):
self.all_reduce(report_loss, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(report_tgt_words, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(report_src_words, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(report_correct, op=dist.ReduceOp.SUM, group=self.group)
if self.is_main():
log_string = ("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; " %
(epoch, i + 1, len(data_iterator),
math.exp(report_loss.item() / report_tgt_words.item())))
assert report_correct.item() <= report_tgt_words.item()
log_string += ("accuracy: %6.4f; " %
(report_correct.item() / report_tgt_words.item()))
if opt.mpc:
log_string += ("mpc loss: %6.6f; " %
(report_mpc_loss.item() / report_mpc_numel.item() ))
log_string += ("lr: %.7f ; updates: %7d; " %
(self.optim.get_learning_rate(),
self.optim._step))
log_string += ("%5.0f src tok/s; %5.0f tgt tok/s; " %
(report_src_words.item() / (time.time() - start),
report_tgt_words.item() / (time.time() - start)))
log_string += ("%s elapsed" %
str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
self.print(log_string, flush=True)
report_loss.zero_()
report_tgt_words.zero_()
report_src_words.zero_()
report_rec_loss.zero_()
report_rev_loss.zero_()
report_mirror_loss.zero_()
report_correct.zero_()
report_mpc_loss.zero_()
report_mpc_numel.zero_()
start = time.time()
# increase i by world size
i = i + self.world_size
return total_loss / total_words
# def run(self, save_file=None):
def run(self, checkpoint=None):
opt = self.opt
if checkpoint is not None:
# TODO: have loading checkpoints for each process
prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None
if not opt.reset_optim:
# Only load the progress when we use the same optimizer
# if 'itr' in checkpoint:
# itr_progress = checkpoint['itr']
# else:
itr_progress = None
resume = True
start_epoch = math.floor(checkpoint['epoch']) if 'epoch' in checkpoint else 1
if start_epoch is None:
start_epoch = 1
else:
itr_progress = None
resume = False
start_epoch = 1
# optim_state_dict = checkpoint['optim']
# # del checkpoint['optim']
del checkpoint
else:
itr_progress = None
resume = False
start_epoch = 1
if opt.load_encoder_from:
self.load_encoder_weight(opt.load_encoder_from)
#
if opt.load_decoder_from:
self.load_decoder_weight(opt.load_decoder_from)
valid_output = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_output['loss'], 100))
if self.is_main():
print('[INFO] Validation perplexity: %g' % valid_ppl, flush=True)
print('[INFO] Validation accuracy: %g' % valid_output['accuracy'], flush=True)
self.start_time = time.time()
for epoch in range(start_epoch, start_epoch + opt.epochs):
self.print('')
# (1) train for one epoch on the training set
train_loss = self.train_epoch(epoch, resume=resume, itr_progress=itr_progress)
train_ppl = math.exp(min(train_loss, 100))
self.print('[INFO] Train perplexity: %g' % train_ppl)
# (2) evaluate on the validation set
valid_output = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_output['loss'], 100))
if self.is_main():
print('[INFO] Validation perplexity: %g' % valid_ppl)
print('[INFO] Validation accuracy: %g' % valid_output['accuracy'], flush=True)
self.save(epoch, 1 - valid_output['accuracy'])
itr_progress = None
resume = False
| 31,146
| 38.576874
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/train_utils/bayes_by_backprop_trainer.py
|
from __future__ import division
import datetime
import gc
import inspect
import math
import os
import re
import time
import torch
from apex import amp
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.dataset import rewrap
from onmt.model_factory import build_model, build_language_model, optimize_model
from onmt.model_factory import init_model_parameters
from onmt.train_utils.stats import Logger
from onmt.utils import checkpoint_paths, normalize_gradients
from .trainer import BaseTrainer
def varname(p):
for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
if m:
return m.group(1)
class BayesianTrainer(BaseTrainer):
def __init__(self, model, loss_function, train_data, valid_data, dicts, opt, setup_optimizer=True):
super().__init__(model, loss_function, train_data, valid_data, dicts, opt)
if self.cuda:
torch.cuda.set_device(self.opt.gpus[0])
if self.opt.seed >= 0:
torch.manual_seed(self.opt.seed)
self.loss_function = self.loss_function.cuda()
self.model = self.model.cuda()
if setup_optimizer:
self.optim = onmt.Optim(opt)
self.optim.set_parameters(self.model.parameters())
if not self.opt.fp16:
opt_level = "O0"
keep_batchnorm_fp32 = False
elif self.opt.fp16_mixed:
opt_level = "O1"
keep_batchnorm_fp32 = None
else:
opt_level = "O2"
keep_batchnorm_fp32 = False
if self.cuda:
self.model, self.optim.optimizer = amp.initialize(self.model,
self.optim.optimizer,
opt_level=opt_level,
keep_batchnorm_fp32=keep_batchnorm_fp32,
loss_scale="dynamic",
verbosity=1 if self.opt.verbose else 0)
# An ugly hack to switch between align right and align left
if hasattr(self.model, 'relative'):
if self.model.relative:
self.train_data.src_align_right = True
self.train_data.tgt_align_right = False
self.valid_data.src_align_right = True
self.valid_data.tgt_align_right = False
def warm_up(self):
"""
Warmup the memory allocator, by attempting to fit the largest batch
:return:
"""
if self.opt.memory_profiling:
from pytorch_memlab import MemReporter
reporter = MemReporter()
batch = self.train_data.get_largest_batch()
opt = self.opt
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
self.model.train()
self.model.zero_grad()
oom = False
if self.opt.memory_profiling:
print("Input size: ")
print(batch.size, batch.src_size, batch.tgt_size)
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
try:
targets = batch.get('target_output')
tgt_mask = targets.data.ne(onmt.constants.PAD)
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
zero_encoder=opt.zero_encoder,
mirror=opt.mirror_loss, streaming_state=streaming_state)
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model)
loss = loss_dict['loss'] # a little trick to avoid gradient overflow with fp16
log_prior = self.model.log_prior()
log_variational_posterior = self.model.log_variational_posterior()
full_loss = loss + (log_variational_posterior - log_prior)
if opt.mirror_loss:
rev_loss = loss_dict['rev_loss']
mirror_loss = loss_dict['mirror_loss']
full_loss = full_loss + rev_loss + mirror_loss
# reconstruction loss
if opt.reconstruct:
rec_loss = loss_dict['rec_loss']
rec_loss = rec_loss
full_loss = full_loss + rec_loss
optimizer = self.optim.optimizer
if self.opt.memory_profiling:
reporter.report(verbose=True)
# for obj in gc.get_objects():
# try:
# if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
# # print(varname(obj))
# # we can rule out parameter cost later
# # if 'parameter' not in type(obj):
# # if len(obj.shape) == 3:
# # if not isinstance(obj, torch.nn.parameter.Parameter):
# # tensor = obj
# # numel = tensor.
# print(type(obj), obj.type(), obj.size())
# except:
# pass
# print("Memory profiling complete.")
# print(torch.cuda.memory_summary())
# exit()
if self.cuda:
with amp.scale_loss(full_loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if self.opt.memory_profiling:
print('========= after backward =========')
reporter.report(verbose=True)
except RuntimeError as e:
if 'out of memory' in str(e):
oom = True
else:
raise e
if oom:
print("* Warning: out-of-memory in warming up. This is due to the largest batch is too big for the GPU")
else:
print("* Warming up successuflly.")
if self.opt.memory_profiling:
if hasattr(torch.cuda, 'memory_summary'):
print(torch.cuda.memory_summary())
exit()
def save(self, epoch, valid_ppl, itr=None):
opt = self.opt
model = self.model
dicts = self.dicts
model_state_dict = self.model.state_dict()
optim_state_dict = self.optim.state_dict()
if itr:
itr_state_dict = itr.state_dict()
else:
itr_state_dict = None
# drop a checkpoint
checkpoint = {
'model': model_state_dict,
'dicts': dicts,
'opt': opt,
'epoch': epoch,
'itr': itr_state_dict,
'optim': optim_state_dict,
'additional_batch_order': getattr(self, 'additional_batch_order', None),
'additional_data_iteration': getattr(self, 'additional_data_iteration', None),
'amp': amp.state_dict()
}
file_name = '%s_ppl_%.6f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)
print('Writing to %s' % file_name)
torch.save(checkpoint, file_name)
# check the save directory here
checkpoint_dir = os.path.dirname(opt.save_model)
existed_save_files = checkpoint_paths(checkpoint_dir)
for save_file in existed_save_files[opt.keep_save_files:]:
print(" * Deleting old save file %s ...." % save_file)
os.remove(save_file)
def eval(self, data):
total_loss = 0
total_words = 0
opt = self.opt
data_iterator = DataIterator(data, data.collater, data.batches, seed=self.opt.seed,
num_workers=opt.num_workers, epoch=1, buffer_size=opt.buffer_size)
epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)
self.model.eval()
self.loss_function.eval()
self.model.reset_states()
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
""" PyTorch semantics: save space by not creating gradients """
data_size = len(epoch_iterator)
i = 0
with torch.no_grad():
# for i in range(len()):
while not data_iterator.end_of_epoch():
# batch = data.next()[0]
batch = next(epoch_iterator)
batch = rewrap(batch)
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
""" outputs can be either
hidden states from decoder or
prob distribution from decoder generator
"""
targets = batch.get('target_output')
tgt_mask = targets.ne(onmt.constants.PAD)
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
mirror=opt.mirror_loss, streaming_state=streaming_state)
if opt.streaming:
streaming_state = outputs['streaming_state']
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model, eval=True)
loss_data = loss_dict['data']
total_loss += loss_data
total_words += batch.tgt_size
i = i + 1
self.model.train()
self.loss_function.train()
return total_loss / total_words
def train_epoch(self, epoch, resume=False, itr_progress=None):
global rec_ppl
opt = self.opt
train_data = self.train_data
streaming = opt.streaming
self.model.train()
self.loss_function.train()
# Clear the gradients of the model
# self.runner.zero_grad()
self.model.zero_grad()
self.model.reset_states()
dataset = train_data
data_iterator = DataIterator(dataset, dataset.collater, dataset.batches, seed=self.opt.seed,
num_workers=opt.num_workers, epoch=epoch, buffer_size=opt.buffer_size)
if resume:
data_iterator.load_state_dict(itr_progress)
epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
total_tokens, total_loss, total_words = 0, 0, 0
total_non_pads = 0
report_loss, report_tgt_words = 0, 0
report_src_words = 0
report_sents = 0
report_rec_loss, report_rev_loss, report_mirror_loss = 0, 0, 0
report_log_prior = 0
report_log_variational_posterior = 0
start = time.time()
n_samples = len(epoch_iterator)
counter = 0
update_counter = 0
num_accumulated_words = 0
num_accumulated_sents = 0
nan = False
nan_counter = 0
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
i = data_iterator.iterations_in_epoch
while not data_iterator.end_of_epoch():
curriculum = (epoch < opt.curriculum)
batch = next(epoch_iterator)
batch = rewrap(batch)
grad_scaler = self.opt.batch_size_words if self.opt.update_frequency > 1 else batch.tgt_size
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
oom = False
try:
# outputs is a dictionary containing keys/values necessary for loss function
# can be flexibly controlled within models for easier extensibility
targets = batch.get('target_output')
tgt_mask = targets.data.ne(onmt.constants.PAD)
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
zero_encoder=opt.zero_encoder,
mirror=opt.mirror_loss, streaming_state=streaming_state)
batch_size = batch.size
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model)
loss_data = loss_dict['data']
loss = loss_dict['loss'] # a little trick to avoid gradient overflow with fp16
log_prior = self.model.log_prior()
log_variational_posterior = self.model.log_variational_posterior()
# the coeff starts off at 1 for each epoch
# from BBB paper: The first mini batches in each epoch have large KL coeff
# # the later minibatches are influenced by the data
# denom = math.pow(1.5, min(32, update_counter))
# min_coeff = 1 / (self.opt.model_size ** 2)
# kl_coeff = max(1 / denom, min_coeff)
kl_coeff = 1 / (batch.tgt_size * opt.update_frequency)
# kl_coeff = 1 / (self.opt.model_size ** 2)
# kl_coeff = 1
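                # e.g. (hypothetical numbers) with batch.tgt_size = 2000 target tokens and
                # update_frequency = 4, kl_coeff = 1 / 8000, so the per-batch KL term is
                # scaled down roughly in proportion to the number of target tokens that
                # contribute to one parameter update.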
full_loss = loss + kl_coeff * (log_variational_posterior - log_prior)
# print(log_variational_posterior, log_prior)
if opt.mirror_loss:
rev_loss = loss_dict['rev_loss']
rev_loss_data = loss_dict['rev_loss_data']
mirror_loss = loss_dict['mirror_loss']
full_loss = full_loss + rev_loss + mirror_loss
mirror_loss_data = loss_dict['mirror_loss'].item()
else:
rev_loss = None
rev_loss_data = None
mirror_loss_data = 0
# reconstruction loss
if opt.reconstruct:
rec_loss = loss_dict['rec_loss']
rec_loss = rec_loss
full_loss = full_loss + rec_loss
rec_loss_data = loss_dict['rec_loss_data']
else:
rec_loss_data = None
optimizer = self.optim.optimizer
# When the batch size is large, each gradient step is very easy to explode on fp16
# Normalizing the loss to grad scaler ensures this will not happen
full_loss.div_(grad_scaler)
if self.cuda:
with amp.scale_loss(full_loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
full_loss.backward()
except RuntimeError as e:
if 'out of memory' in str(e):
                    print('| WARNING: ran out of memory on GPU, skipping batch')
oom = True
torch.cuda.empty_cache()
loss = 0
if opt.streaming: # reset stream in this case ...
streaming_state = self.model.init_stream()
else:
raise e
if loss != loss:
# catching NAN problem
oom = True
self.model.zero_grad()
self.optim.zero_grad()
num_accumulated_words = 0
num_accumulated_sents = 0
nan_counter = nan_counter + 1
print("Warning!!! Loss is Nan")
if nan_counter >= 15:
raise ValueError("Training stopped because of multiple NaN occurence. "
"For ASR, using the Relative Transformer is more stable and recommended.")
else:
nan_counter = 0
if not oom:
src_size = batch.src_size
tgt_size = batch.tgt_size
counter = counter + 1
num_accumulated_words += tgt_size
num_accumulated_sents += batch_size
# We only update the parameters after getting gradients from n mini-batches
update_flag = False
if counter >= opt.update_frequency > 0:
update_flag = True
elif 0 < opt.batch_size_update <= num_accumulated_words:
update_flag = True
elif i == n_samples: # update for the last minibatch
update_flag = True
if update_flag:
# accumulated gradient case, in this case the update frequency
if (counter == 1 and self.opt.update_frequency != 1) or counter > 1:
grad_denom = 1 / grad_scaler
if self.opt.normalize_gradient:
grad_denom = num_accumulated_words * grad_denom
else:
grad_denom = 1
# When we accumulate the gradients, each gradient is already normalized by a constant grad_scaler
normalize_gradients(amp.master_params(optimizer), grad_denom)
# Update the parameters.
if self.opt.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.opt.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
self.model.zero_grad()
counter = 0
num_accumulated_words = 0
num_accumulated_sents = 0
num_updates = self.optim._step
update_counter += 1
if opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every:
valid_loss = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
print('Validation perplexity: %g' % valid_ppl)
ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
self.save(ep, valid_ppl, itr=data_iterator)
num_words = tgt_size
report_loss += loss_data
report_log_prior += log_prior.item()
report_log_variational_posterior += log_variational_posterior.item()
report_tgt_words += num_words
report_src_words += src_size
report_sents += 1
total_loss += loss_data
total_words += num_words
total_tokens += batch.get('target_output').nelement()
total_non_pads += batch.get('target_output').ne(onmt.constants.PAD).sum().item()
optim = self.optim
batch_efficiency = total_non_pads / total_tokens
if opt.reconstruct:
report_rec_loss += rec_loss_data
if opt.mirror_loss:
report_rev_loss += rev_loss_data
report_mirror_loss += mirror_loss_data
if i == 0 or (i % opt.log_interval == -1 % opt.log_interval):
log_string = ("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; " %
(epoch, i + 1, len(data_iterator),
math.exp(report_loss / report_tgt_words)))
kl_div = report_log_variational_posterior - report_log_prior
log_string += ("KL q||p: %6.2f ; " % (kl_div / report_sents))
if opt.reconstruct:
                        rec_ppl = math.exp(report_rec_loss / report_src_words)
log_string += (" rec_ppl: %6.2f ; " % rec_ppl)
if opt.mirror_loss:
rev_ppl = math.exp(report_rev_loss / report_tgt_words)
log_string += (" rev_ppl: %6.2f ; " % rev_ppl)
# mirror loss per word
log_string += (" mir_loss: %6.2f ; " % (report_mirror_loss / report_tgt_words))
log_string += ("lr: %.7f ; updates: %7d; " %
(optim.getLearningRate(),
optim._step))
log_string += ("%5.0f src/s; %5.0f tgt/s; " %
(report_src_words / (time.time() - start),
report_tgt_words / (time.time() - start)))
log_string += ("%s elapsed" %
str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
print(log_string)
report_loss = 0
report_tgt_words, report_src_words = 0, 0
report_sents = 0
report_rec_loss, report_rev_loss, report_mirror_loss = 0, 0, 0
report_log_prior, report_log_variational_posterior = 0, 0
start = time.time()
i = i + 1
return total_loss / total_words
# def run(self, save_file=None):
def run(self, checkpoint=None):
opt = self.opt
model = self.model
optim = self.optim
if checkpoint is not None:
self.model.load_state_dict(checkpoint['model'])
prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None
if not opt.reset_optim:
self.optim.load_state_dict(checkpoint['optim'])
if prec_opt is not None and hasattr(prec_opt, "fp16_mixed"):
# Only load amp information if the mode is the same
# Maybe its better to change between optimization mode?
if opt.fp16_mixed == prec_opt.fp16_mixed and opt.fp16 == prec_opt.fp16:
if 'amp' in checkpoint:
amp.load_state_dict(checkpoint['amp'])
# Only load the progress when we use the same optimizer
if 'itr' in checkpoint:
itr_progress = checkpoint['itr']
else:
itr_progress = None
opt.start_epoch = int(math.floor(float(checkpoint['epoch'] + 1)))
resume = True
else:
itr_progress = None
resume = False
del checkpoint['model']
del checkpoint['optim']
del checkpoint
else:
itr_progress = None
print('Initializing model parameters')
init_model_parameters(model, opt)
resume = False
if opt.load_encoder_from:
self.load_encoder_weight(opt.load_encoder_from)
if opt.load_decoder_from:
self.load_decoder_weight(opt.load_decoder_from)
# if we are on a GPU: warm up the memory allocator
if self.cuda:
self.warm_up()
valid_loss = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
print('Validation perplexity: %g' % valid_ppl)
self.start_time = time.time()
for epoch in range(opt.start_epoch, opt.start_epoch + opt.epochs):
print('')
# (1) train for one epoch on the training set
train_loss = self.train_epoch(epoch, resume=resume, itr_progress=itr_progress)
train_ppl = math.exp(min(train_loss, 100))
print('Train perplexity: %g' % train_ppl)
# (2) evaluate on the validation set
valid_loss = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
print('Validation perplexity: %g' % valid_ppl)
self.save(epoch, valid_ppl)
itr_progress = None
resume = False
| 23,966
| 38.35468
| 117
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/train_utils/stats.py
|
""" Statistics calculation utility """
from __future__ import division
import time
import math
import sys
import datetime
from onmt.train_utils.meters import AverageMeter, TimeMeter
class Logger(object):
def __init__(self, optim, scaler=None):
self.optim = optim
self.meters = dict()
self.start_time = time.time()
self.scaler = scaler
# initializing the meters
self.meters["total_loss"] = AverageMeter()
self.meters["total_words"] = AverageMeter()
self.meters["report_loss"] = AverageMeter()
self.meters["report_tgt_words"] = AverageMeter()
self.meters["report_src_words"] = AverageMeter()
self.meters["kl"] = AverageMeter()
self.meters["kl_prior"] = AverageMeter()
self.meters["gnorm"] = AverageMeter()
self.meters["oom"] = AverageMeter()
self.meters["total_sloss"] = AverageMeter()
self.meters["baseline"] = AverageMeter()
self.meters["R"] = AverageMeter()
self.meters["ce"] = AverageMeter()
self.meters["q_entropy"] = AverageMeter()
self.meters["q_mean"] = AverageMeter()
self.meters["q_var"] = AverageMeter()
self.meters["l2"] = AverageMeter()
self.meters["l2_target"] = AverageMeter()
self.meters["total_lang_correct"] = AverageMeter()
self.meters["total_sents"] = AverageMeter()
def reset(self):
for key in self.meters:
self.meters[key].reset()
self.start_time = time.time()
def reset_meter(self, key):
self.meters[key].reset()
def reset_time(self):
self.start_time = time.time()
def log(self, epoch, iteration, data_size):
ppl = math.exp(self.meters["report_loss"].sum / self.meters["report_tgt_words"].sum)
grad_norm = self.meters["gnorm"].avg
oom_count = self.meters["oom"].sum
baseline = self.meters['baseline'].avg
kl = self.meters['kl'].avg # normalized by 6 distributions and the batch_size
R = self.meters['R'].avg #
ce = self.meters['ce'].avg
q_ent = self.meters['q_entropy'].avg
q_mean = self.meters['q_mean'].avg
q_var = self.meters['q_var'].avg
kl_prior = self.meters['kl_prior'].avg
l2 = self.meters['l2'].avg if 'l2' in self.meters else None
l2_target = self.meters['l2_target'].avg if 'l2_target' in self.meters else None
log_string = (("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; lr: %.7f ; num updates: %7d "
+ "%5.0f tgt tok/s; gnorm %.3f; oom %d") %
(epoch, iteration+1, data_size,
ppl,
self.optim.getLearningRate(),
self.optim._step,
self.meters["report_tgt_words"].sum/(time.time()-self.start_time),
grad_norm if grad_norm else 0,
oom_count))
if ce is not None:
log_string += "; ce %.3f" % ce
if baseline is not None:
log_string += "; bl %.3f" % baseline
if kl is not None:
log_string += "; kl %.3f" % kl
if kl_prior is not None:
log_string += "; kl_prior %.3f" % kl_prior
if R is not None:
log_string += "; R %.3f" % R
if q_ent is not None:
log_string += "; q_ent %.3f" % q_ent
if q_mean is not None:
log_string += "; q_mean %.3f" % q_mean
if q_var is not None:
log_string += "; q_var %.3f" % q_var
if self.meters['total_lang_correct'].avg is not None:
total_lang_correct = self.meters['total_lang_correct'].sum
acc = total_lang_correct / self.meters['total_sents'].sum * 100.0
log_string += "; acc %.3f " % acc
if l2 is not None:
log_string += "; l2 %.3f" % l2
if l2_target is not None:
log_string += "; l2 target %.3f" % l2_target
# Don't forget to print this ...
print(log_string)
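# A minimal usage sketch (illustrative only; `optim` is assumed to be an onmt.Optim
# instance exposing getLearningRate() and _step as used above, and AverageMeter is
# assumed to expose an update(value) method, as in typical meter implementations):
#     logger = Logger(optim)
#     logger.meters['report_loss'].update(batch_loss)
#     logger.meters['report_tgt_words'].update(num_target_tokens)
#     logger.log(epoch=1, iteration=0, data_size=num_batches)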
| 4,077
| 33.559322
| 93
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/train_utils/accent_gan_trainer.py
|
from __future__ import division
import datetime
import gc
import inspect
import math
import os
import re
import time
import torch
from apex import amp
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.multidata_iterator import MultiDataIterator
from onmt.data.dataset import rewrap
from onmt.model_factory import build_model, build_language_model, optimize_model
from onmt.model_factory import init_model_parameters
from onmt.train_utils.stats import Logger
from onmt.utils import checkpoint_paths, normalize_gradients
from .trainer import BaseTrainer
def varname(p):
for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
if m:
return m.group(1)
def generate_data_iterator(dataset, seed, num_workers=1, epoch=1., buffer_size=0):
# check if dataset is a list:
if isinstance(dataset, list):
# this is a multidataset
data_iterator = MultiDataIterator(dataset, seed=seed, num_workers=num_workers,
epoch=epoch, buffer_size=buffer_size)
else:
data_iterator = DataIterator(dataset, dataset.collater, dataset.batches, seed=seed,
num_workers=num_workers, epoch=epoch, buffer_size=buffer_size)
return data_iterator
class SpeechAETrainer(BaseTrainer):
def __init__(self, model, loss_function, train_data, valid_data, dicts, opt, setup_optimizer=True):
super().__init__(model, loss_function, train_data, valid_data, dicts, opt)
self.n_gpus = len(self.opt.gpus)
if self.cuda:
torch.cuda.set_device(self.opt.gpus[0])
if self.opt.seed >= 0:
torch.manual_seed(self.opt.seed)
self.loss_function = self.loss_function.cuda()
self.model = self.model.cuda()
if setup_optimizer:
self.optim = onmt.Optim(opt)
self.optim.set_parameters(self.model.parameters())
if not self.opt.fp16:
opt_level = "O0"
keep_batchnorm_fp32 = False
elif self.opt.fp16_mixed:
opt_level = "O1"
keep_batchnorm_fp32 = None
else:
opt_level = "O2"
keep_batchnorm_fp32 = False
if self.cuda:
self.model, self.optim.optimizer = amp.initialize(self.model,
self.optim.optimizer,
opt_level=opt_level,
keep_batchnorm_fp32=keep_batchnorm_fp32,
loss_scale="dynamic",
verbosity=1 if self.opt.verbose else 0)
def warm_up(self):
"""
Warmup the memory allocator, by attempting to fit the largest batch
:return:
"""
print("Tacotron_warmup")
if self.opt.memory_profiling:
from pytorch_memlab import MemReporter
reporter = MemReporter()
batch = self.train_data[0].get_largest_batch() if isinstance(self.train_data, list) \
else self.train_data.get_largest_batch()
opt = self.opt
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
self.model.train()
self.model.zero_grad()
oom = False
if self.opt.memory_profiling:
print("Input size: ")
print(batch.size, batch.src_size, batch.tgt_size)
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
try:
targets = batch.get('target_output')
tgt_mask = None
outputs = self.model(batch)
gate_padded = batch.get('gate_padded')
if self.opt.n_frames_per_step > 1:
slice = torch.arange(self.opt.n_frames_per_step - 1, gate_padded.size(1), self.opt.n_frames_per_step)
gate_padded = gate_padded[:, slice]
src_org = batch.get('source_org')
src_org = src_org.narrow(2, 1, src_org.size(2) - 1)
target = [src_org.permute(1,2,0).contiguous(), gate_padded]
loss = self.loss_function(outputs, target)
# loss_dict = self.loss_function(outputs, targets, model=self.model)
loss = loss # a little trick to avoid gradient overflow with fp16
full_loss = loss
optimizer = self.optim.optimizer
if self.opt.memory_profiling:
reporter.report(verbose=True)
if self.cuda:
with amp.scale_loss(full_loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.div_(batch.tgt_size).backward()
if self.opt.memory_profiling:
print('========= after backward =========')
reporter.report(verbose=True)
self.model.zero_grad()
self.optim.zero_grad()
except RuntimeError as e:
if 'out of memory' in str(e):
oom = True
else:
raise e
        if oom:
            print("* Warning: out-of-memory during warm-up because the largest batch is too big for the GPU.")
        else:
            print("* Warming up finished successfully.")
if self.opt.memory_profiling:
if hasattr(torch.cuda, 'memory_summary'):
print(torch.cuda.memory_summary())
exit()
def save(self, epoch, valid_ppl, itr=None):
opt = self.opt
model = self.model
dicts = self.dicts
model_state_dict = self.model.state_dict()
optim_state_dict = self.optim.state_dict()
if itr:
itr_state_dict = itr.state_dict()
else:
itr_state_dict = None
# drop a checkpoint
checkpoint = {
'model': model_state_dict,
'dicts': dicts,
'opt': opt,
'epoch': epoch,
'itr': itr_state_dict,
'optim': optim_state_dict,
'amp': amp.state_dict()
}
file_name = '%s_ppl_%.6f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)
print('Writing to %s' % file_name)
torch.save(checkpoint, file_name)
# check the save directory here
checkpoint_dir = os.path.dirname(opt.save_model)
existed_save_files = checkpoint_paths(checkpoint_dir)
for save_file in existed_save_files[opt.keep_save_files:]:
print(" * Deleting old save file %s ...." % save_file)
os.remove(save_file)
def run(self, checkpoint=None):
opt = self.opt
model = self.model
optim = self.optim
if checkpoint is not None:
self.model.load_state_dict(checkpoint['model'])
prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None
if not opt.reset_optim:
print("* Loading optimizer states ... ")
self.optim.load_state_dict(checkpoint['optim'])
if prec_opt is not None and hasattr(prec_opt, "fp16_mixed"):
# Only load amp information if the mode is the same
                    # Maybe it's better to switch between optimization modes?
if opt.fp16_mixed == prec_opt.fp16_mixed and opt.fp16 == prec_opt.fp16:
if 'amp' in checkpoint:
amp.load_state_dict(checkpoint['amp'])
# Only load the progress when we use the same optimizer
if 'itr' in checkpoint:
itr_progress = checkpoint['itr']
else:
itr_progress = None
resume = True
start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 1
if start_epoch is None:
start_epoch = 1
else:
itr_progress = None
resume = False
start_epoch = 1
del checkpoint['model']
del checkpoint['optim']
del checkpoint
else:
itr_progress = None
print('Initializing model parameters')
init_model_parameters(model, opt)
resume = False
start_epoch = 1
if opt.load_encoder_from:
self.load_encoder_weight(opt.load_encoder_from)
if opt.load_decoder_from:
self.load_decoder_weight(opt.load_decoder_from)
# if we are on a GPU: warm up the memory allocator
if self.cuda:
self.warm_up()
valid_loss = self.eval(self.valid_data)
print('Validation loss: %g' % valid_loss)
self.start_time = time.time()
for epoch in range(start_epoch, start_epoch + opt.epochs):
print('')
# (1) train for one epoch on the training set
train_loss = self.train_epoch(epoch, resume=resume, itr_progress=itr_progress)
print('Train loss: %g' % train_loss)
# (2) evaluate on the validation set
valid_loss = self.eval(self.valid_data)
print('Validation loss: %g' % valid_loss)
self.save(epoch, valid_loss)
itr_progress = None
resume = False
def eval(self, data):
total_loss = 0
total_tgt_frames = 0
total_sent = 0
opt = self.opt
self.model.eval()
self.loss_function.eval()
# self.model.reset_states()
# the data iterator creates an epoch iterator
data_iterator = generate_data_iterator(data, seed=self.opt.seed,
num_workers=opt.num_workers, epoch=1, buffer_size=opt.buffer_size)
epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
""" PyTorch semantics: save space by not creating gradients """
data_size = len(epoch_iterator)
i = 0
with torch.no_grad():
# for i in range(len()):
while not data_iterator.end_of_epoch():
# batch = data.next()[0]
batch = next(epoch_iterator)
if isinstance(batch, list):
batch = batch[0]
batch = rewrap(batch)
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
""" outputs can be either
hidden states from decoder or
prob distribution from decoder generator
"""
outputs = self.model(batch)
gate_padded = batch.get('gate_padded')
if self.opt.n_frames_per_step > 1:
slice = torch.arange(self.opt.n_frames_per_step - 1, gate_padded.size(1), self.opt.n_frames_per_step)
gate_padded = gate_padded[:, slice]
src_org = batch.get('source_org')
src_org = src_org.narrow(2, 1, src_org.size(2) - 1)
target = [src_org.permute(1, 2, 0).contiguous(), gate_padded]
loss = self.loss_function(outputs, target)
loss_data = loss.data.item()
total_loss += loss_data
total_tgt_frames += batch.src_size
total_sent += batch.size
i = i + 1
self.model.train()
self.loss_function.train()
return total_loss / data_size * 100
def train_epoch(self, epoch, resume=False, itr_progress=None):
global rec_ppl
opt = self.opt
train_data = self.train_data
streaming = opt.streaming
self.model.train()
self.loss_function.train()
# Clear the gradients of the model
# self.runner.zero_grad()
self.model.zero_grad()
dataset = train_data
data_iterator = generate_data_iterator(dataset, seed=self.opt.seed, num_workers=opt.num_workers,
epoch=epoch, buffer_size=opt.buffer_size)
if resume:
data_iterator.load_state_dict(itr_progress)
epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
total_loss, total_frames = 0, 0
        report_loss, report_tgt_frames, report_sent = 0, 0, 0
start = time.time()
n_samples = len(epoch_iterator)
counter = 0
num_accumulated_sents = 0
grad_scaler = -1
nan = False
nan_counter = 0
i = data_iterator.iterations_in_epoch if not isinstance(train_data, list) else epoch_iterator.n_yielded
while not data_iterator.end_of_epoch():
curriculum = (epoch < opt.curriculum)
# this batch generator is not very clean atm
batch = next(epoch_iterator)
if isinstance(batch, list) and self.n_gpus == 1:
batch = batch[0]
batch = rewrap(batch)
if grad_scaler == -1:
grad_scaler = 1 # if self.opt.update_frequency > 1 else batch.tgt_size
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
oom = False
try:
# outputs is a dictionary containing keys/values necessary for loss function
# can be flexibly controlled within models for easier extensibility
# targets = batch.get('target_output')
# tgt_mask = targets.ne(onmt.constants.PAD)
outputs = self.model(batch)
gate_padded = batch.get('gate_padded')
if self.opt.n_frames_per_step > 1:
slice = torch.arange(0, gate_padded.size(1), self.opt.n_frames_per_step)
gate_padded = gate_padded[:, slice]
src_org = batch.get('source_org')
src_org = src_org.narrow(2, 1, src_org.size(2) - 1)
target = [src_org.permute(1, 2, 0).contiguous(), gate_padded]
loss = self.loss_function(outputs, target)
batch_size = batch.size
loss_data = loss.data.item()
# a little trick to avoid gradient overflow with fp16
full_loss = loss
optimizer = self.optim.optimizer
# When the batch size is large, each gradient step is very easy to explode on fp16
# Normalizing the loss to grad scaler ensures this will not happen
full_loss.div_(grad_scaler)
if self.cuda:
with amp.scale_loss(full_loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
full_loss.backward()
del outputs
except RuntimeError as e:
if 'out of memory' in str(e):
                    print('| WARNING: ran out of memory on GPU, skipping batch')
oom = True
torch.cuda.empty_cache()
loss = 0
if opt.streaming: # reset stream in this case ...
streaming_state = self.model.init_stream()
else:
raise e
if loss != loss:
# catching NAN problem
oom = True
self.model.zero_grad()
self.optim.zero_grad()
nan_counter = nan_counter + 1
print("Warning!!! Loss is Nan")
if nan_counter >= 15:
raise ValueError("Training stopped because of multiple NaN occurence. "
"For ASR, using the Relative Transformer is more stable and recommended.")
else:
nan_counter = 0
if not oom:
src_size = batch.src_size
counter = counter + 1
# We only update the parameters after getting gradients from n mini-batches
update_flag = False
if counter >= opt.update_frequency > 0:
update_flag = True
elif i == n_samples: # update for the last minibatch
update_flag = True
if update_flag:
# accumulated gradient case, in this case the update frequency
if (counter == 1 and self.opt.update_frequency != 1) or counter > 1:
grad_denom = 1 / grad_scaler
# if self.opt.normalize_gradient:
# grad_denom = num_accumulated_words * grad_denom
else:
grad_denom = 1.0
# When we accumulate the gradients, each gradient is already normalized by a constant grad_scaler
normalize_gradients(amp.master_params(optimizer), grad_denom)
# Update the parameters.
if self.opt.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.opt.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
self.model.zero_grad()
counter = 0
# num_accumulated_words = 0
grad_scaler = -1
num_updates = self.optim._step
if opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every:
valid_loss = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
print('Validation perplexity: %g' % valid_ppl)
ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
self.save(ep, valid_ppl, itr=data_iterator)
report_loss += loss_data
# report_tgt_words += num_words
num_accumulated_sents += batch_size
report_sent += batch_size
                total_frames += src_size
report_tgt_frames += src_size
total_loss += loss_data
optim = self.optim
# batch_efficiency = total_non_pads / total_tokens
if i == 0 or (i % opt.log_interval == -1 % opt.log_interval):
log_string = ("Epoch %2d, %5d/%5d; ; loss : %6.2f ; " %
(epoch, i + 1, len(data_iterator),
report_loss ))
log_string += ("lr: %.7f ; updates: %7d; " %
(optim.getLearningRate(),
optim._step))
#
log_string += ("%5.0f src tok/s " %
(report_tgt_frames / (time.time() - start)))
log_string += ("%s elapsed" %
str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
print(log_string)
report_loss = 0
report_tgt_frames = 0
report_sent = 0
start = time.time()
i = i + 1
return total_loss / n_samples * 100
class XETrainer(BaseTrainer):
def __init__(self, model, loss_function, train_data, valid_data, dicts, opt, setup_optimizer=True):
super().__init__(model, loss_function, train_data, valid_data, dicts, opt)
if opt.lfv_multilingual or opt.lid_loss:
from onmt.models.speech_recognizer.lid_loss import CrossEntropyLIDLoss
lid_loss = CrossEntropyLIDLoss(opt.n_languages, opt.label_smoothing, opt.fast_xentropy)
self.loss_function.add_loss_function(lid_loss, 'lid_loss')
self.n_gpus = len(self.opt.gpus)
if self.cuda:
torch.cuda.set_device(self.opt.gpus[0])
if self.opt.seed >= 0:
torch.manual_seed(self.opt.seed)
self.loss_function = self.loss_function.cuda()
self.model = self.model.cuda()
if setup_optimizer:
self.optim = onmt.Optim(opt)
self.optim.set_parameters(self.model.parameters())
if not self.opt.fp16:
opt_level = "O0"
keep_batchnorm_fp32 = False
elif self.opt.fp16_mixed:
opt_level = "O1"
keep_batchnorm_fp32 = None
else:
opt_level = "O2"
keep_batchnorm_fp32 = False
if self.cuda:
# print(234)
self.model, self.optim.optimizer = amp.initialize(self.model,
self.optim.optimizer,
opt_level=opt_level,
keep_batchnorm_fp32=keep_batchnorm_fp32,
loss_scale="dynamic",
verbosity=1 if self.opt.verbose else 0)
# An ugly hack to switch between align right and align left
if hasattr(self.model, 'relative'):
if self.model.relative:
self.train_data.src_align_right = True
self.train_data.tgt_align_right = False
self.valid_data.src_align_right = True
self.valid_data.tgt_align_right = False
def save(self, epoch, valid_ppl, itr=None):
opt = self.opt
model = self.model
dicts = self.dicts
model_state_dict = self.model.state_dict()
optim_state_dict = self.optim.state_dict()
if itr:
itr_state_dict = itr.state_dict()
else:
itr_state_dict = None
# drop a checkpoint
checkpoint = {
'model': model_state_dict,
'dicts': dicts,
'opt': opt,
'epoch': epoch,
'itr': itr_state_dict,
'optim': optim_state_dict,
'amp': amp.state_dict()
}
file_name = '%s_ppl_%.6f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)
print('Writing to %s' % file_name)
torch.save(checkpoint, file_name)
# check the save directory here
checkpoint_dir = os.path.dirname(opt.save_model)
existed_save_files = checkpoint_paths(checkpoint_dir)
for save_file in existed_save_files[opt.keep_save_files:]:
print(" * Deleting old save file %s ...." % save_file)
os.remove(save_file)
def eval(self, data):
total_loss = 0
total_words = 0
opt = self.opt
self.model.eval()
self.loss_function.eval()
self.model.reset_states()
# the data iterator creates an epoch iterator
data_iterator = generate_data_iterator(data, seed=self.opt.seed,
num_workers=opt.num_workers, epoch=1, buffer_size=opt.buffer_size)
epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
""" PyTorch semantics: save space by not creating gradients """
data_size = len(epoch_iterator)
i = 0
with torch.no_grad():
# for i in range(len()):
while not data_iterator.end_of_epoch():
# batch = data.next()[0]
batch = next(epoch_iterator)
if isinstance(batch, list):
batch = batch[0]
batch = rewrap(batch)
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
""" outputs can be either
hidden states from decoder or
prob distribution from decoder generator
"""
targets = batch.get('target_output')
tgt_mask = targets.ne(onmt.constants.PAD)
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
mirror=opt.mirror_loss, streaming_state=streaming_state, nce=opt.nce)
if opt.streaming:
streaming_state = outputs['streaming_state']
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model, eval=True)
loss_data = loss_dict['data']
total_loss += loss_data
total_words += batch.tgt_size
i = i + 1
self.model.train()
self.loss_function.train()
return total_loss / total_words
def train_epoch(self, epoch, resume=False, itr_progress=None):
global rec_ppl
opt = self.opt
train_data = self.train_data
streaming = opt.streaming
self.model.train()
self.loss_function.train()
# Clear the gradients of the model
# self.runner.zero_grad()
self.model.zero_grad()
self.model.reset_states()
dataset = train_data
data_iterator = generate_data_iterator(dataset, seed=self.opt.seed, num_workers=opt.num_workers,
epoch=epoch, buffer_size=opt.buffer_size)
if resume:
data_iterator.load_state_dict(itr_progress)
epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
total_tokens, total_loss, total_words = 0, 0, 0
total_non_pads = 0
report_loss, report_tgt_words = 0, 0
report_src_words = 0
report_rec_loss, report_rev_loss, report_mirror_loss = 0, 0, 0
start = time.time()
n_samples = len(epoch_iterator)
counter = 0
num_accumulated_words = 0
num_accumulated_sents = 0
grad_scaler = -1
nan = False
nan_counter = 0
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
i = data_iterator.iterations_in_epoch if not isinstance(train_data, list) else epoch_iterator.n_yielded
while not data_iterator.end_of_epoch():
curriculum = (epoch < opt.curriculum)
# this batch generator is not very clean atm
batch = next(epoch_iterator)
if isinstance(batch, list) and self.n_gpus == 1:
batch = batch[0]
batch = rewrap(batch)
if grad_scaler == -1:
grad_scaler = 1 # if self.opt.update_frequency > 1 else batch.tgt_size
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
# if opt.streaming:
# if train_data.is_new_stream():
# streaming_state = self.model.init_stream()
# else:
# streaming_state = None
oom = False
try:
# outputs is a dictionary containing keys/values necessary for loss function
# can be flexibly controlled within models for easier extensibility
targets = batch.get('target_output')
tgt_mask = targets.ne(onmt.constants.PAD)
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
zero_encoder=opt.zero_encoder,
mirror=opt.mirror_loss, streaming_state=streaming_state,
nce=opt.nce)
# print("time " + str(time.time() - start_time_t))
batch_size = batch.size
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model)
loss_data = loss_dict['data']
loss = loss_dict['loss'] # a little trick to avoid gradient overflow with fp16
full_loss = loss
if opt.mirror_loss:
rev_loss = loss_dict['rev_loss']
rev_loss_data = loss_dict['rev_loss_data']
mirror_loss = loss_dict['mirror_loss']
full_loss = full_loss + rev_loss + mirror_loss
mirror_loss_data = loss_dict['mirror_loss'].item()
else:
rev_loss_data = None
mirror_loss_data = 0
# reconstruction loss
if opt.reconstruct:
rec_loss = loss_dict['rec_loss']
rec_loss = rec_loss
full_loss = full_loss + rec_loss
rec_loss_data = loss_dict['rec_loss_data']
else:
rec_loss_data = None
if opt.lfv_multilingual or opt.lid_loss:
lid_logits = outputs['lid_logits']
lid_labels = batch.get('target_lang')
lid_loss_function = self.loss_function.get_loss_function('lid_loss')
lid_loss = lid_loss_function([lid_logits.unsqueeze(0)] , lid_labels)
full_loss = full_loss + lid_loss
optimizer = self.optim.optimizer
# When the batch size is large, each gradient step is very easy to explode on fp16
# Normalizing the loss to grad scaler ensures this will not happen
full_loss.div_(grad_scaler)
if self.cuda:
with amp.scale_loss(full_loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
full_loss.backward()
del outputs
except RuntimeError as e:
if 'out of memory' in str(e):
                    print('| WARNING: ran out of memory on GPU, skipping batch')
oom = True
torch.cuda.empty_cache()
loss = 0
if opt.streaming: # reset stream in this case ...
streaming_state = self.model.init_stream()
else:
raise e
if loss != loss:
# catching NAN problem
oom = True
self.model.zero_grad()
self.optim.zero_grad()
num_accumulated_words = 0
num_accumulated_sents = 0
nan_counter = nan_counter + 1
print("Warning!!! Loss is Nan")
if nan_counter >= 15:
                    raise ValueError("Training stopped because of multiple NaN occurrences. "
                                     "For ASR, using the Relative Transformer is more stable and recommended.")
else:
nan_counter = 0
if not oom:
src_size = batch.src_size
tgt_size = batch.tgt_size
counter = counter + 1
num_accumulated_words += tgt_size
num_accumulated_sents += batch_size
# We only update the parameters after getting gradients from n mini-batches
update_flag = False
if counter >= opt.update_frequency > 0:
update_flag = True
elif 0 < opt.batch_size_update <= num_accumulated_words:
update_flag = True
elif i == n_samples: # update for the last minibatch
update_flag = True
if update_flag:
# accumulated gradient case, in this case the update frequency
if (counter == 1 and self.opt.update_frequency != 1) or counter > 1:
grad_denom = 1 / grad_scaler
if self.opt.normalize_gradient:
grad_denom = num_accumulated_words * grad_denom
else:
grad_denom = 1
# When we accumulate the gradients, each gradient is already normalized by a constant grad_scaler
normalize_gradients(amp.master_params(optimizer), grad_denom)
# Update the parameters.
if self.opt.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.opt.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
self.model.zero_grad()
counter = 0
num_accumulated_words = 0
num_accumulated_sents = 0
grad_scaler = -1
num_updates = self.optim._step
if opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every:
valid_loss = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
print('Validation perplexity: %g' % valid_ppl)
ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
self.save(ep, valid_ppl, itr=data_iterator)
num_words = tgt_size
report_loss += loss_data
report_tgt_words += num_words
report_src_words += src_size
total_loss += loss_data
total_words += num_words
total_tokens += batch.get('target_output').nelement()
total_non_pads += batch.get('target_output').ne(onmt.constants.PAD).sum().item()
optim = self.optim
batch_efficiency = total_non_pads / total_tokens
if opt.reconstruct:
report_rec_loss += rec_loss_data
if opt.mirror_loss:
report_rev_loss += rev_loss_data
report_mirror_loss += mirror_loss_data
if i == 0 or (i % opt.log_interval == -1 % opt.log_interval):
log_string = ("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; " %
(epoch, i + 1, len(data_iterator),
math.exp(report_loss / report_tgt_words)))
if opt.reconstruct:
                        rec_ppl = math.exp(report_rec_loss / report_src_words)
log_string += (" rec_ppl: %6.2f ; " % rec_ppl)
if opt.mirror_loss:
rev_ppl = math.exp(report_rev_loss / report_tgt_words)
log_string += (" rev_ppl: %6.2f ; " % rev_ppl)
# mirror loss per word
log_string += (" mir_loss: %6.2f ; " % (report_mirror_loss / report_tgt_words))
log_string += ("lr: %.7f ; updates: %7d; " %
(optim.getLearningRate(),
optim._step))
log_string += ("%5.0f src tok/s; %5.0f tgt tok/s; " %
(report_src_words / (time.time() - start),
report_tgt_words / (time.time() - start)))
log_string += ("%s elapsed" %
str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
print(log_string)
report_loss = 0
report_tgt_words, report_src_words = 0, 0
report_rec_loss, report_rev_loss, report_mirror_loss = 0, 0, 0
start = time.time()
i = i + 1
return total_loss / total_words
# def run(self, save_file=None):
def run(self, checkpoint=None):
opt = self.opt
model = self.model
optim = self.optim
if checkpoint is not None:
self.model.load_state_dict(checkpoint['model'])
prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None
if not opt.reset_optim:
print("* Loading optimizer states ... ")
self.optim.load_state_dict(checkpoint['optim'])
if prec_opt is not None and hasattr(prec_opt, "fp16_mixed"):
# Only load amp information if the mode is the same
                    # Maybe it's better to switch between optimization modes?
if opt.fp16_mixed == prec_opt.fp16_mixed and opt.fp16 == prec_opt.fp16:
if 'amp' in checkpoint:
amp.load_state_dict(checkpoint['amp'])
# Only load the progress when we use the same optimizer
if 'itr' in checkpoint:
itr_progress = checkpoint['itr']
else:
itr_progress = None
resume = True
start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 1
if start_epoch is None:
start_epoch = 1
else:
itr_progress = None
resume = False
start_epoch = 1
del checkpoint['model']
del checkpoint['optim']
del checkpoint
else:
itr_progress = None
print('Initializing model parameters')
init_model_parameters(model, opt)
resume = False
start_epoch = 1
if opt.load_encoder_from:
self.load_encoder_weight(opt.load_encoder_from)
if opt.load_decoder_from:
self.load_decoder_weight(opt.load_decoder_from)
# if we are on a GPU: warm up the memory allocator
self.start_time = time.time()
if self.cuda:
self.warm_up()
valid_loss = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
print('Validation perplexity: %g' % valid_ppl)
# valid_loss = self.train_epoch(0)
# valid_ppl = math.exp(min(valid_loss, 100))
#
# print('Validation perplexity: %g' % valid_ppl)
for epoch in range(start_epoch, start_epoch + opt.epochs):
print('')
# (1) train for one epoch on the training set
train_loss = self.train_epoch(epoch, resume=resume, itr_progress=itr_progress)
train_ppl = math.exp(min(train_loss, 100))
print('Train perplexity: %g' % train_ppl)
# (2) evaluate on the validation set
valid_loss = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
print('Validation perplexity: %g' % valid_ppl)
self.save(epoch, valid_ppl)
itr_progress = None
resume = False
| 39,445
| 36.675263
| 121
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/train_utils/evaluator.py
|
from __future__ import division
import sys, tempfile
import onmt
import onmt.modules
#~ from onmt.metrics.gleu import sentence_gleu
#~ from onmt.metrics.sbleu import sentence_bleu
from onmt.metrics.bleu import moses_multi_bleu
#~ from onmt.utils import compute_score
import torch
import torch.nn as nn
from torch import cuda
from torch.autograd import Variable
import math
class Evaluator(object):
def __init__(self, model, dataset, opt, cuda=False):
# some properties
self.dataset = dataset
self.dicts = dataset['dicts']
self.setIDs = dataset['dicts']['setIDs']
self.model = model
self.cuda = cuda
# self.translator = onmt.InplaceTranslator(self.model, self.dicts,
# beam_size=1,
# cuda=self.cuda)
def setScore(self, score):
self.score = score
def setCriterion(self, criterion):
self.criterion = criterion
# Compute perplexity of a data given the model
# For a multilingual dataset, we may need the setIDs of the desired languages
# data is a dictionary with key = setid and value = DataSet object
def eval_perplexity(self, data, loss_function):
total_loss = 0
total_words = 0
self.model.eval()
        with torch.no_grad():
            for i in range(len(data)):
                batch = data[i]
                # NOTE: this legacy evaluator assumes the model returns the decoder outputs directly
                outputs = self.model(batch)
                # exclude <s> from targets
                targets = batch[1][1:]
                # the provided loss_function is assumed to return a scalar loss summed over
                # the non-padding target tokens
                loss = loss_function(outputs, targets)
                total_loss += loss
                total_words += targets.data.ne(onmt.constants.PAD).sum()
        self.model.train()
        return total_loss / total_words
#~ def eval_reinforce(self, data, score, verbose=False):
#~
#~ total_score = 0
#~ total_sentences = 0
#~
#~ total_hit = 0
#~ total_hit_sentences = 0
#~ total_gleu = 0
#~
#~ model = self.model
#~ model.eval()
#~ tgtDict = self.dicts['tgt']
#~ srcDict = self.dicts['src']
#~
#~ for i in range(len(data)):
#~ batch = data[i][:-1]
#~ src = batch[0]
#~ ref = batch[1][1:]
#~ # we need to sample
#~ sampled_sequence = model.sample(src, max_length=100, argmax=True)
#~ batch_size = ref.size(1)
#~
#~ for idx in xrange(batch_size):
#~
#~ tgtIds = sampled_sequence.data[:,idx]
#~
#~ tgtWords = tgtDict.convertTensorToLabels(tgtIds, onmt.Constants.EOS)
#~
#~ refIds = ref.data[:,idx]
#~
#~ refWords = tgtDict.convertTensorToLabels(refIds, onmt.Constants.EOS)
#~
#~ # return a single score value
#~ s = score(refWords, tgtWords)
#~
#~ if len(s) > 2:
#~ gleu = s[1]
#~ hit = s[2]
#~
#~ if hit >= 0:
#~ total_hit_sentences += 1
#~ total_hit += hit
#~
#~ if verbose:
#~ sampledSent = " ".join(tgtWords)
#~ refSent = " ".join(refWords)
#~
#~ if s[0] > 0:
#~ print "SAMPLE :", sampledSent
#~ print " REF :", refSent
#~ print "Score =", s
#~
#~ # bleu is scaled by 100, probably because improvement by .01 is hard ?
#~ total_score += s[0] * 100
#~
#~ total_sentences += batch_size
#~
#~ if total_hit_sentences > 0:
#~ average_hit = total_hit / total_hit_sentences
#~ print("Average HIT : %.2f" % (average_hit * 100))
#~
#~ average_score = total_score / total_sentences
#~ model.train()
#~ return average_score
# Compute translation quality of a data given the model
# def eval_translate(self, data, beam_size=1, batch_size=16, bpe=True, bpe_token="@"):
# model = self.model
# setIDs = self.setIDs
# count = 0
# one score for each language pair
# bleu_scores = dict()
# for sid in data: # sid = setid
# if self.adapt:
# if sid != self.adapt_pair:
# continue
# dset = data[sid]
# model.switchLangID(setIDs[sid][0], setIDs[sid][1])
# model.switchPairID(sid)
# tgt_lang = self.dicts['tgtLangs'][setIDs[sid][1]]
# src_lang = self.dicts['srcLangs'][setIDs[sid][0]]
# tgt_dict = self.dicts['vocabs'][tgt_lang]
# src_dict = self.dicts['vocabs'][src_lang]
# we print translations into temp files
# outF = tempfile.NamedTemporaryFile()
# outRef = tempfile.NamedTemporaryFile()
# for i in range(len(dset)):
# exclude original indices
# batch = dset[i][:-1]
# src = batch[0]
# exclude <s> from targets
# targets = batch[1][1:]
# transposed_targets = targets.data.transpose(0, 1) # bsize x nwords
# pred = self.translator.translate(src)
# bpe_string = bpe_token + bpe_token + " "
# for b in range(len(pred)):
# ref_tensor = transposed_targets[b].tolist()
# decodedSent = tgt_dict.convertToLabels(pred[b], onmt.Constants.EOS)
# decodedSent = " ".join(decodedSent)
# decodedSent = decodedSent.replace(bpe_string, '')
# refSent = tgt_dict.convertToLabels(ref_tensor, onmt.Constants.EOS)
# refSent = " ".join(refSent)
# refSent = refSent.replace(bpe_string, '')
# Flush the pred and reference sentences to temp files
# outF.write(decodedSent + "\n")
# outF.flush()
# outRef.write(refSent + "\n")
# outRef.flush()
# compute bleu using external script
# bleu = moses_multi_bleu(outF.name, outRef.name)
# outF.close()
# outRef.close()
# bleu_scores[sid] = bleu
# after decoding, switch model back to training mode
# self.model.train()
# return bleu_scores
| 7,248
| 34.18932
| 95
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/train_utils/gem_trainer.py
|
from __future__ import division
import datetime
import gc
import math
import os
import re
import time
import torch
import copy
import sys
import contextlib
import numpy as np
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.multidata_iterator import MultiDataIterator
from onmt.data.dataset import rewrap
from onmt.model_factory import build_model, build_language_model, optimize_model
from onmt.model_factory import init_model_parameters
from onmt.modules.loss import NMTLossFunc, NMTAndCTCLossFunc
from onmt.train_utils.stats import Logger
from onmt.utils import checkpoint_paths, normalize_gradients, clip_grad_norm
from onmt.model_factory import build_model, optimize_model, init_model_parameters
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP_model
from torch.cuda.amp import autocast
import warnings
from onmt.constants import add_tokenidx
import dill
# ignore the pytorch -> numpy conversion warnings
warnings.filterwarnings("ignore", category=UserWarning)
import quadprog
from .mp_trainer import prepare_sample, generate_data_iterator, zero_tensor, Trainer
def store_grad(pp, grads, grad_dims, tid):
"""
This stores parameter gradients of past tasks.
pp: parameters
grads: gradients
    grad_dims: list with the number of parameters per layer
tid: task id
"""
# store the gradients
grads[:, tid].fill_(0.0)
cnt = 0
for param in pp:
if param.grad is not None:
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
grads[beg: en, tid].copy_(param.grad.data.view(-1))
cnt += 1
def overwrite_grad(pp, newgrad, grad_dims):
"""
This is used to overwrite the gradients with a new gradient
vector, whenever violations occur.
pp: parameters
newgrad: corrected gradient
grad_dims: list storing number of parameters at each layer
"""
cnt = 0
for param in pp:
if param.grad is not None:
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
this_grad = newgrad[beg: en].contiguous().view(
param.grad.data.size())
param.grad.data.copy_(this_grad)
cnt += 1
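# Round-trip sketch with hypothetical sizes: if grad_dims = [4, 6] then grads is a (10 x n_tasks)
# buffer; store_grad(params, grads, grad_dims, tid) flattens each parameter's .grad into column tid,
# and overwrite_grad(params, grads[:, tid], grad_dims) copies that column back into the .grad buffers.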
def project2cone2(gradient, memories, margin=0.5, eps=1e-3):
"""
Solves the GEM dual QP described in the paper given a proposed
gradient "gradient", and a memory of task gradients "memories".
Overwrites "gradient" with the final projected update.
input: gradient, p-vector
input: memories, (t * p)-vector
output: x, p-vector
"""
memories_np = memories.cpu().t().double().numpy()
gradient_np = gradient.cpu().contiguous().view(-1).double().numpy()
t = memories_np.shape[0]
P = np.dot(memories_np, memories_np.transpose())
P = 0.5 * (P + P.transpose()) + np.eye(t) * eps
q = np.dot(memories_np, gradient_np) * -1
G = np.eye(t)
h = np.zeros(t) + margin
v = quadprog.solve_qp(P, q, G, h)[0]
x = np.dot(v, memories_np) + gradient_np
gradient.copy_(torch.Tensor(x).to(gradient.device).view(-1, 1))
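# Illustrative sketch (not used by the training code): a toy 2-D projection with made-up gradient
# values, showing how project2cone2 rewrites a conflicting gradient so that its dot product with
# the stored memory gradient becomes non-negative.
def _toy_gem_projection_demo():
    g = torch.tensor([[1.0], [-1.0]])     # proposed gradient, shape (p, 1)
    mem = torch.tensor([[1.0], [1.0]])    # one memory gradient, shape (p, n_memories)
    project2cone2(g, mem, margin=0.5)     # overwrites g in place
    return torch.mm(g.t(), mem).item()    # 1.0 for these values, i.e. no conflict remains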
def is_factorized_param(p):
if p.endswith("r_i") or p.endswith("s_i"):
return True
if p.endswith("rm_i") or p.endswith("rm_o"):
return True
if p.endswith("sm_i") or p.endswith("sm_o"):
return True
if p.endswith("r_o") or p.endswith("s_o"):
return True
if p.endswith("r_p") or p.endswith("s_p"):
return True
if p.endswith("rm_p") or p.endswith("sm_p"):
return True
if p.endswith("r_q") or p.endswith("s_q") or p.endswith("r_kv") or p.endswith("s_kv"):
return True
if p.endswith("rm_q") or p.endswith("sm_q") or p.endswith("rm_kv") or p.endswith("sm_kv"):
return True
return False
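# Example (sketch): names ending in the factorized suffixes above are treated as factorized
# weights and excluded from the GEM parameter list, e.g.
#   is_factorized_param("decoder.layers.0.attn.r_i")    -> True
#   is_factorized_param("decoder.layers.0.attn.weight") -> False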
class GEMTrainer(Trainer):
def __init__(self, device, train_data, valid_data, dicts, opt, constants=None, setup_optimizer=True):
"""
:param model:
:param device: int (GPU id)
:param loss_function:
:param train_data:
:param valid_data:
:param dicts:
:param opt:
"""
super(GEMTrainer, self).__init__(device, train_data, valid_data, dicts, opt,
constants=constants, setup_optimizer=setup_optimizer)
assert isinstance(train_data, list)
assert isinstance(valid_data, list)
assert(len(opt.train_sets) > 0)
assert(len(opt.train_set_orders) > 0)
assert(len(opt.train_set_orders) == len(opt.train_sets)), "The number of train sets and the number of orders must match"
self.print("[INFO] Preparing parameters for Gradient Episodic Memory")
self.gem_params = list()
self.gem_param_names = list()
self.gem_param_size = list()
self.ft_params = list()
for n, p in self.model.named_parameters():
if is_factorized_param(n):
self.ft_params.append(n)
else:
if p.requires_grad:
self.gem_params.append(p)
self.gem_param_names.append(n)
self.gem_param_size.append(p.numel())
self.print("[INFO] Done Preparing parameters.")
# print out the stuff
# for (gem_param, gem_param_name, gem_param_size) in zip(self.gem_params, self.gem_param_names, self.gem_param_size):
# print(gem_param_name, gem_param_size)
# exit()
self.orders = dict()
for order, train_set in zip(opt.train_set_orders, opt.train_sets):
if order not in self.orders:
self.orders[order] = list()
self.orders[order].append(train_set)
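        # Illustrative example with hypothetical option values: opt.train_sets = [2, 3, 1] and
        # opt.train_set_orders = [0, 0, 1] give self.orders = {0: [2, 3], 1: [1]}; order 0 holds the
        # newest datasets while every other order acts as an episodic memory of earlier datasets.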
memory_size = len(self.orders)
self.grads = torch.Tensor(sum(self.gem_param_size), memory_size).cuda()
def eval(self, data):
self.print("[INFO] Running cross-entropy evaluation...", flush=True)
opt = self.opt
rank = self.rank
world_size = self.world_size
# the data iterator creates an epoch iterator
data_iterator = generate_data_iterator(data, rank, world_size, seed=self.opt.seed,
num_workers=1, epoch=1, buffer_size=opt.buffer_size, split_even=False,
dataset_ids=opt.valid_sets)
epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)
data_size = len(data_iterator)
i = 0
self.model.eval()
self.loss_function.eval()
if opt.load_pretrained_classifier:
self.classifier.eval()
total_loss = zero_tensor()
total_words = zero_tensor()
total_correct = zero_tensor()
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
with torch.no_grad():
# while not data_iterator.end_of_epoch():
while i < len(epoch_iterator):
samples = next(epoch_iterator)
def maybe_no_sync():
if isinstance(self.model, DDP_model):
return self.model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
if samples:
with maybe_no_sync():
with autocast(enabled=opt.fp16):
batch = prepare_sample(samples, device=self.device)
targets = batch.get('target_output')
tgt_mask = targets.ne(onmt.constants.PAD)
if opt.load_pretrained_classifier:
layer_states = self.classifier.encode(batch)
else:
layer_states = None
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
mirror=opt.mirror_loss, streaming_state=streaming_state, nce=opt.nce,
pretrained_layer_states=layer_states)
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model, eval=True)
loss_data = loss_dict['data']
correct, total = loss_dict['correct'], loss_dict['total']
# if total != batch.tgt_size:
# # print(batch.get('target').size())
# # print(batch.get('target_output').size())
# targets = batch.get('target_output')
# targets_ = targets.view(-1)
# non_pad_mask = torch.nonzero(targets_.ne(self.loss_function.padding_idx)).squeeze(1)
# labels = targets_.index_select(0, non_pad_mask)
# print(labels, labels.numel(), batch.tgt_size)
assert (total == batch.tgt_size), \
"Process %i, Minibatch %d/%d: Expected %d tokens from the batch, got %d" \
% (self.rank, i, data_size, batch.tgt_size, total)
# print(i, len(data_iterator), total, batch.tgt_size, loss_data)
total_loss.add_(loss_data)
total_words.add_(batch.tgt_size)
total_correct.add_(correct)
i = i + 1
# allreduce the total loss and total words from other processes
self.all_reduce(total_loss, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(total_words, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(total_correct, op=dist.ReduceOp.SUM, group=self.group)
self.model.train()
self.loss_function.train()
if opt.load_pretrained_classifier:
self.classifier.train()
return total_loss.item() / total_words.item(), total_correct.item() / total_words.item()
def train_epoch(self, epoch, resume=False, itr_progress=None):
opt = self.opt
train_data = self.train_data
streaming = opt.streaming
grad_norm = -1
memory_size = len(self.orders)
# Clear the gradients of the model
self.optim.zero_grad(set_to_none=opt.true_zero_grad)
# self.model.module.reset_states()
# note: for Training split_even=True
dataset = train_data
data_iterators = dict()
for order in self.orders:
# self.orders[order] contains the list of training datasets for order
# [0] is by default the currently (newest) added datasets
data_iterators[order] = generate_data_iterator(dataset, self.rank, self.world_size,
seed=self.opt.seed, num_workers=opt.num_workers,
epoch=epoch, buffer_size=opt.buffer_size, split_even=True,
dataset_ids=self.orders[order])
data_iterator = data_iterators[0]
epoch_iterators = dict()
for order in self.orders:
# for the memory datasets, allow for reset_
reset_ = order != 0
epoch_iterators[order] = data_iterators[order].next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
epoch_iterator = epoch_iterators[0]
total_tokens, total_loss, total_words = zero_tensor(), zero_tensor(), zero_tensor()
total_non_pads = zero_tensor()
report_loss, report_tgt_words = zero_tensor(), zero_tensor()
report_ctc_loss = zero_tensor()
report_src_words = zero_tensor()
report_sents = zero_tensor()
report_rec_loss, report_rev_loss, report_mirror_loss = zero_tensor(), zero_tensor(), zero_tensor()
start = time.time()
n_samples = len(data_iterator)
counter = 0
num_accumulated_words = zero_tensor()
num_accumulated_sents = zero_tensor()
report_contrastive_loss = zero_tensor()
streaming_state = None
i = data_iterator.iterations_in_epoch if not isinstance(train_data, list) else epoch_iterator.n_yielded
i = i * self.world_size
while not data_iterator.end_of_epoch():
self.grads.zero_()
# TODO: Sampling samples from the memory datasets
for t in self.orders:
self.optim.zero_grad(set_to_none=opt.true_zero_grad)
if t == 0:
continue
memory_data_iterator = epoch_iterators[t]
if not memory_data_iterator.has_next():
# reset
                    epoch_iterators[t] = data_iterators[t].next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
memory_data_iterator = epoch_iterators[t]
prev_samples = next(memory_data_iterator)
batch = prepare_sample(prev_samples, device=self.device)
targets = batch.get('target_output')
streaming_state = None
with autocast(enabled=opt.fp16):
tgt_mask = targets.ne(onmt.constants.PAD)
if opt.load_pretrained_classifier:
with torch.no_grad():
layer_states = self.classifier.encode(batch)
else:
layer_states = None
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
zero_encoder=opt.zero_encoder,
mirror=opt.mirror_loss, streaming_state=streaming_state,
nce=opt.nce, pretrained_layer_states=layer_states,
adv_ptb_grad=opt.virtual_adversarial_training_mode > 0,
checkpointing_ffn=opt.checkpointing_ffn,
checkpointing_cross_attn=opt.checkpointing_cross_attn,
checkpointing_self_attn=opt.checkpointing_self_attn
)
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model)
loss_data = loss_dict['data']
loss = loss_dict['loss'] # a little trick to avoid gradient overflow with fp16
full_loss = loss
rev_loss_data = None
mirror_loss_data = 0
rec_loss_data = None
correct, total = loss_dict['correct'], loss_dict['total']
optimizer = self.optim.optimizer
# backward to get gradients (and synchronize between gpus)
self.grad_scaler.scale(full_loss).backward()
self.grad_scaler.unscale_(self.optim.optimizer)
                store_grad(self.gem_params, self.grads, self.gem_param_size, t)
self.optim.optimizer.step(fake=True)
# self.grad_scaler.update()
# self.grad_scaler.step(self.optim.optimizer)
self.grad_scaler.update()
self.optim.zero_grad(set_to_none=opt.true_zero_grad)
# zero model grads
# forward and backward pass
# synchronize the gradients and scale !!!!
# put them in the grads
# zero model grads
samples = next(epoch_iterator)
batch = prepare_sample(samples, device=self.device)
targets = batch.get('target_output')
streaming_state = None
oom = zero_tensor()
counter = counter + 1
reduce = True if counter >= opt.update_frequency or i == (n_samples - 1) else False
try:
def maybe_no_sync():
if not reduce and isinstance(self.model, DDP_model):
return self.model.no_sync()
else:
# when we dont reach the updating step, we do not need to synchronize the gradients
# thus disabling the backward grad sync to improve speed
return contextlib.ExitStack() # dummy contextmanager
with maybe_no_sync():
with autocast(enabled=opt.fp16):
tgt_mask = targets.ne(onmt.constants.PAD)
if opt.load_pretrained_classifier:
with torch.no_grad():
layer_states = self.classifier.encode(batch)
else:
layer_states = None
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
zero_encoder=opt.zero_encoder,
mirror=opt.mirror_loss, streaming_state=streaming_state,
nce=opt.nce, pretrained_layer_states=layer_states,
adv_ptb_grad=opt.virtual_adversarial_training_mode > 0,
checkpointing_ffn=opt.checkpointing_ffn,
checkpointing_cross_attn=opt.checkpointing_cross_attn,
checkpointing_self_attn=opt.checkpointing_self_attn
)
batch_size = batch.size
# outputs is a dictionary containing keys/values necessary for loss function
# can be flexibly controlled within models for easier extensibility
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model)
loss_data = loss_dict['data']
loss = loss_dict['loss'] # a little trick to avoid gradient overflow with fp16
full_loss = loss
if opt.ctc_loss > 0.0:
ctc_loss = self.ctc_loss_function(outputs, targets)
ctc_loss_data = ctc_loss.item()
full_loss = full_loss + opt.ctc_loss * ctc_loss
rev_loss_data = None
mirror_loss_data = 0
rec_loss_data = None
correct, total = loss_dict['correct'], loss_dict['total']
optimizer = self.optim.optimizer
grad_list = [p for p in self.model.parameters() if p.requires_grad]
model_input = None
vanilla_logits = None
# grad scaler has to be done outside of the autocast
self.grad_scaler.scale(full_loss).backward(inputs=grad_list)
except RuntimeError as e:
if 'out of memory' in str(e):
print('[WARNING]: ran out of memory on GPU %d' % self.rank, flush=True)
print('Input size at OOM position:', batch.get('source').size(),
batch.get('target').size())
raise e
loss = 0
batch_size = batch.size
src_size = batch.src_size
tgt_size = batch.tgt_size
num_accumulated_words.add_(tgt_size)
num_accumulated_sents.add_(batch_size)
# We only update the parameters after getting gradients from n mini-batches
update_flag = reduce
if update_flag:
# accumulated gradient case, in this case the update frequency
self.all_reduce(num_accumulated_words, op=dist.ReduceOp.SUM, group=self.group)
grad_denom = 1.0
self.grad_scaler.unscale_(self.optim.optimizer)
if self.opt.normalize_gradient:
grad_denom = num_accumulated_words.item() * grad_denom
# the gradient is scaled by world size, so in order to match the model without multiGPU
# we rescale the model parameters w.r.t the world size
# grad_denom = grad_denom / self.world_size
# When we accumulate the gradients, each gradient is already normalized by a constant grad_scaler
if grad_denom != 1:
normalize_gradients(self.model.parameters(), grad_denom)
                # Update the parameters.
# grad_norm = clip_grad_norm(self.model.parameters(), self.opt.max_grad_norm)
with torch.no_grad():
t = 0
store_grad(self.gem_params, self.grads, self.gem_param_size, t)
indx = torch.arange(1, len(self.orders), device=self.gem_params[0].device)
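                    # GEM constraint check: a negative dot product means the current-task gradient
                    # would increase the loss on a memory task, so it is projected back into the
                    # feasible cone by project2cone2 below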
dotp = torch.mm(self.grads[:, 0].unsqueeze(0),
self.grads.index_select(1, indx))
self.margin = 0.5
if (dotp < 0).sum() != 0:
project2cone2(self.grads[:, t].unsqueeze(1),
self.grads.index_select(1, indx), self.margin)
overwrite_grad(self.gem_params, self.grads[:, t],
self.gem_param_size)
self.optim.step(scaler=self.grad_scaler)
self.grad_scaler.update()
self.optim.zero_grad(set_to_none=opt.true_zero_grad)
counter = 0
num_accumulated_words.zero_()
num_accumulated_sents.zero_()
num_updates = self.optim._step
if (opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every) \
or (num_updates >= opt.max_step):
valid_loss, valid_accuracy = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
if self.is_main():
print('Validation perplexity: %g' % valid_ppl)
print('Validation accuracy: %g percent' % (100 * valid_accuracy))
ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
self.save(ep, valid_ppl if opt.save_metrics in ['ppl', 'perplexity'] else 1 - valid_accuracy,
itr=data_iterator)
if num_updates >= opt.max_step:
print('[INFO] Max-training-step reached.')
exit(0)
num_words = tgt_size
report_loss.add_(loss_data)
report_tgt_words.add_(num_words)
report_src_words.add_(src_size)
total_loss.add_(loss_data)
total_words.add_(num_words)
report_sents.add_(1)
# total_tokens += batch.get('target_output').nelement()
# total_non_pads += batch.get('target_output').ne(onmt.constants.PAD).sum().item()
# batch_efficiency = total_non_pads / total_tokens
if opt.reconstruct:
report_rec_loss.add_(rec_loss_data)
if opt.mirror_loss:
report_rev_loss.add_(rev_loss_data)
report_mirror_loss.add_(mirror_loss_data)
if opt.ctc_loss > 0.0:
report_ctc_loss.add_(ctc_loss_data)
# control the index a little bit to ensure the log is always printed
if i == 0 or ((i + 1) % opt.log_interval < self.world_size):
self.all_reduce(report_loss, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(report_tgt_words, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(report_src_words, op=dist.ReduceOp.SUM, group=self.group)
# self.all_reduce(report_sents, op=dist.ReduceOp.SUM, group=self.group)
# self.all_reduce(report_contrastive_loss, op=dist.ReduceOp.SUM, group=self.group)
if self.is_main():
log_string = ("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; grad_norm: %6.4f " %
(epoch, i + 1, len(data_iterator),
math.exp(report_loss.item() / report_tgt_words.item()),
grad_norm))
if opt.mirror_loss:
self.all_reduce(report_rev_loss, op=dist.ReduceOp.SUM, group=self.group)
rev_ppl = math.exp(report_rev_loss.item() / report_tgt_words.item())
log_string += (" rev_ppl: %6.2f ; " % rev_ppl)
log_string += (" mir_loss: %6.2f ; " % (report_mirror_loss / report_tgt_words))
if opt.ctc_loss > 0.0:
# if torch.isinf(report_ctc_loss):
# report_ctc_loss.zero_()
# self.all_reduce(report_ctc_loss, op=dist.ReduceOp.SUM, group=self.group)
ctc_loss = report_ctc_loss.item() / report_tgt_words.item()
log_string += (" ctcloss: %8.2f ; " % ctc_loss)
if opt.contrastive_loss_coeff > 0.0:
#
ctv_loss = report_contrastive_loss.item() / report_tgt_words.item()
log_string += (" ctv_loss: %8.2f ; " % ctv_loss)
log_string += ("lr: %.7f ; updates: %7d; " %
(self.optim.get_learning_rate(),
self.optim._step))
log_string += ("%5.0f src tok/s; %5.0f tgt tok/s; " %
(report_src_words.item() / (time.time() - start),
report_tgt_words.item() / (time.time() - start)))
log_string += ("%s elapsed" %
str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
self.print(log_string, flush=True)
report_loss.zero_()
report_tgt_words.zero_()
report_src_words.zero_()
report_rec_loss.zero_()
report_rev_loss.zero_()
report_mirror_loss.zero_()
report_ctc_loss.zero_()
# report_sents.zero_()
if report_contrastive_loss is not None:
report_contrastive_loss.zero_()
start = time.time()
# increase i by world size
i = i + self.world_size
return total_loss / total_words
def run(self, checkpoint=None):
opt = self.opt
if checkpoint is not None:
# TODO: have loading checkpoints for each process
prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None
if not opt.reset_optim:
itr_progress = None
resume = True
start_epoch = math.floor(checkpoint['epoch']) + 1 if 'epoch' in checkpoint else 1
if start_epoch is None:
start_epoch = 1
else:
itr_progress = None
resume = False
start_epoch = 1
# optim_state_dict = checkpoint['optim']
# # del checkpoint['optim']
del checkpoint
else:
itr_progress = None
resume = False
start_epoch = 1
if opt.load_encoder_from:
self.load_encoder_weight(opt.load_encoder_from)
#
if opt.load_decoder_from:
self.load_decoder_weight(opt.load_decoder_from)
# if we are on a GPU: warm up the memory allocator
if self.cuda:
self.warm_up()
if opt.estimate_fisher_information:
self.start_time = time.time()
self.estimate_fisher(self.train_data)
return
if opt.run_validation_before_training or opt.max_step <= 0:
valid_loss, valid_accuracy = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
if self.is_main():
print('[INFO] Validation perplexity: %g' % valid_ppl, flush=True)
# percent is never used in plural :)
print('[INFO] Validation accuracy: %g percent' % (100 * valid_accuracy))
if opt.max_step <= 0:
if self.is_main():
self.save(0, valid_ppl if opt.save_metrics in ['ppl', 'perplexity'] else 1 - valid_accuracy)
return
self.start_time = time.time()
for epoch in range(start_epoch, start_epoch + opt.epochs):
self.print('')
# (1) train for one epoch on the training set
train_loss = self.train_epoch(epoch, resume=resume, itr_progress=itr_progress)
train_ppl = math.exp(min(train_loss, 100))
self.print('[INFO] Train perplexity: %g' % train_ppl)
# (2) evaluate on the validation set
valid_loss, valid_accuracy = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
if self.is_main():
print('[INFO] Validation perplexity: %g' % valid_ppl)
print('[INFO] Validation accuracy: %g percent' % (100 * valid_accuracy))
self.save(epoch, valid_ppl if opt.save_metrics in ['ppl', 'perplexity'] else 1 - valid_accuracy)
itr_progress = None
resume = False
| 30,109
| 40.077763
| 128
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/train_utils/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/train_utils/mp_trainer.py
|
from __future__ import division
import datetime
import gc
import math
import os
import re
import time
import torch
import copy
import sys
import contextlib
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.multidata_iterator import MultiDataIterator
from onmt.data.dataset import rewrap
from onmt.model_factory import build_model, build_language_model, optimize_model
from onmt.model_factory import init_model_parameters
from onmt.modules.loss import NMTLossFunc, NMTAndCTCLossFunc
from onmt.train_utils.stats import Logger
from onmt.utils import checkpoint_paths, normalize_gradients, clip_grad_norm
from onmt.model_factory import build_model, optimize_model, init_model_parameters
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP_model
from torch.cuda.amp import autocast
import warnings
from onmt.constants import add_tokenidx
import dill
from multiprocessing.managers import ListProxy as ListProxy
# ignore the pytorch -> numpy conversion warnings
warnings.filterwarnings("ignore", category=UserWarning)
def prepare_sample(batch, device=None):
"""
Put minibatch on the corresponding GPU
:param batch:
:param device:
:return:
"""
if isinstance(batch, list):
batch = batch[0]
batch = rewrap(batch)
batch.cuda(fp16=False, device=device)
return batch
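# Usage sketch with placeholder names, mirroring the trainers below:
#   batch = prepare_sample(next(epoch_iterator), device=device_id)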
def is_list(object):
if isinstance(object, list):
return True
elif isinstance(object, ListProxy):
return True
return False
def generate_data_iterator(dataset, rank, world_size, seed,
num_workers=1, epoch=1., buffer_size=0, split_even=True,
dataset_ids=None):
# check if dataset is a list:
if is_list(dataset):
# this is a multidataset
data_iterator = MultiDataIterator(dataset, seed=seed, num_workers=num_workers,
epoch=epoch, buffer_size=buffer_size,
num_shards=world_size, shard_id=rank, split_even=split_even,
dataset_ids=dataset_ids)
else:
data_iterator = DataIterator(dataset, dataset.get_collater(), dataset.get_batches(), seed=seed,
num_workers=num_workers, epoch=epoch, buffer_size=buffer_size,
num_shards=world_size, shard_id=rank, split_even=split_even)
return data_iterator
def zero_tensor(device=None):
if device is None:
return torch.Tensor([0]).cuda()
else:
return torch.Tensor([0]).to(device)
def all_reduce_and_rescale_tensors(tensors, rescale_denom=1,
buffer_size=10485760):
"""All-reduce and rescale tensors in chunks of the specified size.
Args:
tensors: list of Tensors to all-reduce
rescale_denom: denominator for rescaling summed Tensors
buffer_size: all-reduce chunk size in bytes
"""
# buffer size in bytes, determine equiv. # of elements based on data type
buffer_t = tensors[0].new(
math.ceil(buffer_size / tensors[0].element_size())).zero_()
buffer = []
def all_reduce_buffer():
# copy tensors into buffer_t
offset = 0
for t in buffer:
numel = t.numel()
buffer_t[offset:offset + numel].copy_(t.view(-1))
offset += numel
# all-reduce and rescale
torch.distributed.all_reduce(buffer_t[:offset])
buffer_t.div_(rescale_denom)
# copy all-reduced buffer back into tensors
offset = 0
for t in buffer:
numel = t.numel()
t.view(-1).copy_(buffer_t[offset:offset + numel])
offset += numel
with torch.no_grad():
filled = 0
for t in tensors:
sz = t.numel() * t.element_size()
if sz > buffer_size:
# tensor is bigger than buffer, all-reduce and rescale directly
torch.distributed.all_reduce(t)
t.div_(rescale_denom)
elif filled + sz > buffer_size:
# buffer is full, all-reduce and replace buffer with grad
all_reduce_buffer()
buffer = [t]
filled = sz
else:
# add tensor to buffer
buffer.append(t)
filled += sz
if len(buffer) > 0:
all_reduce_buffer()
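# Usage sketch with placeholder names (every rank must call this with identically shaped tensors):
#   grads = [p.grad.data for p in model.parameters() if p.grad is not None]
#   all_reduce_and_rescale_tensors(grads, rescale_denom=world_size)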
class Trainer(object):
# def __init__(self, device, train_data, valid_data, dicts, opt, constants=None, setup_optimizer=True):
def __init__(self, device, dicts, opt, constants=None, setup_optimizer=True):
"""
:param model:
:param device: int (GPU id)
:param loss_function:
:param train_data:
:param valid_data:
:param dicts:
:param opt:
"""
self.device = device
opt.node_rank = 0
opt.nodes = 1
self.world_size = len(opt.gpus)
self.constants = dill.loads(constants) if constants is not None else None
# in the case of single node distributed, it should equal self.device
self.rank = self.device
# make a group to later use with self.all_reduce
self.group = dist.group.WORLD
self.print("[INFO] Training Options:", opt)
if self.world_size > 1:
dist.init_process_group(backend='nccl', init_method='env://', world_size=self.world_size, rank=self.rank)
self.model = None
self.dicts = dicts
self.opt = opt
self.cuda = (len(opt.gpus) >= 1 and opt.gpus[0] >= 0)
if self.cuda:
torch.cuda.set_device(self.device)
assert self.cuda, "[ERROR] Training is only available on GPUs."
self.start_time = 0
torch.manual_seed(self.opt.seed)
        # note: we must start creating models after creating the processes
# for some reason passing a pre-created model to a process creates a "pickle" error
if self.is_main():
print("[INFO] Building models .... ", flush=True)
print("Languages: ", dicts['langs'], flush=True)
model = build_model(opt, dicts, False, self.constants)
""" Building the loss function """
tgt_pad = dicts['tgt_pad']
if opt.ctc_loss > 0.0:
from onmt.speech.ctc_loss import CTC
self.ctc_loss_function = CTC(dicts['tgt'].size(), opt.model_size, 0.0, reduce=True,
padding_idx=tgt_pad, blank_idx=0)
if opt.predict_language:
from onmt.models.speech_recognizer.lid_loss import CrossEntropyLIDLoss
self.lid_loss_function = CrossEntropyLIDLoss(opt.n_languages, label_smoothing=0.0)
if opt.nce:
from onmt.modules.nce.nce_loss import NCELoss
loss_function = NCELoss(opt.model_size, dicts['tgt'].size(), noise_ratio=opt.nce_noise,
logz=9, label_smoothing=opt.label_smoothing)
else:
loss_function = NMTLossFunc(opt.model_size, dicts['tgt'].size(),
label_smoothing=opt.label_smoothing,
mirror=opt.mirror_loss,
padding_idx=tgt_pad)
# This function replaces modules with the more optimized counterparts so that it can run faster
# Currently exp with LayerNorm
# distributed is required to convert BatchNorm to SyncBatchNorm for DDP
optimize_model(model, distributed=(self.world_size > 1))
if opt.load_pretrained_classifier:
from onmt.model_factory import build_classifier
self.print("Loading pretrained external classifier ...", flush=True)
classifier_checkpoint = torch.load(opt.load_pretrained_classifier,
map_location=lambda storage, loc: storage)
classifier_opt = classifier_checkpoint['opt']
classifier_dicts = classifier_checkpoint['dicts']
self.classifier = build_classifier(classifier_opt, classifier_dicts)
self.classifier.load_state_dict(classifier_checkpoint['model'])
init_model_parameters(model, opt)
self.model = model
self.loss_function = loss_function
self.grad_scaler = torch.cuda.amp.GradScaler()
if opt.load_from:
checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)
try:
self.model.load_state_dict(checkpoint['model'])
except RuntimeError as e:
self.model.load_state_dict(checkpoint['model'], strict=True)
# if 'scaler' in checkpoint and checkpoint['scaler'] is not None:
# self.grad_scaler.load_state_dict(checkpoint['scaler'])
if self.cuda:
self.loss_function = self.loss_function.cuda(device=self.device)
self.model = self.model.cuda(device=self.device)
if opt.ctc_loss > 0.0:
self.ctc_loss_function = self.ctc_loss_function.cuda(device=self.device)
if opt.load_pretrained_classifier:
self.classifier = self.classifier.cuda(device=self.device)
# Ensure that the distributed copies have the same initial parameters
# Manual seed may not work the same for different GPU models.
# if self.world_size > 1:
# params = [p for p in self.model.parameters()]
#
# with torch.no_grad():
# if not self.is_main():
# # zero everything except for the main model
# for p in params:
# p.zero_()
# else:
# for p in params:
# p.add_(0)
# run all_reduce to ensure that all models have exactly the same parameters
# if self.world_size > 1:
# params = [p for p in self.model.parameters()]
# all_reduce_and_rescale_tensors(params, 1)
if setup_optimizer:
self.optim = onmt.Optim(opt)
self.optim.set_parameters(self.model.parameters())
if self.is_main():
print("[INFO] Optimizer: ", self.optim.optimizer)
if opt.load_from and not opt.reset_optim:
if 'optim' in checkpoint and checkpoint['optim'] is not None and not opt.reset_optim:
self.optim.load_state_dict(checkpoint['optim'])
if opt.starting_step > 0:
print("[INFO] Optimizer starting from state %d " % opt.starting_step)
self.optim.set_starting_step(opt.starting_step)
if self.world_size > 1:
find_unused_parameters = opt.find_unused_parameters
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.rank],
output_device=self.rank,
find_unused_parameters=find_unused_parameters)
if self.is_main():
nparams = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("[INFO] Total number of trainable paramaters: %d" % nparams)
nparams = sum(p.numel() for p in model.parameters())
print("[INFO] Total number of paramaters: %d" % nparams)
if opt.load_fisher:
if self.is_main():
print("[INFO] Loading fisher information from: %s" % opt.load_fisher)
self.fisher_info = torch.load(opt.load_fisher, map_location=lambda storage, loc: storage)
if self.cuda:
for n in self.fisher_info['mean']:
self.fisher_info['mean'][n] = self.fisher_info['mean'][n].cuda()
for n in self.fisher_info['fisher_diag']:
self.fisher_info['fisher_diag'][n] = self.fisher_info['fisher_diag'][n].cuda()
else:
self.fisher_info = None
print("[INFO] Process %d ready." % self.rank, flush=True)
def is_main(self):
return self.rank == 0
def all_reduce(self, tensor, **kwargs):
if self.world_size > 1:
dist.all_reduce(tensor, **kwargs)
return
def print(self, *content, flush=False):
"""
A helper function to print only on the main process
:param flush:
:param content:
:return:
"""
if self.is_main():
print(*content, flush=flush)
else:
return
def load_encoder_weight(self, checkpoint_file, wav2vec=False):
if not wav2vec:
print("Loading pretrained Encoder Weights from %s" % checkpoint_file, flush=True)
checkpoint = torch.load(checkpoint_file, map_location=lambda storage, loc: storage)
pretrained_model = build_model(checkpoint['opt'], checkpoint['dicts'], False, self.constants)
pretrained_model.load_state_dict(checkpoint['model'])
model = self.model.module if self.world_size > 1 else self.model
model.load_encoder_weights(pretrained_model)
else:
checkpoint = torch.load(checkpoint_file, map_location=lambda storage, loc: storage)
model = self.model.module if self.world_size > 1 else self.model
model.load_encoder_weights(checkpoint)
return
def load_decoder_weight(self, checkpoint_file):
self.print("Loading pretrained models from %s" % checkpoint_file)
checkpoint = torch.load(checkpoint_file, map_location=lambda storage, loc: storage)
chkpoint_dict = checkpoint['dicts']
pretrained_model = build_model(checkpoint['opt'], chkpoint_dict, False, self.constants)
pretrained_model.load_state_dict(checkpoint['model'])
self.print("Loading pretrained decoder weights ...")
        # first we have to remove the embeddings, which probably have a different size ...
pretrained_word_emb = pretrained_model.decoder.word_lut
pretrained_model.decoder.word_lut = None
pretrained_lang_emb = pretrained_model.decoder.language_embeddings
pretrained_model.decoder.language_embeddings = None
# actually we assume that two decoders have the same language embeddings...
untrained_word_emb = self.model.decoder.word_lut
self.model.decoder.word_lut = None
untrained_lang_emb = self.model.decoder.language_embeddings
self.model.decoder.language_embeddings = None
decoder_state_dict = pretrained_model.decoder.state_dict()
self.model.decoder.load_state_dict(decoder_state_dict)
# now we load the embeddings ....
n_copies = 0
for token in self.dicts['tgt'].labelToIdx:
untrained_id = self.dicts['tgt'].labelToIdx[token]
if token in chkpoint_dict['tgt'].labelToIdx:
pretrained_id = chkpoint_dict['tgt'].labelToIdx[token]
untrained_word_emb.weight.data[untrained_id].copy_(pretrained_word_emb.weight.data[pretrained_id])
self.model.generator[0].linear.bias.data[untrained_id].copy_(pretrained_model
.generator[0].linear.bias.data[
pretrained_id])
n_copies += 1
self.print("Copied embedding for %d words" % n_copies)
self.model.decoder.word_lut = untrained_word_emb
# now we load the language embeddings ...
if pretrained_lang_emb and untrained_lang_emb and 'langs' in chkpoint_dict:
for lang in self.dicts['langs']:
untrained_id = self.dicts['langs'][lang]
if lang in chkpoint_dict['langs']:
pretrained_id = chkpoint_dict['langs'][lang]
untrained_lang_emb.weight.data[untrained_id].copy_(pretrained_lang_emb.weight.data[pretrained_id])
self.model.decoder.language_embeddings = untrained_lang_emb
def warm_up(self, train_data):
"""
        Warm up the memory allocator by attempting to fit the largest batch
:return:
"""
batch = train_data[0].get_largest_batch(bsz=-1, src_size=-1, tgt_size=-1) \
if is_list(train_data) \
else train_data.get_largest_batch(bsz=328, src_size=319520, tgt_size=18)
opt = self.opt
if self.cuda:
batch.cuda(fp16=False)
self.model.train()
self.loss_function.train()
loss = 0
for p in self.model.parameters():
loss = loss + p.sum() * 0
# this will create zero grads
loss.backward()
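        # (backpropagating this zero-valued sum touches every parameter once, so the .grad
        #  buffers are allocated up front before the real, memory-hungry forward/backward pass)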
# self.model.zero_grad()
oom = False
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
# try:
with autocast(enabled=opt.fp16):
targets = batch.get('target_output')
tgt_mask = None
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
zero_encoder=opt.zero_encoder,
mirror=opt.mirror_loss, streaming_state=streaming_state,
nce=opt.nce, checkpointing_ffn=opt.checkpointing_ffn,
checkpointing_cross_attn=opt.checkpointing_cross_attn,
checkpointing_self_attn=opt.checkpointing_self_attn)
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model)
loss_data = loss_dict['data']
loss = loss_dict['loss'] # a little trick to avoid gradient overflow with fp16
full_loss = loss
if opt.ctc_loss > 0.0:
ctc_loss = self.ctc_loss_function(outputs, targets)
ctc_loss_data = ctc_loss.item()
full_loss = full_loss + opt.ctc_loss * ctc_loss
if opt.mirror_loss:
rev_loss = loss_dict['rev_loss']
mirror_loss = loss_dict['mirror_loss']
full_loss = full_loss + rev_loss + mirror_loss
if opt.predict_lang:
lid_loss = loss_dict['lid']
full_loss = full_loss + lid_loss
lid_loss_data = lid_loss.item()
else:
lid_loss_data = 0
# reconstruction loss
if opt.reconstruct:
rec_loss = loss_dict['rec_loss']
rec_loss = rec_loss
full_loss = full_loss + rec_loss
if opt.lfv_multilingual:
lid_logits = outputs['lid_logits']
lid_labels = batch.get('target_lang')
lid_loss_function = self.loss_function.get_loss_function('lid_loss')
lid_loss = lid_loss_function(lid_logits, lid_labels)
full_loss = full_loss + lid_loss
optimizer = self.optim.optimizer
# Warning: self-defined parameter list
parameter_list = [p for p in self.model.parameters() if p.requires_grad]
# Later if we need to do Adversarial Perturbation:
self.grad_scaler.scale(full_loss).backward()
loss = 0
for p in parameter_list:
loss += p.sum() * 0.0
loss.backward()
for p in self.model.parameters():
if p.grad is not None:
p.grad.data.zero_()
# self.model.zero_grad()
# self.optim.zero_grad()
# self.optim.step()
# self.optim.reset()
# except RuntimeError as e:
# if 'out of memory' in str(e):
# oom = True
# # else:
# print("[INFO] Warning: out-of-memory in warming up. "
# "This is due to the largest batch is too big for the GPU.",
# flush=True)
# raise e
# else:
self.print("[INFO] Warming up successfully.", flush=True)
def save(self, epoch, valid_ppl, itr=None):
opt = self.opt
model = self.model
dicts = self.dicts
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model_state_dict = self.model.module.state_dict()
else:
model_state_dict = self.model.state_dict()
optim_state_dict = self.optim.state_dict()
if itr:
itr_state_dict = itr.state_dict()
else:
itr_state_dict = None
# drop a checkpoint
checkpoint = {
'model': model_state_dict,
'dicts': dicts,
'opt': opt,
'epoch': epoch,
'itr': itr_state_dict,
'optim': optim_state_dict,
'scaler': self.grad_scaler.state_dict()
}
file_name = '%s_ppl_%.6f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)
print('Writing to %s' % file_name)
torch.save(checkpoint, file_name)
# check the save directory here
checkpoint_dir = os.path.dirname(opt.save_model)
existed_save_files = checkpoint_paths(checkpoint_dir)
for save_file in existed_save_files[opt.keep_save_files:]:
print(" * Deleting old save file %s ...." % save_file)
os.remove(save_file)
def eval(self, data):
self.print("[INFO] Running cross-entropy evaluation...", flush=True)
opt = self.opt
rank = self.rank
world_size = self.world_size
# the data iterator creates an epoch iterator
data_iterator = generate_data_iterator(data, rank, world_size, seed=self.opt.seed,
num_workers=1, epoch=1, buffer_size=opt.buffer_size, split_even=False,
dataset_ids=opt.valid_sets)
epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)
data_size = len(data_iterator)
i = 0
self.model.eval()
self.loss_function.eval()
if opt.load_pretrained_classifier:
self.classifier.eval()
total_loss = zero_tensor()
total_words = zero_tensor()
total_correct = zero_tensor()
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
with torch.no_grad():
# while not data_iterator.end_of_epoch():
while i < len(epoch_iterator):
samples = next(epoch_iterator)
def maybe_no_sync():
if isinstance(self.model, DDP_model):
return self.model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
if samples:
with maybe_no_sync():
with autocast(enabled=opt.fp16):
batch = prepare_sample(samples, device=self.device)
targets = batch.get('target_output')
tgt_mask = targets.ne(onmt.constants.PAD)
if opt.load_pretrained_classifier:
layer_states = self.classifier.encode(batch)
else:
layer_states = None
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
mirror=opt.mirror_loss, streaming_state=streaming_state, nce=opt.nce,
pretrained_layer_states=layer_states)
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model, eval=True)
loss_data = loss_dict['data']
correct, total = loss_dict['correct'], loss_dict['total']
# if total != batch.tgt_size:
# # print(batch.get('target').size())
# # print(batch.get('target_output').size())
# targets = batch.get('target_output')
# targets_ = targets.view(-1)
# non_pad_mask = torch.nonzero(targets_.ne(self.loss_function.padding_idx)).squeeze(1)
# labels = targets_.index_select(0, non_pad_mask)
# print(labels, labels.numel(), batch.tgt_size)
assert (total == batch.tgt_size), \
"Process %i, Minibatch %d/%d: Expected %d tokens from the batch, got %d" \
% (self.rank, i, data_size, batch.tgt_size, total)
# print(i, len(data_iterator), total, batch.tgt_size, loss_data)
total_loss.add_(loss_data)
total_words.add_(batch.tgt_size)
total_correct.add_(correct)
i = i + 1
# allreduce the total loss and total words from other processes
self.all_reduce(total_loss, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(total_words, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(total_correct, op=dist.ReduceOp.SUM, group=self.group)
self.model.train()
self.loss_function.train()
if opt.load_pretrained_classifier:
self.classifier.train()
return total_loss.item() / total_words.item(), total_correct.item() / total_words.item()
def train_epoch(self, train_data, valid_data, epoch, resume=False, itr_progress=None):
opt = self.opt
streaming = opt.streaming
grad_norm = -1
# Clear the gradients of the model
self.optim.zero_grad(set_to_none=opt.true_zero_grad)
# self.model.module.reset_states()
# note: for Training split_even=True
dataset = train_data
data_iterator = generate_data_iterator(dataset, self.rank, self.world_size,
seed=self.opt.seed, num_workers=opt.num_workers,
epoch=epoch, buffer_size=opt.buffer_size, split_even=True,
dataset_ids=opt.train_sets)
# TODO: fix resume which is currently buggy
if resume:
data_iterator.load_state_dict(itr_progress)
epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
total_tokens, total_loss, total_words = zero_tensor(), zero_tensor(), zero_tensor()
total_non_pads = zero_tensor()
report_loss, report_tgt_words = zero_tensor(), zero_tensor()
report_ctc_loss = zero_tensor()
report_ewc_loss = zero_tensor()
report_ewc_count = 0
report_src_words = zero_tensor()
report_sents = zero_tensor()
report_rec_loss, report_rev_loss, report_mirror_loss = zero_tensor(), zero_tensor(), zero_tensor()
report_enc_lid_loss = zero_tensor()
report_enc_lid_count = 0
report_dec_lid_loss = zero_tensor()
report_dec_lid_count = 0
start = time.time()
n_samples = len(data_iterator)
counter = 0
num_accumulated_words = zero_tensor()
num_accumulated_sents = zero_tensor()
report_contrastive_loss = zero_tensor()
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
ewc_importance = opt.ewc_importance
if ewc_importance > 0:
assert self.fisher_info is not None
if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
model = self.model.module
else:
model = self.model
# parameters = {n: p for n, p in model.named_parameters() if p.requires_grad}
parameters = dict()
for n, p in model.named_parameters():
if n in self.fisher_info['mean'] and p.requires_grad:
parameters[n] = p
i = data_iterator.iterations_in_epoch if not is_list(train_data) else epoch_iterator.n_yielded
i = i * self.world_size
while not data_iterator.end_of_epoch():
# curriculum = (epoch < opt.curriculum)
# this batch generator is not very clean atm
# TODO: move everything to the multiGPU trainer
samples = next(epoch_iterator)
batch = prepare_sample(samples, device=self.device)
targets = batch.get('target_output')
if opt.streaming:
if train_data.is_new_stream():
streaming_state = self.model.init_stream()
else:
streaming_state = None
# TODO: dealing with oom during distributed training
oom = zero_tensor()
counter = counter + 1
reduce = True if counter >= opt.update_frequency or i == (n_samples - 1) else False
try:
def maybe_no_sync():
if not reduce and isinstance(self.model, DDP_model):
return self.model.no_sync()
else:
                        # when we don't reach the updating step, we do not need to synchronize the gradients
# thus disabling the backward grad sync to improve speed
return contextlib.ExitStack() # dummy contextmanager
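                # background: DistributedDataParallel all-reduces gradients on every backward pass,
                # so wrapping the non-final accumulation steps in no_sync() skips those all-reduces
                # and gradients are only synchronized once per effective update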
with maybe_no_sync():
with autocast(enabled=opt.fp16):
tgt_mask = targets.ne(onmt.constants.PAD)
if opt.load_pretrained_classifier:
with torch.no_grad():
layer_states = self.classifier.encode(batch)
else:
layer_states = None
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
zero_encoder=opt.zero_encoder,
mirror=opt.mirror_loss, streaming_state=streaming_state,
nce=opt.nce, pretrained_layer_states=layer_states,
adv_ptb_grad=opt.virtual_adversarial_training_mode > 0,
checkpointing_ffn=opt.checkpointing_ffn,
checkpointing_cross_attn=opt.checkpointing_cross_attn,
checkpointing_self_attn=opt.checkpointing_self_attn
)
batch_size = batch.size
# outputs is a dictionary containing keys/values necessary for loss function
# can be flexibly controlled within models for easier extensibility
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model)
loss_data = loss_dict['data']
loss = loss_dict['loss'] # a little trick to avoid gradient overflow with fp16
full_loss = loss
if opt.ctc_loss > 0.0:
ctc_loss = self.ctc_loss_function(outputs, targets)
ctc_loss_data = ctc_loss.item()
full_loss = full_loss + opt.ctc_loss * ctc_loss
if opt.mirror_loss:
rev_loss = loss_dict['rev_loss']
rev_loss_data = loss_dict['rev_loss_data']
mirror_loss = loss_dict['mirror_loss']
full_loss = full_loss + rev_loss + mirror_loss
mirror_loss_data = loss_dict['mirror_loss'].item()
else:
rev_loss_data = None
mirror_loss_data = 0
if opt.predict_language:
enc_pred_lang = outputs['enc_pred_lang']
enc_mask = outputs['src_mask']
enc_lid_loss = self.lid_loss_function(enc_pred_lang, batch.get("source_lang"), enc_mask)
dec_pred_lang = outputs['dec_pred_lang']
# dec_mask = outputs['target_mask']
# dec_mask = targets.eq(onmt.constants.PAD)
dec_mask = batch.get('target_input_selfattn_mask')
dec_lid_loss = self.lid_loss_function(dec_pred_lang, batch.get("target_lang"), dec_mask)
full_loss = full_loss + 0.01 * (enc_lid_loss + dec_lid_loss)
report_enc_lid_loss.add_(enc_lid_loss.item())
report_enc_lid_count += enc_mask.ne(1).int().sum().item()
# print(dec_mask)
# print(dec_mask.ne(1).int().sum().item())
report_dec_lid_loss.add_(dec_lid_loss.item())
report_dec_lid_count += dec_mask.ne(1).int().sum().item()
else:
enc_lid_loss = None
enc_lid_loss_data = None
dec_lid_loss = None
dec_lid_loss_data = None
# reconstruction loss
if opt.reconstruct:
rec_loss = loss_dict['rec_loss']
rec_loss = rec_loss
full_loss = full_loss + rec_loss
rec_loss_data = loss_dict['rec_loss_data']
else:
rec_loss_data = None
if opt.contrastive_loss_coeff > 0 and 'contrastive_loss' in outputs:
contrastive_loss = outputs['contrastive_loss']
full_loss = full_loss + opt.contrastive_loss_coeff * contrastive_loss
report_contrastive_loss.add_(contrastive_loss.item())
correct, total = loss_dict['correct'], loss_dict['total']
optimizer = self.optim.optimizer
# ewc_penalty = ewc_penalty + (torch.square(parameters[n] - self.fisher_info['mean'][n]) *
# self.fisher_info['fisher_diag'][n]).sum()
# full_loss += ewc_penalty * ewc_importance
# TODO for adversarial:
grad_list = [p for p in self.model.parameters() if p.requires_grad]
if opt.virtual_adversarial_training_mode > 0:
# if we use virtual adversarial training: add the input to the list of gradient to take
model_input = outputs['source']
vanilla_logits = outputs['logprobs']
grad_list += [model_input]
else:
model_input = None
vanilla_logits = None
# grad scaler has to be done outside of the autocast
self.grad_scaler.scale(full_loss).backward()
# del outputs
if opt.virtual_adversarial_training_mode > 0:
# run forward pass one more time
# the perturbation is the gradient of the model w.r.t the input
perturb = model_input.grad.data.new(*model_input.size()).copy_(model_input.grad.data)
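                        # note: the perturbation here is simply a copy of the raw input gradient;
                        # classical virtual adversarial training would additionally normalize and
                        # scale this direction, which is not done at this point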
with autocast(enabled=opt.fp16):
assert model_input.grad is not None
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
pretrained_layer_states=layer_states,
input_ptb=perturb)
full_loss = None
# compute loss for mode 2 3
# In this mode, we add noise to the input and minimise the loss given the noisy inputs
if opt.virtual_adversarial_training_mode in [2, 3]:
loss_dict = self.loss_function(outputs, targets, model=self.model)
full_loss = loss_dict['loss']
# for mode 1, 3 compute kl divergence
# In this mode, we minimise the kl divergence between the model output with and without noise
if opt.virtual_adversarial_training_mode in [1, 3]:
logits = outputs['logprobs']
with torch.no_grad():
vanilla_probs = \
F.softmax(vanilla_logits.float().view(-1, vanilla_logits.size(-1)), dim=-1)
vanilla_probs.detach_()
                                noisy_log_probs = F.log_softmax(logits.float().view(-1, logits.size(-1)),
                                                                dim=-1)
                                # Note: with the kl_div_loss we don't backward w.r.t the vanilla probs
                                # (F.kl_div expects log-probabilities as its first argument)
                                kl_div_loss = F.kl_div(noisy_log_probs, vanilla_probs, reduction='sum')
if full_loss is None:
full_loss = kl_div_loss
else:
full_loss += kl_div_loss
# Now we only get the gradients for the weights of the network
grad_list = [p for p in self.model.parameters() if p.requires_grad]
self.grad_scaler.scale(full_loss).backward()
del outputs
# EWC training: no need for autograd here?
if self.optim._step % opt.ewc_decay_every == 0:
ewc_importance = ewc_importance / opt.ewc_decay_scale
                # only run this ewc every time we reduce
# if isinstance(self.model, DDP_model):
# torch.cuda.synchronize(device=self.rank)
except RuntimeError as e:
if 'out of memory' in str(e):
print('[WARNING]: ran out of memory on GPU %d' % self.rank, flush=True)
print('Input size at OOM position:', batch.get('source').size(),
batch.get('target').size())
# recovering mechanism doesn't work at the moment
# loss = 0
# for p in self.model.parameters():
# if p.grad is not None:
# del p.grad # free some memory
# loss = loss + p.sum() * 0
# torch.cuda.empty_cache()
#
# if opt.streaming: # reset stream in this case ...
# streaming_state = self.model.init_stream()
#
#
# # backward to actually free the graph
# # self.grad_scaler.scale(loss).backward()
# oom.add_(1)
raise e
# connecting the oom signal from different gpus
# self.all_reduce(oom, op=dist.ReduceOp.SUM, group=self.group)
# # if OOM: all gpus reset grad and reset counter
# # or maybe all-reduce grad?
# if oom.item() > 0:
# # reset counter
# self.model.zero_grad()
# self.optim.zero_grad()
# counter = 0
# oom.zero_()
batch_size = batch.size
src_size = batch.src_size
tgt_size = batch.tgt_size
num_accumulated_words.add_(tgt_size)
num_accumulated_sents.add_(batch_size)
# We only update the parameters after getting gradients from n mini-batches
update_flag = reduce
if update_flag:
# accumulated gradient case, in this case the update frequency
self.all_reduce(num_accumulated_words, op=dist.ReduceOp.SUM, group=self.group)
grad_denom = 1.0
self.grad_scaler.unscale_(self.optim.optimizer)
if self.opt.normalize_gradient:
grad_denom = num_accumulated_words.item() * grad_denom
else:
grad_denom = 1
# the gradient is scaled by world size, so in order to match the model without multiGPU
# we rescale the model parameters w.r.t the world size
# grad_denom = grad_denom / self.world_size
# When we accumulate the gradients, each gradient is already normalized by a constant grad_scaler
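                # illustrative example: with update_frequency = 4 and normalize_gradient set,
                # grad_denom is the total number of target tokens accumulated over those 4
                # mini-batches (summed across workers), so the update behaves like a per-token
                # average of the loss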
if grad_denom != 1:
normalize_gradients(self.model.parameters(), grad_denom)
                # Update the parameters.
grad_norm = clip_grad_norm(self.model.parameters(), self.opt.max_grad_norm)
if ewc_importance > 0:
ewc_penalty = 0
if self.optim._step >= opt.ewc_delay:
# if at the moment weights/gradients/mean and fisher_diag are all the same and unscaled
# then we don't need to synchronize the gradients
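                        # the penalty accumulated below implements the (diagonal) EWC loss:
                        #   L_ewc = ewc_importance * sum_i F_i * (theta_i - theta_i*)^2
                        # e.g. F_i = 0.5, theta_i = 1.2, theta_i* = 1.0 contributes
                        #   0.5 * 0.2^2 = 0.02 before scaling by ewc_importance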
with self.model.no_sync():
for n, p in self.model.named_parameters():
if isinstance(self.model, DDP_model):
n = n[len("module."):]
if n in self.fisher_info['mean']:
penalty = self.fisher_info['fisher_diag'][n] * \
torch.square(p - self.fisher_info['mean'][n].data)
ewc_penalty = ewc_penalty + penalty.sum()
loss = ewc_penalty * ewc_importance
ewc_loss = ewc_penalty.item()
# accumulate the gradients from EWC loss
loss.backward()
report_ewc_loss.add_(ewc_loss)
report_ewc_count += 1
self.optim.step(scaler=self.grad_scaler)
self.grad_scaler.update()
self.optim.zero_grad(set_to_none=opt.true_zero_grad)
counter = 0
num_accumulated_words.zero_()
num_accumulated_sents.zero_()
num_updates = self.optim._step
if (opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every) \
or (num_updates >= opt.max_step):
valid_loss, valid_accuracy = self.eval(valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
if self.is_main():
print('Validation perplexity: %g' % valid_ppl)
print('Validation accuracy: %g percent' % (100 * valid_accuracy))
ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
self.save(ep, valid_ppl if opt.save_metrics in ['ppl', 'perplexity'] else 1 - valid_accuracy,
itr=data_iterator)
if num_updates >= opt.max_step:
print('[INFO] Max-training-step reached.')
exit(0)
num_words = tgt_size
report_loss.add_(loss_data)
report_tgt_words.add_(num_words)
report_src_words.add_(src_size)
total_loss.add_(loss_data)
total_words.add_(num_words)
report_sents.add_(1)
# total_tokens += batch.get('target_output').nelement()
# total_non_pads += batch.get('target_output').ne(onmt.constants.PAD).sum().item()
# batch_efficiency = total_non_pads / total_tokens
if opt.reconstruct:
report_rec_loss.add_(rec_loss_data)
if opt.mirror_loss:
report_rev_loss.add_(rev_loss_data)
report_mirror_loss.add_(mirror_loss_data)
if opt.ctc_loss > 0.0:
report_ctc_loss.add_(ctc_loss_data)
# control the index a little bit to ensure the log is always printed
if i == 0 or ((i + 1) % opt.log_interval < self.world_size):
self.all_reduce(report_loss, op=dist.ReduceOp.SUM, group=self.group)
# self.all_reduce(report_ewc_loss, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(report_tgt_words, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(report_src_words, op=dist.ReduceOp.SUM, group=self.group)
# self.all_reduce(report_sents, op=dist.ReduceOp.SUM, group=self.group)
# self.all_reduce(report_contrastive_loss, op=dist.ReduceOp.SUM, group=self.group)
if self.is_main():
log_string = ("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; grad_norm: %6.4f " %
(epoch, i + 1, len(data_iterator),
math.exp(report_loss.item() / report_tgt_words.item()),
grad_norm))
# if opt.reconstruct:
# self.all_reduce(report_rec_loss, op=dist.ReduceOp.SUM, group=self.group)
# rec_ppl = math.exp(report_rec_loss.item() / report_src_words.item())
# log_string += (" rec_ppl: %6.2f ; " % rec_ppl)
if opt.mirror_loss:
self.all_reduce(report_rev_loss, op=dist.ReduceOp.SUM, group=self.group)
rev_ppl = math.exp(report_rev_loss.item() / report_tgt_words.item())
log_string += (" rev_ppl: %6.2f ; " % rev_ppl)
log_string += (" mir_loss: %6.2f ; " % (report_mirror_loss / report_tgt_words))
if opt.ctc_loss > 0.0:
# if torch.isinf(report_ctc_loss):
# report_ctc_loss.zero_()
# self.all_reduce(report_ctc_loss, op=dist.ReduceOp.SUM, group=self.group)
ctc_loss = report_ctc_loss.item() / report_tgt_words.item()
log_string += (" ctcloss: %8.2f ; " % ctc_loss)
if opt.contrastive_loss_coeff > 0.0:
#
ctv_loss = report_contrastive_loss.item() / report_tgt_words.item()
log_string += (" ctv_loss: %8.2f ; " % ctv_loss)
if ewc_importance > 0.0:
try:
_ewc_loss = report_ewc_loss.item() / report_ewc_count
except ZeroDivisionError:
_ewc_loss = float('nan')
log_string += (" ewcloss: %8.8f ; " % _ewc_loss)
if opt.predict_language:
try:
_enc_lid_loss = report_enc_lid_loss.item() / report_enc_lid_count
_dec_lid_loss = report_dec_lid_loss.item() / report_dec_lid_count
except ZeroDivisionError:
_enc_lid_loss = float('nan')
_dec_lid_loss = float('nan')
log_string += (" enc_lidloss: %8.8f ; " % _enc_lid_loss)
log_string += (" dec_lidloss: %8.8f ; " % _dec_lid_loss)
log_string += ("lr: %.7f ; updates: %7d; " %
(self.optim.get_learning_rate(),
self.optim._step))
log_string += ("%5.0f src tok/s; %5.0f tgt tok/s; " %
(report_src_words.item() / (time.time() - start),
report_tgt_words.item() / (time.time() - start)))
log_string += ("%s elapsed" %
str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
self.print(log_string, flush=True)
report_loss.zero_()
report_tgt_words.zero_()
report_src_words.zero_()
report_rec_loss.zero_()
report_rev_loss.zero_()
report_mirror_loss.zero_()
report_ctc_loss.zero_()
report_ewc_loss.zero_()
report_ewc_count = 0
# report_sents.zero_()
if report_contrastive_loss is not None:
report_contrastive_loss.zero_()
start = time.time()
# increase i by world size
i = i + self.world_size
return total_loss / total_words
def estimate_fisher(self, data):
"""
This function estimates the Fisher Information (only diagonal) on a data
:param data: train or dev data
:return: fisher
"""
def is_factorize_params(p_name):
# feed forward neural net
if p_name.endswith(".r_i") or p_name.endswith(".s_i") \
or p_name.endswith(".r_o") or p_name.endswith(".s_o") \
or p_name.endswith(".r_p") or p_name.endswith(".s_p"):
return True
if p_name.endswith(".r_q") or p_name.endswith(".s_q") \
or p_name.endswith(".r_o") or p_name.endswith(".s_o") \
or p_name.endswith(".r_kv") or p_name.endswith(".s_kv"):
return True
if p_name.endswith(".rm_q") or p_name.endswith(".sm_q") \
or p_name.endswith(".rm_o") or p_name.endswith(".sm_o") \
or p_name.endswith(".rm_kv") or p_name.endswith(".sm_kv"):
return True
if p_name.endswith(".sub_r_i") or p_name.endswith(".sub_s_i") \
or p_name.endswith(".sub_r_o") or p_name.endswith(".sub_s_o") \
or p_name.endswith(".sub_r_p") or p_name.endswith(".sub_s_p"):
return True
if p_name.endswith(".sub_r_q") or p_name.endswith(".sub_s_q") \
or p_name.endswith(".sub_r_o") or p_name.endswith(".sub_s_o") \
or p_name.endswith(".sub_r_kv") or p_name.endswith(".sub_s_kv"):
return True
if p_name.endswith(".sub_rm_q") or p_name.endswith(".sub_sm_q") \
or p_name.endswith(".sub_rm_o") or p_name.endswith(".sub_sm_o") \
or p_name.endswith(".sub_rm_kv") or p_name.endswith(".sub_sm_kv"):
return True
if p_name.endswith(".rm_i") or p_name.endswith(".sm_i") or \
p_name.endswith(".rm_o") or p_name.endswith(".sm_o") or \
p_name.endswith(".rm_p") or p_name.endswith(".sm_p"):
return True
if p_name.endswith(".sub_rm_i") or p_name.endswith(".sub_sm_i") or \
p_name.endswith(".sub_rm_o") or p_name.endswith(".sub_sm_o") or \
p_name.endswith(".sub_rm_p") or p_name.endswith(".sub_sm_p"):
return True
if "adapter" in p_name:
return True
return False
if self.rank == 0:
print("[INFO] Estimating fisher information ...\n")
opt = self.opt
epoch = 0
assert len(opt.load_from) > 0
# Clear the gradients of the model
self.optim.zero_grad(set_to_none=False)
if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
model = self.model.module
else:
model = self.model
parameters = {n: p for n, p in model.named_parameters() if p.requires_grad}
precision_matrices = dict()
for n, p in parameters.items():
if not is_factorize_params(n):
precision_matrices[n] = torch.zeros_like(p)
# note: for Training split_even=True
dataset = data
data_iterator = generate_data_iterator(dataset, self.rank, self.world_size,
seed=self.opt.seed, num_workers=opt.num_workers,
epoch=0, buffer_size=opt.buffer_size, split_even=True,
dataset_ids=opt.train_sets)
streaming = False
epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
total_tokens, total_loss, total_words = zero_tensor(), zero_tensor(), zero_tensor()
total_non_pads = zero_tensor()
report_loss, report_tgt_words = zero_tensor(), zero_tensor()
report_ctc_loss = zero_tensor()
report_src_words = zero_tensor()
report_rec_loss, report_rev_loss, report_mirror_loss = zero_tensor(), zero_tensor(), zero_tensor()
start = time.time()
n_samples = len(data_iterator)
counter = 0
num_accumulated_words = zero_tensor()
num_accumulated_sents = zero_tensor()
report_contrastive_loss = zero_tensor()
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
i = data_iterator.iterations_in_epoch if not is_list(dataset) else epoch_iterator.n_yielded
i = i * self.world_size # incorrect?
        self.model.train()  # note: train() keeps dropout active; switch to eval() if dropout should be disabled here
while not data_iterator.end_of_epoch():
# this batch generator is not very clean atm
# TODO: move everything to the multiGPU trainer
samples = next(epoch_iterator)
batch = prepare_sample(samples, device=self.device)
targets = batch.get('target_output')
if opt.streaming:
if train_data.is_new_stream():
streaming_state = self.model.init_stream()
else:
streaming_state = None
# TODO: dealing with oom during distributed training
oom = zero_tensor()
counter = counter + 1
# reduce = True if counter >= opt.update_frequency or i == (n_samples - 1) else False
reduce = False # never reduce :))))
try:
def maybe_no_sync():
if not reduce and isinstance(self.model, DDP_model):
return self.model.no_sync()
else:
                        # when we don't reach the updating step, we do not need to synchronize the gradients
# thus disabling the backward grad sync to improve speed
return contextlib.ExitStack() # dummy contextmanager
with maybe_no_sync():
with autocast(enabled=opt.fp16):
tgt_mask = targets.ne(onmt.constants.PAD)
if opt.load_pretrained_classifier:
with torch.no_grad():
layer_states = self.classifier.encode(batch)
else:
layer_states = None
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
zero_encoder=opt.zero_encoder,
mirror=opt.mirror_loss, streaming_state=streaming_state,
nce=opt.nce, pretrained_layer_states=layer_states,
adv_ptb_grad=opt.virtual_adversarial_training_mode > 0,
checkpointing_ffn=opt.checkpointing_ffn,
checkpointing_cross_attn=opt.checkpointing_cross_attn,
checkpointing_self_attn=opt.checkpointing_self_attn
)
batch_size = batch.size
# outputs is a dictionary containing keys/values necessary for loss function
# can be flexibly controlled within models for easier extensibility
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model)
loss_data = loss_dict['data']
loss = loss_dict['loss'] # a little trick to avoid gradient overflow with fp16
full_loss = loss
if opt.ctc_loss > 0.0:
ctc_loss = self.ctc_loss_function(outputs, targets)
ctc_loss_data = ctc_loss.item()
full_loss = full_loss + opt.ctc_loss * ctc_loss
if opt.mirror_loss:
rev_loss = loss_dict['rev_loss']
rev_loss_data = loss_dict['rev_loss_data']
mirror_loss = loss_dict['mirror_loss']
full_loss = full_loss + rev_loss + mirror_loss
mirror_loss_data = loss_dict['mirror_loss'].item()
else:
rev_loss_data = None
mirror_loss_data = 0
# reconstruction loss
if opt.reconstruct:
rec_loss = loss_dict['rec_loss']
rec_loss = rec_loss
full_loss = full_loss + rec_loss
rec_loss_data = loss_dict['rec_loss_data']
else:
rec_loss_data = None
if opt.contrastive_loss_coeff > 0 and 'contrastive_loss' in outputs:
contrastive_loss = outputs['contrastive_loss']
full_loss = full_loss + opt.contrastive_loss_coeff * contrastive_loss
report_contrastive_loss.add_(contrastive_loss.item())
correct, total = loss_dict['correct'], loss_dict['total']
optimizer = self.optim.optimizer
# grad scaler has to be done outside of the autocast
# TODO for adversarial:
grad_list = [p for p in self.model.parameters() if p.requires_grad]
if opt.virtual_adversarial_training_mode > 0:
# if we use virtual adversarial training: add the input to the list of gradient to take
model_input = outputs['source']
vanilla_logits = outputs['logprobs']
grad_list += [model_input]
else:
model_input = None
vanilla_logits = None
self.grad_scaler.scale(full_loss).backward()
except RuntimeError as e:
if 'out of memory' in str(e):
print('[WARNING]: ran out of memory on GPU %d' % self.rank, flush=True)
print('Input size at OOM position:', batch.get('source').size(),
batch.get('target').size())
# always raise the error
raise e
batch_size = batch.size
src_size = batch.src_size
tgt_size = batch.tgt_size
num_accumulated_words.add_(tgt_size)
num_accumulated_sents.add_(batch_size)
# unscale the gradient first
self.grad_scaler.unscale_(self.optim.optimizer)
# fake update. we need a learning rate = 0 for this
grad_norm = clip_grad_norm(self.model.parameters(), 0)
# self.optim.step(scaler=self.grad_scaler)
self.grad_scaler.update()
# Update the precision matrices.
for n, p in parameters.items():
if n in precision_matrices:
grad = p.grad.data
grad.masked_fill_(torch.logical_or(torch.isinf(grad), torch.isnan(grad)), 0)
precision_matrices[n].add_(torch.square(p.grad.data))
self.optim.zero_grad(set_to_none=opt.true_zero_grad)
counter = 0
num_words = tgt_size
report_loss.add_(loss_data)
report_tgt_words.add_(num_words)
report_src_words.add_(src_size)
total_loss.add_(loss_data)
total_words.add_(num_words)
# control the index a little bit to ensure the log is always printed
if i == 0 or ((i + 1) % opt.log_interval < self.world_size):
self.all_reduce(report_loss, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(report_tgt_words, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(report_src_words, op=dist.ReduceOp.SUM, group=self.group)
self.all_reduce(report_contrastive_loss, op=dist.ReduceOp.SUM, group=self.group)
if self.is_main():
log_string = ("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; grad_norm: %6.4f; gradscaler: %9.9f " %
(epoch, i + 1, len(data_iterator),
math.exp(report_loss.item() / report_tgt_words.item()),
grad_norm,
self.grad_scaler.get_scale()))
log_string += ("lr: %.7f ; updates: %7d; " %
(self.optim.get_learning_rate(),
self.optim._step))
log_string += ("%5.0f src tok/s; %5.0f tgt tok/s; " %
(report_src_words.item() / (time.time() - start),
report_tgt_words.item() / (time.time() - start)))
log_string += ("%s elapsed" %
str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
self.print(log_string, flush=True)
report_loss.zero_()
report_tgt_words.zero_()
report_src_words.zero_()
report_rec_loss.zero_()
report_rev_loss.zero_()
report_mirror_loss.zero_()
report_ctc_loss.zero_()
if report_contrastive_loss is not None:
report_contrastive_loss.zero_()
start = time.time()
# increase i by world size
i = i + self.world_size
if isinstance(self.model, DDP_model):
torch.cuda.synchronize(device=self.rank)
loss = 0
for n, p in parameters.items():
loss = loss + p.sum() * 0
# to force ddp to synchronize the last time (based on a zero loss -> zero grad
loss.backward()
self.all_reduce(num_accumulated_words, op=dist.ReduceOp.SUM, group=self.group)
if self.world_size > 1:
if self.rank == 0:
print("[INFO] Synchronizing precision matrices")
for n in precision_matrices:
self.all_reduce(precision_matrices[n], op=dist.ReduceOp.SUM, group=self.group)
if self.rank == 0:
print("Done...")
if self.rank == 0:
# Accumulate fisher info from previous iteration
if self.fisher_info is not None:
print("[INFO] Accumulating fisher information from a previous iteration...")
for n in precision_matrices:
                    if n in self.fisher_info['fisher_diag']:
precision_matrices[n] = self.fisher_info['fisher_diag'][n] + precision_matrices[n]
# normalizing by the number of sentences
# for n in precision_matrices:
# precision_matrices[n].div_(num_d_sents)
means = dict()
for n, p in parameters.items():
if n in precision_matrices:
means[n] = p
checkpoint = {
'mean': means,
'fisher_diag': precision_matrices,
'opt': opt
}
file_name = opt.load_from + ".fisher"
print("[INFO] Saving means and fisher information to %s" % file_name)
torch.save(checkpoint, file_name)
return total_loss / total_words
def run(self, train_data=None, valid_data=None, checkpoint=None):
opt = self.opt
if checkpoint is not None:
# TODO: have loading checkpoints for each process
prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None
if not opt.reset_optim:
itr_progress = None
resume = True
start_epoch = math.floor(checkpoint['epoch']) + 1 if 'epoch' in checkpoint else 1
if start_epoch is None:
start_epoch = 1
else:
itr_progress = None
resume = False
start_epoch = 1
# optim_state_dict = checkpoint['optim']
# # del checkpoint['optim']
del checkpoint
else:
itr_progress = None
resume = False
start_epoch = 1
if opt.load_encoder_from:
self.load_encoder_weight(opt.load_encoder_from)
#
if opt.load_decoder_from:
self.load_decoder_weight(opt.load_decoder_from)
# if we are on a GPU: warm up the memory allocator
# if self.cuda:
# self.warm_up(train_data=train_data)
if opt.estimate_fisher_information:
self.start_time = time.time()
self.estimate_fisher(train_data)
return
if opt.run_validation_before_training or opt.max_step <= 0:
valid_loss, valid_accuracy = self.eval(valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
if self.is_main():
print('[INFO] Validation perplexity: %g' % valid_ppl, flush=True)
# percent is never used in plural :)
print('[INFO] Validation accuracy: %g percent' % (100 * valid_accuracy))
if opt.max_step <= 0:
if self.is_main():
self.save(0, valid_ppl if opt.save_metrics in ['ppl', 'perplexity'] else 1 - valid_accuracy)
return
self.start_time = time.time()
for epoch in range(start_epoch, start_epoch + opt.epochs):
self.print('')
# (1) train for one epoch on the training set
train_loss = self.train_epoch(train_data, valid_data, epoch,
resume=resume, itr_progress=itr_progress)
train_ppl = math.exp(min(train_loss, 100))
self.print('[INFO] Train perplexity: %g' % train_ppl)
# (2) evaluate on the validation set
valid_loss, valid_accuracy = self.eval(valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
if self.is_main():
print('[INFO] Validation perplexity: %g' % valid_ppl)
print('[INFO] Validation accuracy: %g percent' % (100 * valid_accuracy))
self.save(epoch, valid_ppl if opt.save_metrics in ['ppl', 'perplexity'] else 1 - valid_accuracy)
itr_progress = None
resume = False
| 68,654
| 42.452532
| 121
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/train_utils/meters.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import time
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = None
self.sum = 0
self.count = 0
def is_valid(self):
return self.count > 0
def update(self, val, n=1):
if val is not None:
self.val = val
self.sum += val
self.count += n
self.avg = self.sum / self.count
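# Illustrative usage: AverageMeter tracks a running mean, e.g.
#   m = AverageMeter(); m.update(2.0); m.update(4.0)
#   -> m.val == 4.0, m.sum == 6.0, m.count == 2, m.avg == 3.0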
class TimeMeter(object):
"""Computes the average occurrence of some event per second"""
def __init__(self, init=0):
self.reset(init)
def reset(self, init=0):
self.init = init
self.start = time.time()
self.n = 0
def update(self, val=1):
self.n += val
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return self.init + (time.time() - self.start)
class StopwatchMeter(object):
"""Computes the sum/avg duration of some event in seconds"""
def __init__(self):
self.reset()
def start(self):
self.start_time = time.time()
def stop(self, n=1):
if self.start_time is not None:
delta = time.time() - self.start_time
self.sum += delta
self.n += n
self.start_time = None
def reset(self):
self.sum = 0
self.n = 0
self.start_time = None
@property
def avg(self):
return self.sum / self.n
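# Illustrative usage: StopwatchMeter accumulates durations over start/stop pairs, e.g.
#   sw = StopwatchMeter(); sw.start(); do_work(); sw.stop()  # adds the elapsed seconds to sw.sum
#   sw.avg then gives the mean duration (sw.sum / sw.n) per recorded event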
| 1,838
| 22.576923
| 78
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/inference/perplexity_scorer.py
|
import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from torch.autograd import Variable
from onmt.model_factory import build_model
import torch.nn.functional as F
from onmt.inference.search import BeamSearch, DiverseBeamSearch
from onmt.inference.translator import Translator
model_list = ['transformer', 'stochastic_transformer']
class PerplexityScorer(Translator):
"""
A fast implementation of the Beam Search based translator
Based on Fairseq implementation
"""
def __init__(self, opt):
super().__init__(opt)
self.search = BeamSearch(self.tgt_dict)
self.eos = onmt.constants.EOS
self.pad = onmt.constants.PAD
self.bos = self.bos_id
self.vocab_size = self.tgt_dict.size()
self.min_len = 1
self.normalize_scores = opt.normalize
self.len_penalty = opt.alpha
if hasattr(opt, 'no_repeat_ngram_size'):
self.no_repeat_ngram_size = opt.no_repeat_ngram_size
else:
self.no_repeat_ngram_size = 0
if hasattr(opt, 'dynamic_max_len'):
self.dynamic_max_len = opt.dynamic_max_len
else:
self.dynamic_max_len = False
if hasattr(opt, 'dynamic_max_len_scale'):
self.dynamic_max_len_scale = opt.dynamic_max_len_scale
else:
self.dynamic_max_len_scale = 1.2
if opt.verbose:
print('* Current bos id: %d' % self.bos_id, onmt.constants.BOS)
print('* Using fast beam search implementation')
def scoreBatch(self, batch):
with torch.no_grad():
return self._scoreBatch(batch)
def _scoreBatch(self, batch):
# Batch size is in different location depending on data.
beam_size = self.opt.beam_size
bsz = batch_size = batch.size
max_len = self.opt.max_sent_length
gold_scores = batch.get('source').data.new(batch_size).float().zero_()
gold_words = 0
allgold_scores = []
if batch.has_target:
# Use the first model to decode
model_ = self.models[0]
gold_words, gold_scores, allgold_scores = model_.decode(batch)
return gold_scores, gold_words, allgold_scores
def _decode(self, tokens, decoder_states):
# require batch first for everything
outs = dict()
attns = dict()
for i in range(self.n_models):
decoder_output = self.models[i].step(tokens, decoder_states[i])
# take the last decoder state
# decoder_hidden = decoder_hidden.squeeze(1)
# attns[i] = coverage[:, -1, :].squeeze(1) # batch * beam x src_len
# batch * beam x vocab_size
# outs[i] = self.models[i].generator(decoder_hidden)
outs[i] = decoder_output['log_prob']
attns[i] = decoder_output['coverage']
out = self._combine_outputs(outs)
attn = self._combine_attention(attns)
        # attn = attn[:, -1, :]  # I don't know what this line means
        attn = None  # the attention output is probably never used here
return out, attn
def translate(self, src_data, tgt_data, type='mt'):
# (1) convert words to indexes
dataset = self.build_data(src_data, tgt_data, type=type)
batch = dataset.next()[0]
if self.cuda:
batch.cuda(fp16=self.fp16)
# ~ batch = self.to_variable(dataset.next()[0])
batch_size = batch.size
# (2) translate
gold_score, gold_words, allgold_words = self.scoreBatch(batch)
return gold_score, gold_words, allgold_words
| 3,625
| 30.258621
| 80
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/inference/stream_translator.py
|
import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from onmt.model_factory import build_model
import torch.nn.functional as F
from onmt.inference.search import BeamSearch, DiverseBeamSearch
from onmt.inference.translator import Translator
from collections import defaultdict
class StreamTranslator(Translator):
"""
A fast implementation of the Beam Search based translator
Based on Fairseq implementation
"""
def __init__(self, opt):
super().__init__(opt)
self.search = BeamSearch(self.tgt_dict)
self.eos = onmt.constants.EOS
self.pad = onmt.constants.PAD
self.bos = self.bos_id
self.vocab_size = self.tgt_dict.size()
self.min_len = 1
self.normalize_scores = opt.normalize
self.len_penalty = opt.alpha
self.decoder_states = defaultdict(lambda: None)
if hasattr(opt, 'no_repeat_ngram_size'):
self.no_repeat_ngram_size = opt.no_repeat_ngram_size
else:
self.no_repeat_ngram_size = 0
if hasattr(opt, 'dynamic_max_len'):
self.dynamic_max_len = opt.dynamic_max_len
else:
self.dynamic_max_len = False
if hasattr(opt, 'dynamic_max_len_scale'):
self.dynamic_max_len_scale = opt.dynamic_max_len_scale
else:
self.dynamic_max_len_scale = 1.2
if hasattr(opt, 'dynamic_min_len_scale'):
self.dynamic_min_len_scale = opt.dynamic_min_len_scale
else:
self.dynamic_min_len_scale = 0.8
if opt.verbose:
print('* Current bos id: %d' % self.bos_id, onmt.constants.BOS)
print('* Using fast beam search implementation')
self.max_memory_size = opt.max_memory_size
for i in range(len(self.models)):
self.models[i].set_memory_size(self.max_memory_size, self.max_memory_size)
def reset_stream(self):
self.decoder_states = defaultdict(lambda: None)
def translateBatch(self, batch):
with torch.no_grad():
return self._translateBatch(batch)
def _translateBatch(self, batch):
# Batch size is in different location depending on data.
beam_size = self.opt.beam_size
bsz = batch_size = batch.size
max_len = self.opt.max_sent_length
gold_scores = batch.get('source').data.new(batch_size).float().zero_()
gold_words = 0
allgold_scores = []
if batch.has_target:
# Use the first model to decode
model_ = self.models[0]
gold_words, gold_scores, allgold_scores = model_.decode(batch)
# (3) Start decoding
# initialize buffers
src = batch.get('source')
scores = src.new(bsz * beam_size, max_len + 1).float().fill_(0)
scores_buf = scores.clone()
tokens = src.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
tokens_buf = tokens.clone()
tokens[:, 0].fill_(self.bos) # first token is bos
attn, attn_buf = None, None
nonpad_idxs = None
src_tokens = src.transpose(0, 1) # batch x time
src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
prefix_tokens = None
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
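        # illustrative example: with bsz = 2 and beam_size = 3, bbsz_offsets is [[0], [3]], so
        # adding it to per-sentence beam indices (0..2) yields flat hypothesis indices 0..5,
        # matching the bsz * beam_size layout of `tokens` and `scores`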
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfinalized_scores=None):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
scores for each hypothesis
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
            tokens_clone = tokens_clone[:, 1:step + 2]  # skip the first index, which is the BOS token
assert not tokens_clone.eq(self.eos).any()
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step + 2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
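            # e.g. cumulative scores [-1.0, -2.5, -4.0] become per-position scores [-1.0, -1.5, -1.5]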
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
# if self.match_source_len and step > src_lengths[unfin_idx]:
# score = -math.inf
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': None,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
reorder_state = None
batch_idxs = None
# initialize the decoder state, including:
# - expanding the context over the batch dimension len_src x (B*beam) x H
# - expanding the mask over the batch dimension (B*beam) x len_src
for i in range(self.n_models):
# decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size, type=2, streaming=False)
self.decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size,
previous_decoding_state=self.decoder_states[i],
streaming=True)
if self.dynamic_max_len:
src_len = src.size(0)
max_len = min(math.ceil(int(src_len) * self.dynamic_max_len_scale), self.opt.max_sent_length)
min_len = math.ceil(int(src_len) * self.dynamic_min_len_scale)
else:
min_len = self.min_len
# Start decoding
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
for i, model in enumerate(self.models):
self.decoder_states[i]._reorder_incremental_state(reorder_state)
decode_input = tokens[:, :step + 1]
lprobs, avg_attn_scores = self._decode(decode_input, self.decoder_states)
avg_attn_scores = None
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.bos] = -math.inf # never select bos ...
# handle min and max length constraints
if step >= max_len:
lprobs[:, :self.eos] = -math.inf
lprobs[:, self.eos + 1:] = -math.inf
elif step < min_len:
lprobs[:, self.eos] = -math.inf
# handle prefix tokens (possibly with different lengths)
# if prefix_tokens is not None and step < prefix_tokens.size(1):
# prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
# prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
# prefix_mask = prefix_toks.ne(self.pad)
# lprobs[prefix_mask] = -math.inf
# lprobs[prefix_mask] = lprobs[prefix_mask].scatter_(
# -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs
# )
# # if prefix includes eos, then we should make sure tokens and
# # scores are the same across all beams
# eos_mask = prefix_toks.eq(self.eos)
# if eos_mask.any():
# # validate that the first beam matches the prefix
# first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
# eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
# target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
# assert (first_beam == target_prefix).all()
#
# def replicate_first_beam(tensor, mask):
# tensor = tensor.view(-1, beam_size, tensor.size(-1))
# tensor[mask] = tensor[mask][:, :1, :]
# return tensor.view(-1, tensor.size(-1))
#
# # copy tokens, scores and lprobs from the first beam to all beams
# tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
# scores = replicate_first_beam(scores, eos_mask_batch_dim)
# lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
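                # illustrative example with no_repeat_ngram_size = 2: for a hypothesis [5, 7, 5]
                # gen_ngrams holds {(5,): [7], (7,): [5]}; since the hypothesis ends in 5, token 7
                # is banned at the next step because it would repeat the bigram (5, 7)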
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
attn_buf = attn.clone()
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
scores_buf = scores_buf.type_as(lprobs)
eos_bbsz_idx = buffer('eos_bbsz_idx')
eos_scores = buffer('eos_scores', type_of=scores)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
else:
banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos (except for blacklisted ones)
eos_mask = cand_indices.eq(self.eos)
eos_mask[:, :beam_size][blacklist] = 0
# only consider eos when it's among the top beam_size indices
torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_bbsz_idx,
)
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_scores,
)
finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
assert step < max_len
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero().squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
# if prefix_tokens is not None:
# prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
blacklist = blacklist[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# blacklisted hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
active_mask = buffer('active_mask')
eos_mask[:, :beam_size] |= blacklist
torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
out=active_mask,
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_blacklist, active_hypos)
)
# update blacklist to ignore any finalized hypos
blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
assert (~blacklist).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
# self.decoder_states = defaultdict(lambda : None)
return finalized, gold_scores, gold_words, allgold_scores
def _decode(self, tokens, decoder_states):
# require batch first for everything
outs = dict()
attns = dict()
for i in range(self.n_models):
# streaming = True in this case
decoder_output = self.models[i].step(tokens, decoder_states[i], streaming=True)
# take the last decoder state
# decoder_hidden = decoder_hidden.squeeze(1)
# attns[i] = coverage[:, -1, :].squeeze(1) # batch * beam x src_len
# batch * beam x vocab_size
# outs[i] = self.models[i].generator(decoder_hidden)
outs[i] = decoder_output['log_prob']
attns[i] = decoder_output['coverage']
out = self._combine_outputs(outs)
attn = self._combine_attention(attns)
# attn = attn[:, -1, :]  # legacy slicing kept for reference; its purpose is unclear
attn = None  # the attention output is not used downstream, so drop it
return out, attn
def translate(self, src_data, tgt_data, type='mt'):
# (1) convert words to indexes
dataset = self.build_data(src_data, tgt_data, type=type)
batch = dataset.next()[0]
if self.cuda:
batch.cuda(fp16=self.fp16)
# ~ batch = self.to_variable(dataset.next()[0])
batch_size = batch.size
# (2) translate
finalized, gold_score, gold_words, allgold_words = self.translateBatch(batch)
pred_length = []
# (3) convert indexes to words
pred_batch = []
for b in range(batch_size):
pred_batch.append(
[self.build_target_tokens(finalized[b][n]['tokens'], src_data[b], None)
for n in range(self.opt.n_best)]
)
pred_score = []
for b in range(batch_size):
pred_score.append(
[torch.FloatTensor([finalized[b][n]['score']])
for n in range(self.opt.n_best)]
)
return pred_batch, pred_score, pred_length, gold_score, gold_words, allgold_words
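# A minimal, self-contained sketch of the no-repeat n-gram blocking used in the
# decoding loop above, reduced to plain Python so the bookkeeping is easier to
# follow. The helper name and the token ids are toy values for illustration;
# the real loop operates on (bsz * beam_size) hypotheses and sets the banned
# entries of `lprobs` to -inf.
def _ngram_blocking_sketch(gen_tokens, no_repeat_ngram_size=3):
    # collect every (n-1)-gram prefix -> tokens that have already followed it
    gen_ngrams = {}
    for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]):
        gen_ngrams[ngram[:-1]] = gen_ngrams.get(ngram[:-1], []) + [ngram[-1]]
    # before emitting the next token, look up the current (n-1)-gram suffix
    step = len(gen_tokens) - 1
    if step + 2 - no_repeat_ngram_size < 0:
        return []  # not enough tokens generated yet
    ngram_index = tuple(gen_tokens[step + 2 - no_repeat_ngram_size:step + 1])
    return gen_ngrams.get(ngram_index, [])
if __name__ == "__main__":
    # "5 8 9" already occurred, so 9 is banned after the suffix "5 8"
    print(_ngram_blocking_sketch([5, 8, 9, 5, 8]))  # -> [9]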
| 22,096
| 41.250478
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/inference/Beam.py
|
from __future__ import division
import torch
import onmt
"""
Class for managing the internals of the beam search process.
hyp1-hyp1---hyp1 -hyp1
\ /
hyp2 \-hyp2 /-hyp2hyp2
/ \
hyp3-hyp3---hyp3 -hyp3
========================
Takes care of beams, back pointers, and scores.
"""
class Beam(object):
def __init__(self, size, bos_id, cuda=False, sampling=False):
self.size = size
self.done = False
if sampling:
self.size = 1
self.sampling = sampling
self.tt = torch.cuda if cuda else torch
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
self.allScores = []
# The backpointers at each time-step.
self.prevKs = []
# The outputs at each time-step.
# self.nextYs = [self.tt.LongTensor(size).fill_(onmt.constants.PAD)]
self.nextYs = [self.tt.LongTensor(size).fill_(onmt.constants.TGT_PAD)]
# self.nextYs[0][0] = onmt.Constants.BOS
self.nextYs[0][0] = bos_id
# The attentions (matrix) for each time.
self.attn = []
def getCurrentState(self):
"Get the outputs for the current timestep."
return self.nextYs[-1]
def getCurrentOrigin(self):
"Get the backpointers for the current timestep."
return self.prevKs[-1]
def advance(self, wordLk, attnOut):
"""
Given prob over words for every last beam `wordLk` and attention
`attnOut`: Compute and update the beam search.
Parameters:
* `wordLk`- probs of advancing from the last step (K x words)
* `attnOut`- attention at the last step
Returns: True if beam search is complete.
"""
numWords = wordLk.size(1)
# Sum the previous scores.
if len(self.prevKs) > 0:
beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
else:
beamLk = wordLk[0]
flatBeamLk = beamLk.view(-1)
# print(flatBeamLk.size())
# print(wordLk.size())
if not self.sampling:
bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
else:
# because wordLk is log prob, exp to get distribution
probs = torch.exp(wordLk)
# print(probs.size())
bestScoresId = torch.multinomial(probs, 1).squeeze(1) # K x 1 to K
# print(bestScoresId, bestScoresId.size())
bestScores = flatBeamLk[bestScoresId]
# multinomial sampling
self.allScores.append(self.scores)
self.scores = bestScores
# bestScoresId is flattened beam x word array, so calculate which
# word and beam each score came from
prevK = bestScoresId.floor_divide(numWords)
self.prevKs.append(prevK)
self.nextYs.append(bestScoresId - prevK * numWords)
self.attn.append(attnOut.index_select(0, prevK))
# End condition is when top-of-beam is EOS.
if self.nextYs[-1][0] == onmt.constants.EOS:
self.done = True
self.allScores.append(self.scores)
return self.done
def sortBest(self):
return torch.sort(self.scores, 0, True)
def getBest(self):
"Get the score of the best in the beam."
scores, ids = self.sortBest()
return scores[0], ids[0]  # index 0 is the best hypothesis after sorting in descending order
def getHyp(self, k):
"""
Walk back to construct the full hypothesis.
Parameters.
* `k` - the position in the beam to construct.
Returns.
1. The hypothesis
2. The attention at each time step.
"""
hyp, attn = [], []
lengths = []
for j in range(len(self.prevKs) - 1, -1, -1):
hyp.append(self.nextYs[j+1][k])
attn.append(self.attn[j][k])
k = self.prevKs[j][k]
length = len(hyp)
return hyp[::-1], torch.stack(attn[::-1]), length
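# A minimal sketch of how `advance` recovers the beam index and the word index
# from the flattened (beam x vocab) score matrix, and what the resulting
# backpointer means for `getHyp`. Scores and sizes below are toy values chosen
# only for illustration.
def _beam_indexing_sketch():
    import torch
    beam_size, num_words = 2, 5
    beam_scores = torch.tensor([[0.1, 0.7, 0.2, 0.0, 0.0],
                                [0.3, 0.0, 0.0, 0.6, 0.0]])
    flat_beam_scores = beam_scores.view(-1)  # flatten to beam * vocab
    best_scores, best_ids = flat_beam_scores.topk(beam_size, 0, True, True)
    prev_k = best_ids // num_words           # which beam each winner came from
    next_y = best_ids - prev_k * num_words   # which word was chosen
    # prev_k plays the role of self.prevKs[-1]: getHyp follows it backwards
    # with k = self.prevKs[j][k] to reconstruct the full hypothesis.
    return prev_k.tolist(), next_y.tolist()  # -> ([0, 1], [1, 3])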
| 4,071
| 28.085714
| 80
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/inference/predictor.py
|
import onmt
import onmt.modules
import torch
from onmt.model_factory import build_classifier, build_language_model
from ae.Autoencoder import Autoencoder
import torch.nn.functional as F
import sys
from onmt.constants import add_tokenidx
from options import backward_compatible
model_list = ['transformer', 'stochastic_transformer', 'fusion_network']
class Predictor(object):
def __init__(self, opt):
self.opt = opt
self.tt = torch.cuda if opt.cuda else torch
self.fp16 = opt.fp16
self.attributes = opt.attributes # attributes split by |. for example: de|domain1
self.src_lang = opt.src_lang
self.tgt_lang = opt.tgt_lang
if self.attributes:
self.attributes = self.attributes.split("|")
self.models = list()
self.model_types = list()
# models are string with | as delimiter
models = opt.model.split("|")
print(models)
self.n_models = len(models)
self._type = 'text'
for i, model_path in enumerate(models):
checkpoint = torch.load(model_path,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
model_opt = backward_compatible(model_opt)
if hasattr(model_opt, "enc_state_dict"):
model_opt.enc_state_dict = None
model_opt.dec_state_dict = None
self.main_model_opt = model_opt
dicts = checkpoint['dicts']
# update special tokens
onmt.constants = add_tokenidx(model_opt, onmt.constants, dicts)
self.bos_token = model_opt.tgt_bos_word
if i == 0:
if "src" in checkpoint['dicts']:
self.src_dict = checkpoint['dicts']['src']
else:
self._type = "audio"
# self.src_dict = self.tgt_dict
self.tgt_dict = checkpoint['dicts']['tgt']
print(self.tgt_dict.idxToLabel)
if "langs" in checkpoint["dicts"]:
self.lang_dict = checkpoint['dicts']['langs']
else:
self.lang_dict = {'src': 0, 'tgt': 1}
# self.bos_id = self.tgt_dict.labelToIdx[self.bos_token]
model = build_classifier(model_opt, checkpoint['dicts'])
# optimize_model(model)
if opt.verbose:
print('Loading model from %s' % model_path)
model.load_state_dict(checkpoint['model'])
if model_opt.model in model_list:
# if model.decoder.positional_encoder.len_max < self.opt.max_sent_length:
# print("Not enough len to decode. Renewing .. ")
# model.decoder.renew_buffer(self.opt.max_sent_length)
model.renew_buffer(self.opt.max_sent_length)
# model.convert_autograd()
if opt.fp16:
model = model.half()
if opt.cuda:
model = model.cuda()
else:
model = model.cpu()
if opt.dynamic_quantile == 1:
engines = torch.backends.quantized.supported_engines
if 'fbgemm' in engines:
torch.backends.quantized.engine = 'fbgemm'
else:
print("[INFO] fbgemm is not found in the available engines. Possibly the CPU does not support AVX2."
" It is recommended to disable Quantization (set to 0).")
torch.backends.quantized.engine = 'qnnpack'
# convert the custom functions to their autograd equivalent first
model.convert_autograd()
model = torch.quantization.quantize_dynamic(
model, {torch.nn.LSTM, torch.nn.Linear}, dtype=torch.qint8
)
model.eval()
self.models.append(model)
self.model_types.append(model_opt.model)
# language model
if opt.lm is not None:
if opt.verbose:
print('Loading language model from %s' % opt.lm)
lm_chkpoint = torch.load(opt.lm, map_location=lambda storage, loc: storage)
lm_opt = lm_chkpoint['opt']
lm_model = build_language_model(lm_opt, checkpoint['dicts'])
if opt.fp16:
lm_model = lm_model.half()
if opt.cuda:
lm_model = lm_model.cuda()
else:
lm_model = lm_model.cpu()
self.lm_model = lm_model
self.cuda = opt.cuda
self.ensemble_op = opt.ensemble_op
if opt.autoencoder is not None:
if opt.verbose:
print('Loading autoencoder from %s' % opt.autoencoder)
checkpoint = torch.load(opt.autoencoder,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
# posSize= checkpoint['autoencoder']['nmt.decoder.positional_encoder.pos_emb'].size(0)
# self.models[0].decoder.renew_buffer(posSize)
# self.models[0].decoder.renew_buffer(posSize)
# Build model from the saved option
self.autoencoder = Autoencoder(self.models[0], model_opt)
self.autoencoder.load_state_dict(checkpoint['autoencoder'])
if opt.cuda:
self.autoencoder = self.autoencoder.cuda()
self.models[0] = self.models[0].cuda()
else:
self.autoencoder = self.autoencoder.cpu()
self.models[0] = self.models[0].cpu()
self.models[0].autoencoder = self.autoencoder
if opt.verbose:
print('Done')
def build_asr_data(self, src_data, tgt_sents):
# This needs to be the same as preprocess.py.
tgt_data = None
if tgt_sents:
tgt_data = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD,
onmt.constants.EOS_WORD) for b in tgt_sents]
return onmt.Dataset(src_data, tgt_data,
batch_size_words=sys.maxsize,
data_type=self._type, batch_size_sents=self.opt.batch_size)
def classify_batch(self, batches, sub_batches=None):
with torch.no_grad():
return self._classify_batch(batches, sub_batches=sub_batches)
def _classify_batch(self, batches, sub_batches):
batch = batches[0]
beam_size = self.opt.beam_size
bsz = batch_size = batch.size
# require batch first for everything
outs = dict()
attns = dict()
for i in range(self.n_models):
# decoder output contains the log-prob distribution of the next step
# decoder_output = self.models[i].step(tokens, decoder_states[i])
model_outputs = self.models[i](batches[i])
logits = model_outputs['logits']
mask = model_outputs['src_mask']
mask = mask.squeeze(1).transpose(0, 1)
mask = mask.unsqueeze(-1)
logits.masked_fill_(mask, 0)
lengths = (1 - mask.long()).squeeze(-1).sum(dim=0, keepdim=False)
clean_logits = logits.sum(dim=0, keepdim=False).div(lengths.unsqueeze(-1))
probs = F.softmax(clean_logits.float(), dim=-1)
outs[i] = probs
probs = sum(outs.values())
probs.div_(self.n_models)
return probs
def build_data(self, src_sents, tgt_sents, type='mt', past_sents=None):
# This needs to be the same as preprocess.py.
if type == 'mt':
raise NotImplementedError
# if self.start_with_bos:
# src_data = [self.src_dict.convertToIdx(b,
# onmt.constants.UNK_WORD,
# onmt.constants.BOS_WORD)
# for b in src_sents]
# else:
# src_data = [self.src_dict.convertToIdx(b,
# onmt.constants.UNK_WORD)
# for b in src_sents]
# data_type = 'text'
# past_src_data = None
elif type == 'asr':
# no need to deal with this
src_data = src_sents
past_src_data = past_sents
data_type = 'audio'
else:
raise NotImplementedError
tgt_bos_word = self.opt.bos_token
if self.opt.no_bos_gold:
tgt_bos_word = None
tgt_data = None
if tgt_sents:
tgt_data = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
tgt_bos_word,
onmt.constants.EOS_WORD) for b in tgt_sents]
src_lang_data = [torch.Tensor([self.lang_dict[self.src_lang]])]
# tgt_lang_data = [torch.Tensor([self.lang_dict[self.tgt_lang]])]
tgt_lang_data = None
return onmt.Dataset(src_data, tgt_data,
src_langs=src_lang_data, tgt_langs=tgt_lang_data,
batch_size_words=sys.maxsize,
data_type=data_type,
batch_size_sents=self.opt.batch_size,
src_align_right=self.opt.src_align_right,
past_src_data=past_src_data)
def predict(self, src_data):
type = 'asr'
# (1) convert words to indexes
if isinstance(src_data[0], list) and type == 'asr':
batches = list()
for i, src_data_ in enumerate(src_data):
dataset = self.build_data(src_data_, None, type=type, past_sents=None)
batch = dataset.get_batch(0)
batches.append(batch)
else:
dataset = self.build_data(src_data, None, type=type)
batch = dataset.get_batch(0) # this dataset has only one mini-batch
batches = [batch] * self.n_models
src_data = [src_data] * self.n_models
batch_size = batches[0].size
if self.cuda:
for i, _ in enumerate(batches):
batches[i].cuda(fp16=self.fp16)
# (2) translate
# each model in the ensemble uses one batch in batches
probs = self.classify_batch(batches)
# (3) convert indexes to words
pred_score = []
for b in range(batch_size):
pred_score.append(
probs[b].tolist()
)
return pred_score
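# A minimal sketch of the masked mean-pooling and ensemble averaging performed
# in `_classify_batch` above, on toy tensors. Shapes follow the code: logits
# are (time x batch x n_classes) and the padding mask is (time x batch x 1)
# with True at padded positions. All sizes and values here are made up for the
# example.
def _ensemble_pooling_sketch(n_ensemble=2):
    import torch
    import torch.nn.functional as F
    time_steps, batch, n_classes = 4, 2, 3
    per_model_probs = []
    for _ in range(n_ensemble):
        logits = torch.randn(time_steps, batch, n_classes)
        mask = torch.zeros(time_steps, batch, 1, dtype=torch.bool)
        mask[3, 1, 0] = True                      # last frame of example 2 is padding
        logits = logits.masked_fill(mask, 0)
        lengths = (~mask).squeeze(-1).sum(dim=0)  # number of real frames per example
        pooled = logits.sum(dim=0) / lengths.unsqueeze(-1)
        per_model_probs.append(F.softmax(pooled, dim=-1))
    probs = sum(per_model_probs) / n_ensemble     # average of the per-model distributions
    return probs                                  # (batch x n_classes), each row sums to 1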
| 10,872
| 35.609428
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/inference/global_translator.py
|
import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from onmt.model_factory import build_model
import torch.nn.functional as F
from onmt.inference.search import BeamSearch, DiverseBeamSearch
from onmt.inference.translator import Translator
from collections import defaultdict
class GlobalStreamTranslator(Translator):
"""
A fast implementation of the Beam Search based translator
Based on Fairseq implementation
"""
def __init__(self, opt):
super().__init__(opt)
self.search = BeamSearch(self.tgt_dict)
self.eos = onmt.constants.EOS
self.pad = onmt.constants.PAD
self.bos = self.bos_id
self.vocab_size = self.tgt_dict.size()
self.min_len = 1
self.normalize_scores = opt.normalize
self.len_penalty = opt.alpha
self.decoder_states = defaultdict(lambda: None)
self.prev_scores = torch.Tensor(self.opt.beam_size).fill_(0)
self.prev_lengths = torch.LongTensor(self.opt.beam_size).fill_(0)
if hasattr(opt, 'no_repeat_ngram_size'):
self.no_repeat_ngram_size = opt.no_repeat_ngram_size
else:
self.no_repeat_ngram_size = 0
if hasattr(opt, 'dynamic_max_len'):
self.dynamic_max_len = opt.dynamic_max_len
else:
self.dynamic_max_len = False
if hasattr(opt, 'dynamic_max_len_scale'):
self.dynamic_max_len_scale = opt.dynamic_max_len_scale
else:
self.dynamic_max_len_scale = 1.2
if hasattr(opt, 'dynamic_min_len_scale'):
self.dynamic_min_len_scale = opt.dynamic_min_len_scale
else:
self.dynamic_min_len_scale = 0.8
if opt.verbose:
print('* Current bos id: %d' % self.bos_id, onmt.constants.BOS)
print('* Using fast beam search implementation')
self.max_memory_size = opt.max_memory_size
for i in range(len(self.models)):
self.models[i].set_memory_size(self.max_memory_size, self.max_memory_size)
def reset_stream(self):
self.decoder_states = defaultdict(lambda: None)
def translateBatch(self, batch):
with torch.no_grad():
return self._translateBatch(batch)
def _translateBatch(self, batch):
# Batch size is in different location depending on data.
beam_size = self.opt.beam_size
bsz = batch_size = batch.size
max_len = self.opt.max_sent_length
gold_scores = batch.get('source').data.new(batch_size).float().zero_()
gold_words = 0
allgold_scores = []
if batch.has_target:
# Use the first model to decode
model_ = self.models[0]
gold_words, gold_scores, allgold_scores = model_.decode(batch)
# (3) Start decoding
# initialize buffers
src = batch.get('source')
scores = src.new(bsz * beam_size, max_len + 1).float().fill_(0)
self.prev_scores = self.prev_scores.type_as(scores)
self.prev_lengths = self.prev_lengths.to(scores.device)
scores_buf = scores.clone()
tokens = src.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
beams = src.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
tokens_buf = tokens.clone()
beams_buf = beams.clone()
tokens[:, 0].fill_(self.bos) # first token is bos
beams[:, 0].fill_(0) # first one is the same ...
attn, attn_buf = None, None
nonpad_idxs = None
src_tokens = src.transpose(0, 1) # batch x time
src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
prefix_tokens = None
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfinalized_scores=None):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
scores for each hypothesis
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
beams_clone = beams.index_select(0, bbsz_idx)
prev_lengths = self.prev_lengths.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2]  # skip the first index, which is BOS
beams_clone = beams_clone[:, 0:step + 2]
assert not tokens_clone.eq(self.eos).any()
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step + 2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
raw_scores = eos_scores.clone()
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1 + prev_lengths) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
assert len(self.decoder_states) == 1
beam_buffers = self.decoder_states[0].get_beam_buffer(bbsz_idx)
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
# looks like sent and unfin_idx are both 0 when batch_size is 1 ...
# until everything is finished
sents_seen.add((sent, unfin_idx))
def get_buffer():
buffer = dict()
for l in beam_buffers:
buffer[l] = dict()
# take that state
for key in beam_buffers[l]:
buffer[l][key] = beam_buffers[l][key][:, i, :].unsqueeze(1)
return buffer
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': None,
'positional_scores': pos_scores[i],
'hidden_buffer': get_buffer(),
'raw_score': raw_scores[i]
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
reorder_state = None
batch_idxs = None
# initialize the decoder state, including:
# - expanding the context over the batch dimension len_src x (B*beam) x H
# - expanding the mask over the batch dimension (B*beam) x len_src
for i in range(self.n_models):
# decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size, type=2, streaming=False)
self.decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size,
previous_decoding_state=self.decoder_states[i],
streaming=True)
if self.dynamic_max_len:
src_len = src.size(0)
max_len = min(math.ceil(int(src_len) * self.dynamic_max_len_scale), self.opt.max_sent_length)
min_len = math.ceil(int(src_len) * self.dynamic_min_len_scale)
else:
min_len = self.min_len
# Start decoding
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
for i, model in enumerate(self.models):
self.decoder_states[i]._reorder_incremental_state(reorder_state)
decode_input = tokens[:, :step + 1]
# lprobs size: [batch x beam x vocab_size]
lprobs, avg_attn_scores = self._decode(decode_input, self.decoder_states)
avg_attn_scores = None
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.bos] = -math.inf # never select bos ...
# handle min and max length constraints
if step >= max_len:
lprobs[:, :self.eos] = -math.inf
lprobs[:, self.eos + 1:] = -math.inf
elif step < min_len:
lprobs[:, self.eos] = -math.inf
# handle prefix tokens (possibly with different lengths)
# if prefix_tokens is not None and step < prefix_tokens.size(1):
# prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
# prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
# prefix_mask = prefix_toks.ne(self.pad)
# lprobs[prefix_mask] = -math.inf
# lprobs[prefix_mask] = lprobs[prefix_mask].scatter_(
# -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs
# )
# # if prefix includes eos, then we should make sure tokens and
# # scores are the same across all beams
# eos_mask = prefix_toks.eq(self.eos)
# if eos_mask.any():
# # validate that the first beam matches the prefix
# first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
# eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
# target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
# assert (first_beam == target_prefix).all()
#
# def replicate_first_beam(tensor, mask):
# tensor = tensor.view(-1, beam_size, tensor.size(-1))
# tensor[mask] = tensor[mask][:, :1, :]
# return tensor.view(-1, tensor.size(-1))
#
# # copy tokens, scores and lprobs from the first beam to all beams
# tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
# scores = replicate_first_beam(scores, eos_mask_batch_dim)
# lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
attn_buf = attn.clone()
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
scores_buf = scores_buf.type_as(lprobs)
eos_bbsz_idx = buffer('eos_bbsz_idx')
eos_scores = buffer('eos_scores', type_of=scores)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
else:
banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
initial_score=self.prev_scores
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
# when bsz == 1, cand_bbsz_idx is identical to cand_beams
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos (except for blacklisted ones)
eos_mask = cand_indices.eq(self.eos)
eos_mask[:, :beam_size][blacklist] = 0
# only consider eos when it's among the top beam_size indices
torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_bbsz_idx,
)
# so: cand_bbsz_idx is a list of beam indices
# eos_bbsz_idx in the case of batch_size 1: a list of beam_indices in which the eos is reached
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_scores,
)
finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
assert step < max_len
# if batch size == 1 then this block will not be touched
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero().squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
# if prefix_tokens is not None:
# prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
blacklist = blacklist[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# blacklisted hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
active_mask = buffer('active_mask')
eos_mask[:, :beam_size] |= blacklist
torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
out=active_mask,
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_blacklist, active_hypos)
)
# update blacklist to ignore any finalized hypos
blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
assert (~blacklist).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.index_select(
beams[:, :step + 1], dim=0, index=active_bbsz_idx,
out=beams_buf[:, :step + 1],
)
# add the cand_indices (words) into the token buffer of the last step
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=beams_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
# print(cand_indices.size(), cand_bbsz_idx.size())
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
beams, beams_buf = beams_buf, beams
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
# Re-encoding step
# for beam in range(self.opt.beam_size):
# " batch size = 1 "
# tensor = finalized[0][beam]['tokens']
# words = " ".join(self.tgt_dict.convertToLabels(tensor, onmt.constants.EOS, including_stop=False))
# beam_org = finalized[0][beam]['beam_origin']
# print(beam_org, words)
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
for sent in range(len(finalized)):
for beam in range(len(finalized[sent])):
tensor = finalized[sent][beam]['tokens']
words = self.tgt_dict.convertToLabels(tensor, onmt.constants.EOS, including_stop=False)
n_words = len(words)
buffer_state = finalized[sent][beam]['hidden_buffer']
sentence = " ".join(words)
# self.prev_scores[beam].fill_(finalized[sent][beam]['raw_score'])
# self.prev_lengths[beam].fill_(n_words + 2)
# assign the buffers to the decoder_states
# at this point, we need to somehow make zero padding
self.decoder_states[sent].set_beam_buffer(finalized[sent])
# self.decoder_states = defaultdict(lambda: None)
# Should we do it before sorting, or after sorting
# Step 1: revert the memory of the decoder to the starting point
# Done. they are the buffer_state
# Step 3: Re-select the buffer (
# print(tensor)
return finalized, gold_scores, gold_words, allgold_scores
def _decode(self, tokens, decoder_states):
# require batch first for everything
outs = dict()
attns = dict()
for i in range(self.n_models):
# streaming = True in this case
decoder_output = self.models[i].step(tokens, decoder_states[i], streaming=True)
# take the last decoder state
# decoder_hidden = decoder_hidden.squeeze(1)
# attns[i] = coverage[:, -1, :].squeeze(1) # batch * beam x src_len
# batch * beam x vocab_size
# outs[i] = self.models[i].generator(decoder_hidden)
outs[i] = decoder_output['log_prob']
attns[i] = decoder_output['coverage']
out = self._combine_outputs(outs)
attn = self._combine_attention(attns)
# attn = attn[:, -1, :]  # legacy slicing kept for reference; its purpose is unclear
attn = None  # the attention output is not used downstream, so drop it
return out, attn
def translate(self, src_data, tgt_data, type='mt'):
# (1) convert words to indexes
dataset = self.build_data(src_data, tgt_data, type=type)
batch = dataset.next()[0]
if self.cuda:
batch.cuda(fp16=self.fp16)
# ~ batch = self.to_variable(dataset.next()[0])
batch_size = batch.size
# (2) translate
finalized, gold_score, gold_words, allgold_words = self.translateBatch(batch)
pred_length = []
# (3) convert indexes to words
pred_batch = []
for b in range(batch_size):
pred_batch.append(
[self.build_target_tokens(finalized[b][n]['tokens'], src_data[b], None)
for n in range(self.opt.n_best)]
)
pred_score = []
for b in range(batch_size):
pred_score.append(
[torch.FloatTensor([finalized[b][n]['score']])
for n in range(self.opt.n_best)]
)
return pred_batch, pred_score, pred_length, gold_score, gold_words, allgold_words
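# A minimal sketch of the length-penalised score normalisation applied in
# `finalize_hypos` above. In the streaming setting, the lengths of previously
# decoded segments (self.prev_lengths) are added to the current step count
# before the penalty is applied. The numbers below are toy values chosen only
# for illustration.
def _length_penalty_sketch():
    import torch
    alpha = 0.6                               # corresponds to self.len_penalty (opt.alpha)
    step = 9                                  # ten tokens decoded in the current segment
    prev_lengths = torch.tensor([0.0, 12.0])  # per-hypothesis length of earlier segments
    eos_scores = torch.tensor([-4.0, -7.5])   # cumulative log-probs at EOS
    normalized = eos_scores / (step + 1 + prev_lengths) ** alpha
    return normalized                         # scores used for the final ranking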
| 25,704
| 41.557947
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/inference/fast_translator.py
|
import sys
import onmt
import onmt.modules
import torch
import math
from onmt.model_factory import build_model, optimize_model
from onmt.inference.search import BeamSearch, Sampling
from onmt.inference.translator import Translator
from onmt.constants import add_tokenidx
from options import backward_compatible
# buggy lines: 392, 442, 384
model_list = ['transformer', 'stochastic_transformer', 'fusion_network']
class FastTranslator(Translator):
"""
A fast implementation of the Beam Search based translator
Based on Fairseq implementation
"""
def __init__(self, opt):
super().__init__(opt)
self.src_bos = onmt.constants.SRC_BOS
self.src_eos = onmt.constants.SRC_EOS
self.src_pad = onmt.constants.SRC_PAD
self.src_unk = onmt.constants.SRC_UNK
self.tgt_bos = self.bos_id
self.tgt_pad = onmt.constants.TGT_PAD
self.tgt_eos = onmt.constants.TGT_EOS
self.tgt_unk = onmt.constants.TGT_UNK
if opt.sampling:
self.search = Sampling(self.tgt_dict)
else:
self.search = BeamSearch(self.tgt_dict)
self.vocab_size = self.tgt_dict.size()
self.min_len = opt.min_sent_length
print("min len:", self.min_len)
self.normalize_scores = opt.normalize
self.len_penalty = opt.alpha
self.buffering = not opt.no_buffering
if hasattr(opt, 'no_repeat_ngram_size'):
self.no_repeat_ngram_size = opt.no_repeat_ngram_size
else:
self.no_repeat_ngram_size = 0
if hasattr(opt, 'dynamic_max_len'):
self.dynamic_max_len = opt.dynamic_max_len
else:
self.dynamic_max_len = False
if hasattr(opt, 'dynamic_max_len_scale'):
self.dynamic_max_len_scale = opt.dynamic_max_len_scale
else:
self.dynamic_max_len_scale = 1.2
if opt.verbose:
# print('* Current bos id is: %d, default bos id is: %d' % (self.tgt_bos, onmt.constants.BOS))
print("src bos id is %d; src eos id is %d; src pad id is %d; src unk id is %d"
% (self.src_bos, self.src_eos, self.src_pad, self.src_unk))
print("tgt bos id is %d; tgt eos id is %d; tgt_pad id is %d; tgt unk id is %d"
% (self.tgt_bos, self.tgt_eos, self.tgt_pad, self.tgt_unk))
print('* Using fast beam search implementation')
if opt.vocab_list:
print("[INFO] reading the list of words from %s" % opt.vocab_list)
word_list = list()
for line in open(opt.vocab_list).readlines():
word = line.strip()
word_list.append(word)
self.filter = torch.Tensor(self.tgt_dict.size()).zero_()
# the eos and unk have to be in here
for word_idx in [self.tgt_eos, self.tgt_unk]:
self.filter[word_idx] = 1
for word in word_list:
idx = self.tgt_dict.lookup(word)
if idx is not None:
self.filter[idx] = 1
else:
print("WARNING: word %s does not exist in the dictionary" % word)
self.filter = self.filter.bool()
if opt.cuda:
self.filter = self.filter.cuda()
self.use_filter = True
elif opt.vocab_id_list:
ids = torch.load(opt.vocab_id_list)
print('[INFO] Loaded word list with %d ids' % len(ids))
self.filter = torch.Tensor(self.tgt_dict.size()).zero_()
for id in ids:
self.filter[id] = 1
self.filter = self.filter.bool()
if opt.cuda:
self.filter = self.filter.cuda()
self.use_filter = True
else:
self.use_filter = False
# Sub-model is used for ensembling Speech and Text models
if opt.sub_model:
self.sub_models = list()
self.sub_model_types = list()
# models are string with | as delimiter
sub_models = opt.sub_model.split("|")
print("Loading sub models ... ")
self.n_sub_models = len(sub_models)
self.sub_type = 'text'
for i, model_path in enumerate(sub_models):
checkpoint = torch.load(model_path,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
model_opt = backward_compatible(model_opt)
if hasattr(model_opt, "enc_not_load_state"):
model_opt.enc_not_load_state = True
model_opt.dec_not_load_state = True
dicts = checkpoint['dicts']
# update special tokens
onmt.constants = add_tokenidx(model_opt, onmt.constants, dicts)
# self.bos_token = model_opt.tgt_bos_word
""""BE CAREFUL: the sub-models might mismatch with the main models in terms of language dict"""
""""REQUIRE RE-matching"""
if i == 0:
if "src" in checkpoint['dicts']:
self.src_dict = checkpoint['dicts']['src']
if opt.verbose:
print('Loading sub-model from %s' % model_path)
model = build_model(model_opt, checkpoint['dicts'], remove_pretrain=True)
optimize_model(model)
model.load_state_dict(checkpoint['model'])
if model_opt.model in model_list:
# if model.decoder.positional_encoder.len_max < self.opt.max_sent_length:
# print("Not enough len to decode. Renewing .. ")
# model.decoder.renew_buffer(self.opt.max_sent_length)
model.renew_buffer(self.opt.max_sent_length)
if opt.fp16:
model = model.half()
if opt.cuda:
model = model.cuda()
else:
model = model.cpu()
if opt.dynamic_quantile == 1:
engines = torch.backends.quantized.supported_engines
if 'fbgemm' in engines:
torch.backends.quantized.engine = 'fbgemm'
else:
print(
"[INFO] fbgemm is not found in the available engines. "
" Possibly the CPU does not support AVX2."
" It is recommended to disable Quantization (set to 0).")
torch.backends.quantized.engine = 'qnnpack'
model = torch.quantization.quantize_dynamic(
model, {torch.nn.LSTM, torch.nn.Linear}, dtype=torch.qint8
)
model.eval()
self.sub_models.append(model)
self.sub_model_types.append(model_opt.model)
else:
self.n_sub_models = 0
self.sub_models = []
if opt.ensemble_weight:
ensemble_weight = [float(item) for item in opt.ensemble_weight.split("|")]
assert len(ensemble_weight) == self.n_models
if opt.sub_ensemble_weight:
sub_ensemble_weight = [float(item) for item in opt.sub_ensemble_weight.split("|")]
assert len(sub_ensemble_weight) == self.n_sub_models
ensemble_weight = ensemble_weight + sub_ensemble_weight
total = sum(ensemble_weight)
self.ensemble_weight = [item / total for item in ensemble_weight]
else:
self.ensemble_weight = None
# Pretrained Classifier is used for combining classifier and speech models
if opt.pretrained_classifier:
self.pretrained_clfs = list()
# models are string with | as delimiter
clfs_models = opt.pretrained_classifier.split("|")
self.n_clfs = len(clfs_models)
for i, model_path in enumerate(clfs_models):
checkpoint = torch.load(model_path,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
model_opt = backward_compatible(model_opt)
clf_dicts = checkpoint['dicts']
if opt.verbose:
print('Loading pretrained classifier from %s' % model_path)
from onmt.model_factory import build_classifier
model = build_classifier(model_opt, clf_dicts)
optimize_model(model)
model.load_state_dict(checkpoint['model'])
if opt.fp16:
model = model.half()
if opt.cuda:
model = model.cuda()
else:
model = model.cpu()
if opt.dynamic_quantile == 1:
engines = torch.backends.quantized.supported_engines
if 'fbgemm' in engines:
torch.backends.quantized.engine = 'fbgemm'
else:
print(
"[INFO] fbgemm is not found in the available engines. "
" Possibly the CPU does not support AVX2."
" It is recommended to disable Quantization (set to 0).")
torch.backends.quantized.engine = 'qnnpack'
model = torch.quantization.quantize_dynamic(
model, {torch.nn.LSTM, torch.nn.Linear}, dtype=torch.qint8
)
model.eval()
self.pretrained_clfs.append(model)
else:
self.n_clfs = 0
self.pretrained_clfs = list()
if "mbart-large-50" in opt.external_tokenizer.lower():
print("[INFO] Using the external MBART50 tokenizer...")
from transformers import MBart50TokenizerFast
try:
self.external_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50",
src_lang=opt.src_lang)
except KeyError as e:
self.external_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50",
src_lang="en_XX")
try:
self.tgt_external_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50",
src_lang=opt.tgt_lang)
except KeyError as e:
self.tgt_external_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50",
src_lang="en_XX")
elif "m2m100" in opt.external_tokenizer.lower():
print("[INFO] Using the external %s tokenizer..." % opt.external_tokenizer)
from transformers import M2M100Tokenizer
self.external_tokenizer = M2M100Tokenizer.from_pretrained(opt.external_tokenizer, src_lang=opt.src_lang)
self.tgt_external_tokenizer = M2M100Tokenizer.from_pretrained(opt.external_tokenizer, src_lang=opt.tgt_lang)
elif "deltalm" in opt.external_tokenizer.lower():
# print("[INFO] Using the external %s tokenizer..." % opt.external_tokenizer)
# from pretrain_module.tokenization_deltalm import DeltaLMTokenizer
# self.external_tokenizer = DeltaLMTokenizer.from_pretrained("facebook/mbart-large-50", src_lang=opt.src_lang)
# self.tgt_external_tokenizer = DeltaLMTokenizer.from_pretrained("facebook/mbart-large-50", src_lang=opt.tgt_lang)
print("[INFO] Using the external %s tokenizer..." % opt.external_tokenizer)
lang_list = sorted(list(self.lang_dict.keys()))
from pretrain_module.tokenization_deltalm import MultilingualDeltaLMTokenizer
self.external_tokenizer = MultilingualDeltaLMTokenizer.from_pretrained("facebook/mbart-large-50",
lang_list=lang_list,
src_lang=opt.src_lang)
self.tgt_external_tokenizer = MultilingualDeltaLMTokenizer.from_pretrained("facebook/mbart-large-50",
lang_list=lang_list,
src_lang=opt.tgt_lang)
else:
self.external_tokenizer = None
self.tgt_external_tokenizer = None
def change_language(self, new_src_lang=None, new_tgt_lang=None, use_srclang_as_bos=True):
if new_src_lang is not None:
self.src_lang = new_src_lang
if new_tgt_lang is not None:
self.tgt_lang = new_tgt_lang
if use_srclang_as_bos:
self.bos_token = self.src_lang
self.bos_id = self.tgt_dict.labelToIdx[self.bos_token]
print("[INFO] New Bos Token: %s Bos_ID: %d" % (self.bos_token, self.bos_id))
else:
self.bos_token = self.tgt_lang
self.bos_id = self.tgt_dict.labelToIdx[self.bos_token]
print("[INFO] New Bos Token: %s Bos_ID: %d" % (self.bos_token, self.bos_id))
self.tgt_bos = self.bos_id
if self.external_tokenizer is not None:
    self.external_tokenizer.src_lang = self.src_lang
if self.tgt_external_tokenizer is not None:
    self.tgt_external_tokenizer.src_lang = self.tgt_lang
def translate_batch(self, batches, sub_batches=None, prefix_tokens=None, anti_prefix=None):
with torch.no_grad():
return self._translate_batch(batches, sub_batches=sub_batches, prefix_tokens=prefix_tokens,
anti_prefix=anti_prefix)
def _translate_batch(self, batches, sub_batches, prefix_tokens=None, anti_prefix=None):
batch = batches[0]
# Batch size is in different location depending on data.
beam_size = self.opt.beam_size
bsz = batch_size = batch.size
max_len = self.opt.max_sent_length
gold_scores = batch.get('source').data.new(batch_size).float().zero_()
gold_words = 0
allgold_scores = []
if batch.has_target:
# Use the first model to decode (also batches[0])
model_ = self.models[0]
gold_words, gold_scores, allgold_scores = model_.decode(batch)
# (3) Start decoding
# initialize buffers
src = batch.get('source')
scores = src.new(bsz * beam_size, max_len + 1).float().fill_(0)
scores_buf = scores.clone()
tokens = src.new(bsz * beam_size, max_len + 2).long().fill_(self.tgt_pad)
tokens_buf = tokens.clone()
tokens[:, 0].fill_(self.tgt_bos)  # first token is bos
# tokens[:, 1].fill_(self.tgt_bos) # first token is bos
attn, attn_buf = None, None
nonpad_idxs = None
src_tokens = src.transpose(0, 1) # batch x time
src_lengths = (src_tokens.ne(self.src_eos) & src_tokens.ne(self.src_pad)).long().sum(dim=1)
blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
batchable_prefix = False
if prefix_tokens is not None:
prefix_tokens = prefix_tokens.to(src.device)
if bsz == 1:
batchable_prefix = True
else:
# check if padding is in prefix
pmask = prefix_tokens.eq(self.tgt_pad).long().sum()
if pmask.item() == 0:
batchable_prefix = True
if batchable_prefix:
prefix_tokens = prefix_tokens.repeat(beam_size, 1)
for b in range(bsz * beam_size):
for l in range(min(max_len + 2, prefix_tokens.size(1))):
tokens[b, l].fill_(prefix_tokens[b, l])
# In this case, the scores of the prefix positions should be 0
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfinalized_scores=None):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
scores for each hypothesis
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2]  # skip the first index, which is BOS
assert not tokens_clone.eq(self.tgt_eos).any()
tokens_clone[:, step] = self.tgt_eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step + 2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
# if self.match_source_len and step > src_lengths[unfin_idx]:
# score = -math.inf
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': None,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
reorder_state = None
batch_idxs = None
# initialize the decoder state, including:
# - expanding the context over the batch dimension len_src x (B*beam) x H
# - expanding the mask over the batch dimension (B*beam) x len_src
decoder_states = dict()
sub_decoder_states = dict() # for sub-model
for i in range(self.n_models):
# if self.opt.pretrained_classifier:
# pretrained_layer_states = self.pretrained_clfs[i].encode(batches[i])
# else:
# pretrained_layer_states = None
pretrained_clf = self.pretrained_clfs[i] if self.opt.pretrained_classifier else None
decoder_states[i] = self.models[i].create_decoder_state(batches[i], beam_size, type=2,
buffering=self.buffering,
pretrained_classifier=pretrained_clf)
if self.opt.sub_model:
for i in range(self.n_sub_models):
sub_decoder_states[i] = self.sub_models[i].create_decoder_state(sub_batches[i], beam_size, type=2,
buffering=self.buffering)
if self.dynamic_max_len:
src_len = src.size(0)
max_len = math.ceil(int(src_len) * self.dynamic_max_len_scale)
# Start decoding
if prefix_tokens is not None:
if batchable_prefix:
# for this case we run the whole prefix as a preparation step,
# decoding starts from the last of the prefix
step = prefix_tokens.size(1) - 1
else:
# in this case we run decoding as usual but filter the output words for prefix
step = 0
else:
step = 0
# step = 0 if (prefix_tokens is None and bsz == 1) else prefix_tokens.size(1) - 1
# for step in range(max_len + 1): # one extra step for EOS marker
while step < (max_len + 1):
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
for i, model in enumerate(self.models):
decoder_states[i]._reorder_incremental_state(reorder_state)
for i, model in enumerate(self.sub_models):
sub_decoder_states[i]._reorder_incremental_state(reorder_state)
decode_input = tokens[:, :step + 1]
# print(batches[0].get('source'))
# print(decode_input)
lprobs, avg_attn_scores = self._decode(decode_input, decoder_states,
sub_decoder_states=sub_decoder_states)
avg_attn_scores = None
lprobs = lprobs.contiguous()
if self.use_filter:
# the allowed words are marked with 1, so fill everything else with -inf
lprobs.masked_fill_(~self.filter.unsqueeze(0), -math.inf)
lprobs[:, self.tgt_pad] = -math.inf # never select pad
# handle min and max length constraints
if step >= max_len:
lprobs[:, :self.tgt_eos] = -math.inf
lprobs[:, self.tgt_eos + 1:] = -math.inf
elif step < self.min_len:
lprobs[:, self.tgt_eos] = -math.inf
# handle prefix tokens (possibly with different lengths)
# here prefix tokens is a list of word-ids
if prefix_tokens is not None and not batchable_prefix:
if step < prefix_tokens.size(1) and step < max_len:
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.tgt_pad)
# originally -inf here, but that can produce nan, which is dangerous;
# a large negative number is a safer choice
lprobs[prefix_mask] = torch.tensor(-21111993).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# lprobs[prefix_mask].scatter_()
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.tgt_eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
def replicate_first_beam(tensor, mask):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
# copy tokens, scores and lprobs from the first beam to all beams
tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
scores = replicate_first_beam(scores, eos_mask_batch_dim)
lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
else:
# force tgt_eos to not appear
lprobs[:, self.tgt_eos] = -math.inf
if anti_prefix is not None:
# check the step closest to the end of anti prefix
if step == len(anti_prefix) - 1:
_anti_prefix = anti_prefix[step]
for i in range(tokens.size(0)):
decoded_ = tokens[i][1:step+1]
if decoded_.tolist() == anti_prefix[:-1]:
lprobs[i, _anti_prefix] = -math.inf
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
attn_buf = attn.clone()
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
scores_buf = scores_buf.type_as(lprobs)
eos_bbsz_idx = buffer('eos_bbsz_idx')
eos_scores = buffer('eos_scores', type_of=scores)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
else:
banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos (except for blacklisted ones)
eos_mask = cand_indices.eq(self.tgt_eos)
eos_mask[:, :beam_size][blacklist] = 0
# only consider eos when it's among the top beam_size indices
torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_bbsz_idx.resize_(0),
)
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_scores.resize_(0),
)
finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
# assert step < max_len
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero(as_tuple=False).squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None and not batchable_prefix:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
blacklist = blacklist[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# blacklisted hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
active_mask = buffer('active_mask')
eos_mask[:, :beam_size] |= blacklist
torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
out=active_mask.resize_(0),
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_blacklist.resize_(0), active_hypos.resize_(0))
)
# update blacklist to ignore any finalized hypos
blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
assert (~blacklist).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx.resize_(0),
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
step = step + 1
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized, gold_scores, gold_words, allgold_scores
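    # Hedged sketch (added for illustration, not part of the original code): the beam bookkeeping
    # above flattens (sentence, beam) pairs into a single "bbsz" index, bbsz_idx = sent * beam_size + beam,
    # which is exactly what cand_beams.add(bbsz_offsets) computes. A minimal stand-alone check:
    @staticmethod
    def _bbsz_index_sketch(bsz=2, beam_size=3):
        bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1)  # shape (bsz, 1)
        cand_beams = torch.tensor([[0, 2, 1], [1, 0, 2]])               # per-sentence beam ids
        cand_bbsz_idx = cand_beams + bbsz_offsets                       # flat ids in [0, bsz * beam_size)
        return cand_bbsz_idx                                            # tensor([[0, 2, 1], [4, 3, 5]])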
def _decode(self, tokens, decoder_states, sub_decoder_states=None):
# require batch first for everything
outs = dict()
attns = dict()
for i in range(self.n_models):
# decoder output contains the log-prob distribution of the next step
decoder_output = self.models[i].step(tokens, decoder_states[i])
outs[i] = decoder_output['log_prob']
attns[i] = decoder_output['coverage']
for j in range(self.n_sub_models):
sub_decoder_output = self.sub_models[j].step(tokens, sub_decoder_states[j])
outs[self.n_models + j] = sub_decoder_output['log_prob']
out = self._combine_outputs(outs, weight=self.ensemble_weight)
# attn = self._combine_attention(attns)
        if self.vocab_size > out.size(-1):
            # some models expose a smaller output layer; shrink the tracked vocabulary size to match
            self.vocab_size = out.size(-1)
        # attention is not combined here; it is not needed during decoding in this translator
        attn = None
return out, attn
def build_prefix(self, prefixes, bsz=None):
"""
:param bsz:
:param prefixes: List of strings
:return:
"""
if self.external_tokenizer is None:
prefix_data = [self.tgt_dict.convertToIdx(sent.split(),
onmt.constants.UNK_WORD)
for sent in prefixes]
else:
            # drop the last element, which is <eos>
if self.opt.force_bos:
_prefix_data = [torch.LongTensor([self.bos_id] + self.external_tokenizer(sent)['input_ids'][:-1])
for sent in prefixes]
else:
_prefix_data = [torch.LongTensor(self.external_tokenizer(sent)['input_ids'][:-1])
for sent in prefixes]
prefix_data = _prefix_data
#
new_prefix_data = []
#
for prefix_tensor in prefix_data:
if "MultilingualDeltaLM" in self.external_tokenizer.__class__.__name__:
pass
else:
prefix_tensor[0] = self.bos_id
new_prefix_data.append(prefix_tensor)
#
prefix_data = new_prefix_data
# _listed_tensor = prefix_tensor.tolist()
# if _listed_tensor[0] == self.tgt_bos:
# _listed_tensor = _listed_tensor[1:]
# if _listed_tensor[0] == self.tgt_eos:
# _listed_tensor = _listed_tensor[:-1]
# prefix_data.append(torch.LongTensor(_listed_tensor))
# clone the same prefix for multiple sentences
if len(prefix_data) == 1 and bsz > 1:
prefix_data = prefix_data * bsz
# collate into the same tensor with padding
lengths = [x.size(0) for x in prefix_data]
max_length = max(lengths)
tensor = prefix_data[0].new(len(prefix_data), max_length).fill_(self.tgt_pad)
for i in range(len(prefix_data)):
data_length = prefix_data[i].size(0)
offset = 0
tensor[i].narrow(0, offset, data_length).copy_(prefix_data[i])
return tensor
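    # Hedged illustration (added, not part of the original translator): build_prefix pads a list of
    # variable-length prefix tensors into a single (n, max_len) LongTensor. A minimal version of that
    # collation, with the pad id passed in explicitly since the real code uses self.tgt_pad:
    @staticmethod
    def _collate_prefix_sketch(prefix_data, pad_id):
        max_length = max(x.size(0) for x in prefix_data)
        tensor = prefix_data[0].new(len(prefix_data), max_length).fill_(pad_id)
        for i, prefix in enumerate(prefix_data):
            tensor[i].narrow(0, 0, prefix.size(0)).copy_(prefix)
        return tensor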
def build_anti_prefix(self, anti_prefix):
"""
:param bsz:
:param prefixes: List of strings
:return:
"""
if self.external_tokenizer is None:
anti_prefix = self.tgt_dict.convertToIdx(anti_prefix.split(),
onmt.constants.UNK_WORD)
else:
            # drop the last element, which is <eos>
# if self.opt.force_bos:
# _prefix_data = [torch.LongTensor([self.bos_id] + self.external_tokenizer(sent)['input_ids'][:-1])
# for sent in prefixes]
# else:
# _prefix_data = [torch.LongTensor(self.external_tokenizer(sent)['input_ids'][:-1])
# for sent in prefixes]
_anti_prefix_data = self.external_tokenizer(anti_prefix)['input_ids'][:-1]
_anti_prefix_data = _anti_prefix_data[1:]
anti_prefix = torch.LongTensor(_anti_prefix_data)
anti_prefix = anti_prefix.tolist()
return anti_prefix
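    # Hedged sketch (added for illustration): during search the anti-prefix is used to ban its final
    # token whenever the tokens decoded so far match the rest of it (see the step loop above).
    # Minimal stand-alone version of that ban, assuming lprobs is a 1-D tensor over the vocabulary:
    @staticmethod
    def _ban_anti_prefix_sketch(lprobs, decoded, anti_prefix):
        if decoded == anti_prefix[:-1]:
            lprobs[anti_prefix[-1]] = -math.inf
        return lprobs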
# override the "build_data" from parent Translator
def build_data(self, src_sents, tgt_sents, type='mt', past_sents=None):
# This needs to be the same as preprocess.py.
data_type = 'text'
if type == 'mt':
if self.external_tokenizer is None:
# TODO: add external tokenizer
if self.start_with_bos:
src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD)
for b in src_sents]
else:
src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD)
for b in src_sents]
if past_sents is not None:
if self.start_with_bos:
past_src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD)
for b in past_sents]
else:
past_src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD)
for b in past_sents]
else:
past_src_data = None
else:
src_data = [torch.LongTensor(self.external_tokenizer(" ".join(b))['input_ids'])
for b in src_sents]
if past_sents is not None:
past_src_data = [torch.LongTensor(self.external_tokenizer(" ".join(b))['input_ids'])
                                     for b in past_sents]
else:
past_src_data = None
elif type == 'asr':
# no need to deal with this
src_data = src_sents
past_src_data = past_sents
data_type = 'audio'
elif type == 'asr_wav':
src_data = src_sents
past_src_data = past_sents
data_type = 'wav'
else:
raise NotImplementedError
tgt_bos_word = self.opt.bos_token
if self.opt.no_bos_gold:
tgt_bos_word = None
tgt_data = None
if tgt_sents:
if self.tgt_external_tokenizer is not None:
tgt_data = [torch.LongTensor(self.tgt_external_tokenizer(" ".join(b))['input_ids'])
for b in tgt_sents]
else:
tgt_data = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
tgt_bos_word,
onmt.constants.EOS_WORD) for b in tgt_sents]
if self.src_lang in self.lang_dict:
src_lang_data = [torch.Tensor([self.lang_dict[self.src_lang]])]
else:
src_lang_data = [torch.Tensor([0])]
if self.tgt_lang in self.lang_dict:
tgt_lang_data = [torch.Tensor([self.lang_dict[self.tgt_lang]])]
else:
tgt_lang_data = [torch.Tensor([0])]
try:
src_atb = self.opt.src_atb
if src_atb in self.atb_dict:
src_atb_data = [torch.Tensor([self.atb_dict[src_atb]])]
else:
src_atb_data = None
except AttributeError:
src_atb_data = None
try:
tgt_atb = self.opt.tgt_atb
if tgt_atb in self.atb_dict:
tgt_atb_data = [torch.Tensor([self.atb_dict[tgt_atb]])]
else:
tgt_atb_data = None
except AttributeError:
tgt_atb_data = None
return onmt.Dataset(src_data, tgt_data,
src_langs=src_lang_data, tgt_langs=tgt_lang_data,
src_atbs=src_atb_data, tgt_atbs=tgt_atb_data,
batch_size_words=sys.maxsize,
batch_size_frames=sys.maxsize,
cut_off_size=sys.maxsize,
smallest_batch_size=sys.maxsize,
max_src_len=sys.maxsize,
data_type=data_type,
batch_size_sents=sys.maxsize,
src_align_right=self.opt.src_align_right,
past_src_data=past_src_data)
def translate(self, src_data, tgt_data, past_src_data=None, sub_src_data=None, type='mt',
prefix=None, anti_prefix=None):
if past_src_data is None or len(past_src_data) == 0:
past_src_data = None
# (1) convert words to indexes
if isinstance(src_data[0], list) and type in ['asr', 'asr_wav']:
batches = list()
for i, src_data_ in enumerate(src_data):
if past_src_data is not None:
past_src_data_ = past_src_data[i]
else:
past_src_data_ = None
dataset = self.build_data(src_data_, tgt_data, type=type, past_sents=past_src_data_)
batch = dataset.get_batch(0)
batches.append(batch)
elif isinstance(src_data[0], list) and isinstance(src_data[0][0], list):
src_data = src_data[0]
dataset = self.build_data(src_data, tgt_data, type=type, past_sents=past_src_data)
batch = dataset.get_batch(0) # this dataset has only one mini-batch
batches = [batch] * self.n_models
src_data = [src_data] * self.n_models
else:
dataset = self.build_data(src_data, tgt_data, type=type, past_sents=past_src_data)
batch = dataset.get_batch(0) # this dataset has only one mini-batch
batches = [batch] * self.n_models
src_data = [src_data] * self.n_models
if sub_src_data is not None and len(sub_src_data) > 0:
sub_dataset = self.build_data(sub_src_data, tgt_data, type='mt')
sub_batch = sub_dataset.get_batch(0)
sub_batches = [sub_batch] * self.n_sub_models
sub_src_data = [sub_src_data] * self.n_sub_models
else:
sub_batches, sub_src_data = None, None
batch_size = batches[0].size
if self.cuda:
for i, _ in enumerate(batches):
batches[i].cuda(fp16=self.fp16)
if sub_batches:
for i, _ in enumerate(sub_batches):
sub_batches[i].cuda(fp16=self.fp16)
if prefix is not None:
prefix_tensor = self.build_prefix(prefix, bsz=batch_size)
else:
prefix_tensor = None
if anti_prefix is not None:
anti_prefix = self.build_anti_prefix(anti_prefix)
print("ANTI PREFIX:", anti_prefix)
# (2) translate
# each model in the ensemble uses one batch in batches
finalized, gold_score, gold_words, allgold_words = self.translate_batch(batches, sub_batches=sub_batches,
prefix_tokens=prefix_tensor,
anti_prefix=anti_prefix)
pred_length = []
# (3) convert indexes to words
pred_batch = []
pred_ids = []
src_data = src_data[0]
for b in range(batch_size):
# probably when the src is empty so beam search stops immediately
if len(finalized[b]) == 0:
# assert len(src_data[b]) == 0, "The target search result is empty, assuming that the source is empty."
pred_batch.append(
[self.build_target_tokens([], src_data[b], None)
for n in range(self.opt.n_best)]
)
pred_ids.append([[] for n in range(self.opt.n_best)])
else:
pred_batch.append(
[self.build_target_tokens(finalized[b][n]['tokens'], src_data[b], None)
for n in range(self.opt.n_best)]
)
pred_ids.append([finalized[b][n]['tokens'] for n in range(self.opt.n_best)])
pred_score = []
for b in range(batch_size):
if len(finalized[b]) == 0:
pred_score.append(
[torch.FloatTensor([0])
for n in range(self.opt.n_best)]
)
else:
pred_score.append(
[torch.FloatTensor([finalized[b][n]['score']])
for n in range(self.opt.n_best)]
)
return pred_batch, pred_ids, pred_score, pred_length, gold_score, gold_words, allgold_words
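# Hedged usage sketch (added for illustration, not part of the original file): translate() returns,
# per input sentence, a list of n-best token lists plus ids and per-hypothesis scores. Assuming
# pred_batch and pred_score come from a call to translate(), the n-best lists could be flattened as:
def _format_nbest_sketch(pred_batch, pred_score):
    lines = []
    for b, (hyps, scores) in enumerate(zip(pred_batch, pred_score)):
        for n, (tokens, score) in enumerate(zip(hyps, scores)):
            lines.append("sent %d, hyp %d (%.4f): %s" % (b, n + 1, score.item(), " ".join(tokens)))
    return lines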
| 48,877
| 42.641071
| 126
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/inference/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/inference/search.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import onmt
class Search(object):
def __init__(self, tgt_dict):
# self.pad = onmt.constants.PAD
# self.unk = onmt.constants.UNK
# self.eos = onmt.constants.EOS
# self.bos = onmt.constants.BOS
self.vocab_size = tgt_dict.size()
self.scores_buf = None
self.indices_buf = None
self.beams_buf = None
def _init_buffers(self, t):
if self.scores_buf is None:
self.scores_buf = t.new()
self.indices_buf = torch.LongTensor().to(device=t.device)
def step(self, step, lprobs, scores, beam_size):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
scores: (bsz x input_beam_size x step)
the historical model scores of each hypothesis up to this point
Return: A tuple of (scores, indices, beams) where:
scores: (bsz x output_beam_size)
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: (bsz x output_beam_size)
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
:param lprobs:
:param step:
:param scores:
:param beam_size:
"""
raise NotImplementedError
def set_src_lengths(self, src_lengths):
self.src_lengths = src_lengths
class BeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
def step(self, step, lprobs, scores, initial_score=None, **kwargs):
super()._init_buffers(lprobs)
# batch size first, then beam size
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
if initial_score is None or torch.sum(initial_score).item() == 0:
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
lprobs.add_(initial_score.unsqueeze(-1))
# if we don't do this, the first beam will contain top K of exactly the same thing ...
else:
# make probs contain cumulative scores for each hypothesis
lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
# here lprobs should be (bsz, beam_size, V) (in streaming, bsz should be 1)
torch.topk(
lprobs.view(bsz, -1), # after view, it should be (bsz, beam_size x V)
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - beam_size, # -beam_size so we never select pad (beam_size times)
),
out=(self.scores_buf.resize_(0), self.indices_buf.resize_(0)),
)
# torch.div(self.indices_buf, vocab_size, out=self.beams_buf)
        # beams_buf tells us which input beam each selected candidate originated from
self.beams_buf = torch.true_divide(self.indices_buf, vocab_size).long()
# indices: the word indices in the vocabulary
self.indices_buf.fmod_(vocab_size)
return self.scores_buf, self.indices_buf, self.beams_buf
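# Hedged usage sketch (added for illustration, not part of the original file): a minimal,
# self-contained call to BeamSearch.step at step 0. The tiny dictionary stand-in below is
# hypothetical; Search.__init__ only needs its .size() method.
def _beam_search_step0_sketch(bsz=2, beam_size=4, vocab=8):
    class _ToyDict:
        def size(self):
            return vocab
    search = BeamSearch(_ToyDict())
    # (bsz, beam_size, vocab) log-probabilities; at step 0 only the first beam per sentence is used
    lprobs = torch.log_softmax(torch.randn(bsz, beam_size, vocab), dim=-1)
    scores, indices, beams = search.step(0, lprobs, scores=None)
    # each output has shape (bsz, k) with k = min(2 * beam_size, vocab - beam_size) here
    return scores, indices, beams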
class DiverseBeamSearch(Search):
"""Diverse Beam Search.
See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
Models" for details.
We only implement the Hamming Diversity penalty here, which performed best
in the original paper.
"""
def __init__(self, tgt_dict, num_groups, diversity_strength):
super().__init__(tgt_dict)
self.num_groups = num_groups
self.diversity_strength = -diversity_strength
self.diversity_buf = None
self.beam = BeamSearch(tgt_dict)
def step(self, step, lprobs, scores):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
if beam_size % self.num_groups != 0:
raise ValueError(
'DiverseBeamSearch requires --beam to be divisible by the number of groups'
)
group_size = beam_size // self.num_groups
# initialize diversity penalty
if self.diversity_buf is None:
self.diversity_buf = lprobs.new()
torch.zeros(lprobs[:, 0, :].size(), out=self.diversity_buf)
scores_G, indices_G, beams_G = [], [], []
for g in range(self.num_groups):
lprobs_g = lprobs[:, g::self.num_groups, :]
scores_g = scores[:, g::self.num_groups, :] if step > 0 else None
# apply diversity penalty
if g > 0:
                lprobs_g = torch.add(lprobs_g, self.diversity_buf.unsqueeze(1), alpha=self.diversity_strength)
else:
lprobs_g = lprobs_g.contiguous()
scores_buf, indices_buf, beams_buf = self.beam.step(step, lprobs_g, scores_g)
beams_buf.mul_(self.num_groups).add_(g)
scores_G.append(scores_buf.clone())
indices_G.append(indices_buf.clone())
beams_G.append(beams_buf.clone())
# update diversity penalty
self.diversity_buf.scatter_add_(
1,
indices_buf,
self.diversity_buf.new_ones(indices_buf.size())
)
# interleave results from different groups
self.scores_buf = torch.stack(scores_G, dim=2, out=self.scores_buf).view(bsz, -1)
self.indices_buf = torch.stack(indices_G, dim=2, out=self.indices_buf).view(bsz, -1)
self.beams_buf = torch.stack(beams_G, dim=2, out=self.beams_buf).view(bsz, -1)
return self.scores_buf, self.indices_buf, self.beams_buf
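# Hedged illustration (added, not part of the original file): the Hamming diversity penalty above
# counts, per vocabulary item, how often earlier groups already picked it, and lowers the log-probs
# of later groups by diversity_strength times that count. A minimal stand-alone sketch:
def _hamming_diversity_sketch(diversity_strength=0.5, vocab=6):
    penalty = torch.zeros(1, vocab)                                  # one count vector per sentence
    picked_by_group0 = torch.tensor([[2, 4]])                        # token ids chosen by group 0
    penalty.scatter_add_(1, picked_by_group0, torch.ones(1, 2))      # count the picks
    lprobs_group1 = torch.log_softmax(torch.randn(1, vocab), dim=-1)
    return lprobs_group1 - diversity_strength * penalty              # tokens 2 and 4 are discouraged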
class Sampling(Search):
sampling_topk: int
sampling_topp: float
def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0):
super().__init__(tgt_dict)
self.sampling_topk = sampling_topk
self.sampling_topp = sampling_topp
def _sample_topp(self, lprobs):
"""Sample among the smallest set of elements whose cumulative probability mass exceeds p.
See `"The Curious Case of Neural Text Degeneration"
(Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.
Args:
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
Return: A tuple of (trimed_probs, truncated_indices) where:
trimed_probs: (bsz x input_beam_size x ?)
the model's probabilities over the elements selected to sample from. The
width of the third dimension is determined by top-P.
truncated_indices: (bsz x input_beam_size x ?)
the indices of the chosen elements.
"""
probs = lprobs.exp_()
# sort the last dimension (vocab dimension) in descending order
sorted_probs, sorted_indices = probs.sort(descending=True)
# compute a mask to indicate the words to be included in the top-P set.
cumsum_probs = sorted_probs.cumsum(dim=2)
mask = cumsum_probs.lt(self.sampling_topp)
# note that mask was computed by 'lt'. One more word needs to be included
# so that the cumulative probability mass can exceed p.
cumsum_mask = mask.cumsum(dim=2)
last_included = cumsum_mask[:, :, -1:]
last_included.clamp_(0, mask.size()[2] - 1)
mask = mask.scatter_(2, last_included, 1)
# truncate unnecessary dims.
max_dim = last_included.max()
truncated_mask = mask[:, :, : max_dim + 1]
truncated_probs = sorted_probs[:, :, : max_dim + 1]
truncated_indices = sorted_indices[:, :, : max_dim + 1]
# trim the words that are not in top-P by setting their probabilities
# to 0, so that they would not be sampled later.
trim_mask = ~truncated_mask
trimed_probs = truncated_probs.masked_fill_(trim_mask, 0)
return trimed_probs, truncated_indices
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens = None,
original_batch_idxs = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
if self.sampling_topp > 0:
# only sample from the smallest set of words whose cumulative probability mass exceeds p
probs, top_indices = self._sample_topp(lprobs)
elif self.sampling_topk > 0:
# only sample from top-k candidates
lprobs, top_indices = lprobs.topk(self.sampling_topk)
probs = lprobs.exp_()
else:
probs = lprobs.exp_()
# dummy data to be consistent with true branch for type check
top_indices = torch.empty(0).to(probs)
# sample
if step == 0:
indices_buf = torch.multinomial(
probs.view(bsz, -1),
beam_size,
replacement=True,
).view(bsz, beam_size)
else:
indices_buf = torch.multinomial(
probs.view(bsz * beam_size, -1),
1,
replacement=True,
).view(bsz, beam_size)
if step == 0:
# expand to beam size
probs = probs.expand(bsz, beam_size, -1)
# gather scores
scores_buf = torch.gather(probs, dim=2, index=indices_buf.unsqueeze(-1))
scores_buf = scores_buf.log_().view(bsz, -1)
# remap indices if using top-k or top-P sampling
if self.sampling_topk > 0 or self.sampling_topp > 0:
indices_buf = torch.gather(
top_indices.expand(bsz, beam_size, -1),
dim=2,
index=indices_buf.unsqueeze(-1),
).squeeze(2)
if step == 0:
beams_buf = indices_buf.new_zeros(bsz, beam_size)
else:
beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1)
# make scores cumulative
scores_buf.add_(
torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf)
)
return scores_buf, indices_buf, beams_buf
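# Hedged stand-alone sketch (added for illustration, not part of the original file) of the top-p
# truncation implemented in Sampling._sample_topp: keep the highest-probability tokens whose
# cumulative mass first exceeds p and drop the rest (renormalized here only for clarity).
def _top_p_sketch(probs, p=0.9):
    sorted_probs, sorted_indices = probs.sort(dim=-1, descending=True)
    cumsum = sorted_probs.cumsum(dim=-1)
    # keep a token while the mass accumulated *before* it is still below p,
    # which also keeps the single token that pushes the total over p
    keep = (cumsum - sorted_probs) < p
    trimmed = sorted_probs * keep.type_as(sorted_probs)
    return trimmed / trimmed.sum(dim=-1, keepdim=True), sorted_indices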
| 11,084
| 37.224138
| 112
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/inference/ColdFusionTranslator.py
|
import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from onmt.model_factory import build_model, build_fusion, build_language_model
from ae.Autoencoder import Autoencoder
import torch.nn.functional as F
import sys
model_list = ['transformer', 'stochastic_transformer', 'fusion_network']
class EnsembleTranslator(object):
def __init__(self, opt):
self.opt = opt
self.tt = torch.cuda if opt.cuda else torch
self.beam_accum = None
self.beta = opt.beta
self.alpha = opt.alpha
self.start_with_bos = opt.start_with_bos
self.fp16 = opt.fp16
self.models = list()
self.model_types = list()
# models are string with | as delimiter
models = opt.model.split("|")
print(models)
self.n_models = len(models)
self._type = 'text'
for i, model in enumerate(models):
if opt.verbose:
print('Loading model from %s' % model)
checkpoint = torch.load(model,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
if i == 0:
if "src" in checkpoint['dicts']:
self.src_dict = checkpoint['dicts']['src']
else:
self._type = "audio"
self.tgt_dict = checkpoint['dicts']['tgt']
# Build model from the saved option
# if hasattr(model_opt, 'fusion') and model_opt.fusion == True:
# print("* Loading a FUSION model")
# model = build_fusion(model_opt, checkpoint['dicts'])
# else:
# model = build_model(model_opt, checkpoint['dicts'])
model = build_model(model_opt)
model.load_state_dict(checkpoint['model'])
if model_opt.model in model_list:
# if model.decoder.positional_encoder.len_max < self.opt.max_sent_length:
# print("Not enough len to decode. Renewing .. ")
# model.decoder.renew_buffer(self.opt.max_sent_length)
model.renew_buffer(self.opt.max_sent_length)
if opt.fp16:
model = model.half()
if opt.cuda:
model = model.cuda()
else:
model = model.cpu()
model.eval()
self.models.append(model)
self.model_types.append(model_opt.model)
# language model
if opt.lm is not None:
if opt.verbose:
print('Loading language model from %s' % opt.lm)
lm_chkpoint = torch.load(opt.lm, map_location=lambda storage, loc: storage)
lm_opt = lm_chkpoint['opt']
lm_model = build_language_model(lm_opt, lm_chkpoint['dicts'])
if opt.fp16:
lm_model = lm_model.half()
if opt.cuda:
lm_model = lm_model.cuda()
else:
lm_model = lm_model.cpu()
self.lm_model = lm_model
self.cuda = opt.cuda
self.ensemble_op = opt.ensemble_op
if opt.autoencoder is not None :
if opt.verbose:
print('Loading autoencoder from %s' % opt.autoencoder)
checkpoint = torch.load(opt.autoencoder,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
#posSize= checkpoint['autoencoder']['nmt.decoder.positional_encoder.pos_emb'].size(0)
#self.models[0].decoder.renew_buffer(posSize)
#self.models[0].decoder.renew_buffer(posSize)
# Build model from the saved option
self.autoencoder = Autoencoder(self.models[0],model_opt)
self.autoencoder.load_state_dict(checkpoint['autoencoder'])
if opt.cuda:
self.autoencoder = self.autoencoder.cuda()
self.models[0] = self.models[0].cuda()
else:
self.autoencoder = self.autoencoder.cpu()
self.models[0] = self.models[0].cpu()
if opt.fp16:
self.autoencoder = self.autoencoder.half()
self.models[0] = self.models[0].half()
if opt.verbose:
print('Done')
def init_beam_accum(self):
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
# Combine distributions from different models
def _combine_outputs(self, outputs):
if len(outputs) == 1:
return outputs[0]
if self.ensemble_op == "logSum":
output = (outputs[0])
# sum the log prob
for i in range(1, len(outputs)):
output += (outputs[i])
            output.div_(len(outputs))
# output = torch.log(output)
output = F.log_softmax(output, dim=-1)
elif self.ensemble_op == "mean":
output = torch.exp(outputs[0])
# sum the log prob
for i in range(1, len(outputs)):
output += torch.exp(outputs[i])
            output.div_(len(outputs))
# output = torch.log(output)
output = torch.log(output)
elif self.ensemble_op == 'gmean':
output = torch.exp(outputs[0])
# geometric mean of the probabilities
for i in range(1, len(outputs)):
output *= torch.exp(outputs[i])
# have to normalize
output.pow_(1.0 / float(len(outputs)))
norm_ = torch.norm(output, p=1, dim=-1)
output.div_(norm_.unsqueeze(-1))
output = torch.log(output)
else:
            raise ValueError('Ensemble operator needs to be "mean" or "logSum", the current value is %s' % self.ensemble_op)
return output
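    # Hedged sketch (added for illustration, not part of the original class): with ensemble_op == "mean",
    # the combination above averages the models' probabilities and returns log-probabilities.
    # A minimal two-model version of that arithmetic:
    @staticmethod
    def _mean_ensemble_sketch(log_p_a, log_p_b):
        probs = (torch.exp(log_p_a) + torch.exp(log_p_b)) / 2.0
        return torch.log(probs)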
# Take the average of attention scores
def _combine_attention(self, attns):
attn = attns[0]
for i in range(1, len(attns)):
attn += attns[i]
        attn.div_(len(attns))
return attn
def build_data(self, src_sents, tgt_sents):
# This needs to be the same as preprocess.py.
if self.start_with_bos:
src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD)
for b in src_sents]
else:
src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD)
for b in src_sents]
tgt_data = None
if tgt_sents:
tgt_data = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD,
onmt.constants.EOS_WORD) for b in tgt_sents]
        return onmt.Dataset(src_data, tgt_data, sys.maxsize,
                            data_type=self._type,
                            batch_size_sents=self.opt.batch_size)
def build_asr_data(self, src_data, tgt_sents):
# This needs to be the same as preprocess.py.
tgt_data = None
if tgt_sents:
tgt_data = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD,
onmt.constants.EOS_WORD) for b in tgt_sents]
return onmt.Dataset(src_data, tgt_data, sys.maxsize,
                            data_type=self._type, batch_size_sents=self.opt.batch_size)
def build_target_tokens(self, pred, src, attn):
tokens = self.tgt_dict.convertToLabels(pred, onmt.constants.EOS)
tokens = tokens[:-1] # EOS
return tokens
def translate_batch(self, batch):
torch.set_grad_enabled(False)
# Batch size is in different location depending on data.
beam_size = self.opt.beam_size
batch_size = batch.size
gold_scores = batch.get('source').data.new(batch_size).float().zero_()
gold_words = 0
allgold_scores = []
if batch.has_target:
# Use the first model to decode
model_ = self.models[0]
gold_words, gold_scores, allgold_scores = model_.decode(batch)
# (3) Start decoding
# time x batch * beam
# initialize the beam
beam = [onmt.Beam(beam_size, self.opt.cuda) for k in range(batch_size)]
batch_idx = list(range(batch_size))
remaining_sents = batch_size
decoder_states = dict()
for i in range(self.n_models):
decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size)
if self.opt.lm:
lm_decoder_states = self.lm_model.create_decoder_state(batch, beam_size)
for i in range(self.opt.max_sent_length):
# Prepare decoder input.
# input size: 1 x ( batch * beam )
input = torch.stack([b.getCurrentState() for b in beam
if not b.done]).t().contiguous().view(1, -1)
decoder_input = input
# require batch first for everything
outs = dict()
attns = dict()
for k in range(self.n_models):
# decoder_hidden, coverage = self.models[k].decoder.step(decoder_input.clone(), decoder_states[k])
decoder_output = self.models[k].step(decoder_input.clone(), decoder_states[k])
outs[k] = decoder_output['log_prob']
attns[k] = decoder_output['coverage']
# outs[k] = self.models[k].generator[0](decoder_hidden)
# take the last decoder state
# decoder_hidden = decoder_hidden.squeeze(1)
# attns[k] = coverage[:, -1, :].squeeze(1) # batch * beam x src_len
# if(hasattr(self, 'autoencoder') and self.autoencoder
# and self.autoencoder.representation == "DecoderHiddenState"):
# decoder_hidden = self.autoencoder.autocode(decoder_hidden)
# batch * beam x vocab_size
out = self._combine_outputs(outs)
attn = self._combine_attention(attns)
if self.opt.lm:
lm_decoder_output = self.lm_model.step(decoder_input.clone(), lm_decoder_states)
# fusion
out = out + 0.3 * lm_decoder_output
word_lk = out.view(beam_size, remaining_sents, -1) \
.transpose(0, 1).contiguous()
attn = attn.view(beam_size, remaining_sents, -1) \
.transpose(0, 1).contiguous()
active = []
for b in range(batch_size):
if beam[b].done:
continue
idx = batch_idx[b]
if not beam[b].advance(word_lk.data[idx], attn.data[idx]):
active += [b]
for j in range(self.n_models):
decoder_states[j].update_beam(beam, b, remaining_sents, idx)
if not active:
break
# in this section, the sentences that are still active are
# compacted so that the decoder is not run on completed sentences
active_idx = self.tt.LongTensor([batch_idx[k] for k in active])
batch_idx = {beam: idx for idx, beam in enumerate(active)}
for j in range(self.n_models):
decoder_states[j].prune_complete_beam(active_idx, remaining_sents)
remaining_sents = len(active)
# (4) package everything up
all_hyp, all_scores, all_attn = [], [], []
n_best = self.opt.n_best
all_lengths = []
for b in range(batch_size):
scores, ks = beam[b].sortBest()
all_scores += [scores[:n_best]]
hyps, attn, length = zip(*[beam[b].getHyp(k) for k in ks[:n_best]])
all_hyp += [hyps]
all_lengths += [length]
# if(src_data.data.dim() == 3):
if self.opt.encoder_type == 'audio':
valid_attn = decoder_states[0].original_src.narrow(2,0,1).squeeze(2)[:, b].ne(onmt.constants.PAD) \
.nonzero().squeeze(1)
else:
valid_attn = decoder_states[0].original_src[:, b].ne(onmt.constants.PAD) \
.nonzero().squeeze(1)
attn = [a.index_select(1, valid_attn) for a in attn]
all_attn += [attn]
if self.beam_accum:
self.beam_accum["beam_parent_ids"].append(
[t.tolist()
for t in beam[b].prevKs])
self.beam_accum["scores"].append([
["%4f" % s for s in t.tolist()]
for t in beam[b].all_scores][1:])
self.beam_accum["predicted_ids"].append(
[[self.tgt_dict.getLabel(id)
for id in t.tolist()]
for t in beam[b].nextYs][1:])
torch.set_grad_enabled(True)
return all_hyp, all_scores, all_attn, all_lengths, gold_scores, gold_words, allgold_scores
def translate(self, src_data, tgt_data):
# (1) convert words to indexes
dataset = self.build_data(src_data, tgt_data)
batch = dataset.next()[0]
if self.cuda:
batch.cuda(fp16=self.fp16)
batch_size = batch.size
# (2) translate
pred, pred_score, attn, pred_length, gold_score, gold_words, allgold_words = self.translate_batch(batch)
# (3) convert indexes to words
pred_batch = []
for b in range(batch_size):
pred_batch.append(
[self.build_target_tokens(pred[b][n], src_data[b], attn[b][n])
for n in range(self.opt.n_best)]
)
return pred_batch, pred_score, pred_length, gold_score, gold_words,allgold_words
def translate_asr(self, src_data, tgt_data):
# (1) convert words to indexes
dataset = self.build_asr_data(src_data, tgt_data)
# src, tgt = batch
batch = dataset.next()[0]
if self.cuda:
batch.cuda(fp16=self.fp16)
batch_size = batch.size
# (2) translate
pred, pred_score, attn, pred_length, gold_score, gold_words,allgold_words = self.translate_batch(batch)
# (3) convert indexes to words
pred_batch = []
for b in range(batch_size):
pred_batch.append(
[self.build_target_tokens(pred[b][n], src_data[b], attn[b][n])
for n in range(self.opt.n_best)]
)
return pred_batch, pred_score, pred_length, gold_score, gold_words,allgold_words
| 15,611
| 35.138889
| 124
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/inference/nam_translate.py
|
import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from torch.autograd import Variable
from onmt.model_factory import build_model
import torch.nn.functional as F
from onmt.inference.search import BeamSearch, DiverseBeamSearch
from onmt.inference.translator import Translator
model_list = ['transformer', 'stochastic_transformer']
class FastTranslator(Translator):
"""
A fast implementation of the Beam Search based translator
Based on Fairseq implementation
"""
def __init__(self, opt):
super().__init__(opt)
self.search = BeamSearch(self.tgt_dict)
self.eos = onmt.constants.EOS
self.pad = onmt.constants.PAD
self.bos = self.bos_id
self.vocab_size = self.tgt_dict.size()
self.min_len = 1
self.normalize_scores = opt.normalize
self.len_penalty = opt.alpha
self.buffering = not opt.no_buffering
if hasattr(opt, 'no_repeat_ngram_size'):
self.no_repeat_ngram_size = opt.no_repeat_ngram_size
else:
self.no_repeat_ngram_size = 0
if hasattr(opt, 'dynamic_max_len'):
self.dynamic_max_len = opt.dynamic_max_len
else:
self.dynamic_max_len = False
if hasattr(opt, 'dynamic_max_len_scale'):
self.dynamic_max_len_scale = opt.dynamic_max_len_scale
else:
self.dynamic_max_len_scale = 1.2
if opt.verbose:
print('* Current bos id: %d' % self.bos_id, onmt.constants.BOS)
print('* Using fast beam search implementation')
def translateBatch(self, batch, prefix=None):
with torch.no_grad():
return self._translateBatch(batch, prefix_tokens=prefix)
def _translateBatch(self, batch, prefix_tokens=None):
"""
:param batch:
:param prefix_tokens:
:return:
"""
# Batch size is in different location depending on data.
# prefix_tokens = None
beam_size = self.opt.beam_size
bsz = batch_size = batch.size
max_len = self.opt.max_sent_length
gold_scores = batch.get('source').data.new(batch_size).float().zero_()
gold_words = 0
allgold_scores = []
if batch.has_target:
# Use the first model to decode
model_ = self.models[0]
gold_words, gold_scores, allgold_scores = model_.decode(batch)
# (3) Start decoding
# initialize buffers
src = batch.get('source')
scores = src.new(bsz * beam_size, max_len + 1).float().fill_(0)
scores_buf = scores.clone()
tokens = src.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
tokens_buf = tokens.clone()
tokens[:, 0].fill_(self.bos) # first token is bos
attn, attn_buf = None, None
nonpad_idxs = None
src_tokens = src.transpose(0, 1) # batch x time
src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfinalized_scores=None):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
scores for each hypothesis
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
            tokens_clone = tokens_clone[:, 1:step + 2]  # skip the first index, which is BOS
assert not tokens_clone.eq(self.eos).any()
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step + 2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
# if self.match_source_len and step > src_lengths[unfin_idx]:
# score = -math.inf
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = None
# print(hypo_attn.shape)
# print(tokens_clone[i])
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': None,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
reorder_state = None
batch_idxs = None
# initialize the decoder state, including:
# - expanding the context over the batch dimension len_src x (B*beam) x H
# - expanding the mask over the batch dimension (B*beam) x len_src
decoder_states = dict()
for i in range(self.n_models):
decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size, type=2, buffering=self.buffering)
len_context = decoder_states[i].context.size(0)
if self.dynamic_max_len:
src_len = src.size(0)
max_len = math.ceil(int(src_len) * self.dynamic_max_len_scale)
# Start decoding
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
for i, model in enumerate(self.models):
decoder_states[i]._reorder_incremental_state(reorder_state)
decode_input = tokens[:, :step + 1]
lprobs, avg_attn_scores = self._decode(decode_input, decoder_states)
# avg_attn_scores = None
# lprobs[:, self.pad] = -math.inf # never select pad
# handle min and max length constraints
if step >= max_len:
lprobs[:, :self.eos] = -math.inf
lprobs[:, self.eos + 1:] = -math.inf
elif step < self.min_len:
lprobs[:, self.eos] = -math.inf
# handle prefix tokens (possibly with different lengths)
# prefix_tokens = torch.tensor([[798, 1354]]).type_as(tokens)
# prefix_tokens = [[1000, 1354, 2443, 1475, 1010, 242, 127, 1191, 902, 1808, 1589, 26]]
if prefix_tokens is not None:
prefix_tokens = torch.tensor(prefix_tokens).type_as(tokens)
if step < prefix_tokens.size(1) and step < max_len:
prefix_tokens = torch.tensor(prefix_tokens).type_as(tokens)
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
def replicate_first_beam(tensor, mask):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
# copy tokens, scores and lprobs from the first beam to all beams
tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
scores = replicate_first_beam(scores, eos_mask_batch_dim)
lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, len_context , max_len + 2)
attn_buf = attn.clone()
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
scores_buf = scores_buf.type_as(lprobs)
eos_bbsz_idx = buffer('eos_bbsz_idx')
eos_scores = buffer('eos_scores', type_of=scores)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
else:
banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
# print(lprobs.shape)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos (except for blacklisted ones)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][blacklist] = 0
# only consider eos when it's among the top beam_size indices
torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_bbsz_idx,
)
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_scores,
)
finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
assert step < max_len
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero(as_tuple=False).squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
blacklist = blacklist[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# blacklisted hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
active_mask = buffer('active_mask')
eos_mask[:, :beam_size] |= blacklist
torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
out=active_mask,
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_blacklist, active_hypos)
)
# update blacklist to ignore any finalized hypos
blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
assert (~blacklist).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized, gold_scores, gold_words, allgold_scores
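    # Hedged sketch (added for illustration, not part of the original class): finalize_hypos above
    # turns cumulative beam scores into per-position scores by differencing, and length-normalizes
    # the final sentence score with the alpha penalty. Minimal stand-alone version of that arithmetic:
    @staticmethod
    def _per_position_scores_sketch(cum_scores, len_penalty=1.0):
        pos_scores = cum_scores.clone()
        pos_scores[:, 1:] = cum_scores[:, 1:] - cum_scores[:, :-1]              # cumulative -> per-token
        sent_scores = cum_scores[:, -1] / (cum_scores.size(1) ** len_penalty)   # normalized sentence score
        return pos_scores, sent_scores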
def _decode(self, tokens, decoder_states):
# require batch first for everything
outs = dict()
attns = dict()
for i in range(self.n_models):
# tokens[:,-1] = tokens[0,-1]
decoder_output = self.models[i].step(tokens, decoder_states[i])
# take the last decoder state
# decoder_hidden = decoder_hidden.squeeze(1)
# attns[i] = coverage[:, -1, :].squeeze(1) # batch * beam x src_len
# batch * beam x vocab_size
# outs[i] = self.models[i].generator(decoder_hidden)
outs[i] = decoder_output['log_prob']
attns[i] = decoder_output['coverage']
out = self._combine_outputs(outs)
attn = self._combine_attention(attns)
        if self.vocab_size > out.size(-1):
            # some models expose a smaller output layer; shrink the tracked vocabulary size to match
            self.vocab_size = out.size(-1)
        # the combined attention is returned alongside the log-probabilities
return out, attn
def translate(self, src_data, tgt_data, type='mt'):
# (1) convert words to indexes
dataset = self.build_data(src_data, tgt_data, type=type)
batch = dataset.get_batch(0)
if self.cuda:
batch.cuda(fp16=self.fp16)
batch_size = batch.size
# (2) translate
finalized, gold_score, gold_words, allgold_words = self.translateBatch(batch)
pred_length = []
# (3) convert indexes to words
pred_batch = []
for b in range(batch_size):
pred_batch.append(
[self.build_target_tokens(finalized[b][n]['tokens'], src_data[b], None)
for n in range(self.opt.n_best)]
)
pred_score = []
for b in range(batch_size):
pred_score.append(
[torch.FloatTensor([finalized[b][n]['score']])
for n in range(self.opt.n_best)]
)
return pred_batch, pred_score, pred_length, gold_score, gold_words, allgold_words
def translate_incl(self, src_data, tgt_data, prefix = None, type='mt'):
# (1) convert words to indexes
dataset = self.build_data(src_data, tgt_data, type=type)
batch = dataset.get_batch(0)
if self.cuda:
batch.cuda(fp16=self.fp16)
batch_size = batch.size
# (2) translate
finalized, gold_score, gold_words, allgold_words = self.translateBatch(batch, prefix = prefix)
pred_length = []
# (3) convert indexes to words
pred_batch = []
for b in range(batch_size):
pred_batch.append(
[self.build_target_tokens(finalized[b][n]['tokens'], src_data[b], None)
for n in range(self.opt.n_best)]
)
pred_score = []
for b in range(batch_size):
pred_score.append(
[torch.FloatTensor([finalized[b][n]['score']])
for n in range(self.opt.n_best)]
)
return finalized[0][0], pred_batch, pred_score, pred_length, gold_score, gold_words, allgold_words
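# Hedged stand-alone sketch (added for illustration, not part of the original file) of the prefix
# forcing used in _translateBatch: while the step index is still inside the prefix, all probability
# mass is moved onto the prescribed prefix token, so the beam is forced to follow it.
def _force_prefix_token_sketch(lprobs, prefix_token_id):
    forced = torch.full_like(lprobs, -math.inf)
    forced[:, prefix_token_id] = lprobs[:, prefix_token_id]   # keep the model's own score for that token
    return forced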
| 23,188
| 40.483005
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/inference/translator.py
|
import onmt
import onmt.modules
import torch
from onmt.model_factory import build_model, build_language_model, optimize_model
from ae.Autoencoder import Autoencoder
import torch.nn.functional as F
import sys
from onmt.constants import add_tokenidx
from options import backward_compatible
model_list = ['transformer', 'stochastic_transformer', 'fusion_network']
class Translator(object):
def __init__(self, opt):
self.opt = opt
self.tt = torch.cuda if opt.cuda else torch
self.beam_accum = None
self.beta = opt.beta
self.alpha = opt.alpha
self.start_with_bos = opt.start_with_bos
self.fp16 = opt.fp16
self.attributes = opt.attributes # attributes split by |. for example: de|domain1
self.bos_token = opt.bos_token
self.sampling = opt.sampling
self.src_lang = opt.src_lang
self.tgt_lang = opt.tgt_lang
if self.attributes:
self.attributes = self.attributes.split("|")
self.models = list()
self.model_types = list()
# models are string with | as delimiter
models = opt.model.split("|")
print(models)
self.n_models = len(models)
self._type = 'text'
for i, model_path in enumerate(models):
checkpoint = torch.load(model_path,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
model_opt = backward_compatible(model_opt)
if hasattr(model_opt, "enc_state_dict"):
model_opt.enc_state_dict = None
model_opt.dec_state_dict = None
self.main_model_opt = model_opt
dicts = checkpoint['dicts']
# update special tokens
onmt.constants = add_tokenidx(model_opt, onmt.constants, dicts)
# self.bos_token = model_opt.tgt_bos_word
if i == 0:
if "src" in checkpoint['dicts']:
self.src_dict = checkpoint['dicts']['src']
else:
self._type = "audio"
self.tgt_dict = checkpoint['dicts']['tgt']
if "langs" in checkpoint["dicts"]:
self.lang_dict = checkpoint['dicts']['langs']
else:
self.lang_dict = {'src': 0, 'tgt': 1}
if "atbs" in checkpoint["dicts"]:
self.atb_dict = checkpoint['dicts']['atbs']
else:
self.atb_dict = {'nothingness': 0}
self.bos_id = self.tgt_dict.labelToIdx[self.bos_token]
print("[INFO] Bos Token: %s Bos_ID: %d" % (self.bos_token, self.bos_id))
model = build_model(model_opt, checkpoint['dicts'], remove_pretrain=True)
if opt.verbose:
print('Loading model from %s' % model_path)
try:
model.load_state_dict(checkpoint['model'])
optimize_model(model)
except RuntimeError:
optimize_model(model)
try:
model.load_state_dict(checkpoint['model'])
except RuntimeError:
                    model.load_state_dict(checkpoint['model'], strict=False)
if model_opt.model in model_list:
# if model.decoder.positional_encoder.len_max < self.opt.max_sent_length:
# print("Not enough len to decode. Renewing .. ")
# model.decoder.renew_buffer(self.opt.max_sent_length)
model.renew_buffer(self.opt.max_sent_length)
if opt.fp16:
model = model.half()
if opt.cuda:
model = model.cuda()
else:
model = model.cpu()
if opt.dynamic_quantile == 1:
engines = torch.backends.quantized.supported_engines
if 'fbgemm' in engines:
torch.backends.quantized.engine = 'fbgemm'
else:
print("[INFO] fbgemm is not found in the available engines. Possibly the CPU does not support AVX2."
" It is recommended to disable Quantization (set to 0).")
torch.backends.quantized.engine = 'qnnpack'
# convert the custom functions to their autograd equivalent first
model.convert_autograd()
model = torch.quantization.quantize_dynamic(
model, {torch.nn.LSTM, torch.nn.Linear}, dtype=torch.qint8
)
model.eval()
self.models.append(model)
self.model_types.append(model_opt.model)
# language model
if opt.lm is not None:
if opt.verbose:
print('Loading language model from %s' % opt.lm)
lm_chkpoint = torch.load(opt.lm, map_location=lambda storage, loc: storage)
lm_opt = lm_chkpoint['opt']
lm_model = build_language_model(lm_opt, checkpoint['dicts'])
if opt.fp16:
lm_model = lm_model.half()
if opt.cuda:
lm_model = lm_model.cuda()
else:
lm_model = lm_model.cpu()
self.lm_model = lm_model
self.cuda = opt.cuda
self.ensemble_op = opt.ensemble_op
if opt.autoencoder is not None:
if opt.verbose:
print('Loading autoencoder from %s' % opt.autoencoder)
checkpoint = torch.load(opt.autoencoder,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
# posSize= checkpoint['autoencoder']['nmt.decoder.positional_encoder.pos_emb'].size(0)
# self.models[0].decoder.renew_buffer(posSize)
# self.models[0].decoder.renew_buffer(posSize)
# Build model from the saved option
self.autoencoder = Autoencoder(self.models[0], model_opt)
self.autoencoder.load_state_dict(checkpoint['autoencoder'])
if opt.cuda:
self.autoencoder = self.autoencoder.cuda()
self.models[0] = self.models[0].cuda()
else:
self.autoencoder = self.autoencoder.cpu()
self.models[0] = self.models[0].cpu()
self.models[0].autoencoder = self.autoencoder
if opt.verbose:
print('Done')
def init_beam_accum(self):
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
# Combine distributions from different models
def _combine_outputs(self, outputs, weight=None):
if weight is None:
weight = [1.0/len(outputs) for _ in range(len(outputs))]
# in case outputs have difference vocabulary sizes: take the shortest common one
sizes = [output_.size(-1) for output_ in outputs.values()]
min_size = min(sizes)
for key in outputs:
outputs[key] = outputs[key][:, :min_size]
# outputs = resized_outputs
if len(outputs) == 1:
return outputs[0]
if self.ensemble_op == "logSum":
output = (outputs[0]) * weight[0]
# sum the log prob
for i in range(1, len(outputs)):
output += (outputs[i] * weight[i])
# output.div_(len(outputs))
output = F.log_softmax(output, dim=-1)
elif self.ensemble_op == "mean": # default one
output = torch.exp(outputs[0]) * weight[0]
# sum the log prob
for i in range(1, len(outputs)):
output += torch.exp(outputs[i]) * weight[i]
# output.div_(len(outputs))
output = torch.log(output)
elif self.ensemble_op == "max":
output = outputs[0]
for i in range(1, len(outputs)):
output = torch.max(output, outputs[i])
elif self.ensemble_op == "min":
output = outputs[0]
for i in range(1, len(outputs)):
output = torch.min(output, outputs[i])
elif self.ensemble_op == 'gmean':
output = torch.exp(outputs[0])
# geometric mean of the probabilities
for i in range(1, len(outputs)):
output *= torch.exp(outputs[i])
# have to normalize
output.pow_(1.0 / float(len(outputs)))
norm_ = torch.norm(output, p=1, dim=-1)
output.div_(norm_.unsqueeze(-1))
output = torch.log(output)
else:
raise ValueError(
                'Ensemble operator needs to be "mean" or "logSum", the current value is %s' % self.ensemble_op)
return output
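    # Hedged sketch (added for illustration, not part of the original class): when the ensembled models
    # have different vocabulary sizes, the combination above first trims every distribution to the
    # shortest common size, as in this minimal version:
    @staticmethod
    def _trim_to_common_vocab_sketch(outputs):
        min_size = min(out.size(-1) for out in outputs.values())
        return {key: out[..., :min_size] for key, out in outputs.items()}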
# Take the average of attention scores
def _combine_attention(self, attns):
attn = attns[0]
for i in range(1, len(attns)):
attn += attns[i]
        attn.div_(len(attns))
return attn
def build_data(self, src_sents, tgt_sents, type='mt'):
# This needs to be the same as preprocess.py.
if type == 'mt':
if self.start_with_bos:
src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD)
for b in src_sents]
else:
src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD)
for b in src_sents]
data_type = 'text'
elif type == 'asr':
# no need to deal with this
src_data = src_sents
data_type = 'audio'
else:
raise NotImplementedError
tgt_bos_word = self.opt.bos_token
if self.opt.no_bos_gold:
tgt_bos_word = None
tgt_data = None
if tgt_sents:
tgt_data = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
tgt_bos_word,
onmt.constants.EOS_WORD) for b in tgt_sents]
src_lang_data = [torch.Tensor([self.lang_dict[self.src_lang]])]
tgt_lang_data = [torch.Tensor([self.lang_dict[self.tgt_lang]])]
return onmt.Dataset(src_data, tgt_data,
src_langs=src_lang_data, tgt_langs=tgt_lang_data,
batch_size_words=sys.maxsize,
data_type=data_type,
batch_size_sents=self.opt.batch_size,
src_align_right=self.opt.src_align_right)
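# Usage sketch (hypothetical input): build_data([['hello', 'world']], None, type='mt')
# converts the tokenized source sentence into vocabulary indices and wraps it,
# together with the configured source/target language ids, into an onmt.Dataset
# that typically holds a single mini-batch (see translate() below).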
def build_asr_data(self, src_data, tgt_sents):
# This needs to be the same as preprocess.py.
tgt_data = None
if tgt_sents:
tgt_data = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD,
onmt.constants.EOS_WORD) for b in tgt_sents]
return onmt.Dataset(src_data, tgt_data,
batch_size_words=sys.maxsize,
data_type=self._type, batch_size_sents=self.opt.batch_size)
def build_target_tokens(self, pred, src, attn):
tokens = self.tgt_dict.convertToLabels(pred, onmt.constants.EOS)
tokens = tokens[:-1] # EOS
return tokens
def translate_batch(self, batch):
if isinstance(batch, list):
batch = batch[0]
torch.set_grad_enabled(False)
# Batch size is in different location depending on data.
beam_size = self.opt.beam_size
batch_size = batch.size
gold_scores = batch.get('source').data.new(batch_size).float().zero_()
gold_words = 0
allgold_scores = []
if batch.has_target:
# Use the first model to decode
model_ = self.models[0]
gold_words, gold_scores, allgold_scores = model_.decode(batch)
# (3) Start decoding
# time x batch * beam
# initialize the beam
beam = [onmt.Beam(beam_size, self.bos_id, self.opt.cuda, self.opt.sampling) for k in range(batch_size)]
batch_idx = list(range(batch_size))
remaining_sents = batch_size
decoder_states = dict()
for i in range(self.n_models):
decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size)
if self.opt.lm:
lm_decoder_states = self.lm_model.create_decoder_state(batch, beam_size)
for i in range(self.opt.max_sent_length):
# Prepare decoder input.
# input size: 1 x ( batch * beam )
input = torch.stack([b.getCurrentState() for b in beam
if not b.done]).t().contiguous().view(1, -1)
decoder_input = input
# require batch first for everything
outs = dict()
attns = dict()
for k in range(self.n_models):
# decoder_hidden, coverage = self.models[k].decoder.step(decoder_input.clone(), decoder_states[k])
# run decoding on the model
decoder_output = self.models[k].step(decoder_input.clone(), decoder_states[k])
# extract the required tensors from the output (a dictionary)
outs[k] = decoder_output['log_prob']
attns[k] = decoder_output['coverage']
# for ensembling models
out = self._combine_outputs(outs)
attn = self._combine_attention(attns)
# for lm fusion
if self.opt.lm:
lm_decoder_output = self.lm_model.step(decoder_input.clone(), lm_decoder_states)
# fusion
lm_out = lm_decoder_output['log_prob']
# out = out + 0.3 * lm_out
# note: the ensembled translation distribution is currently discarded and
# replaced by the language-model prediction alone
out = lm_out
word_lk = out.view(beam_size, remaining_sents, -1) \
.transpose(0, 1).contiguous()
attn = attn.contiguous().view(beam_size, remaining_sents, -1) \
.transpose(0, 1).contiguous()
active = []
for b in range(batch_size):
if beam[b].done:
continue
idx = batch_idx[b]
if not beam[b].advance(word_lk.data[idx], attn.data[idx]):
active += [b]
for j in range(self.n_models):
decoder_states[j].update_beam(beam, b, remaining_sents, idx)
if self.opt.lm:
lm_decoder_states.update_beam(beam, b, remaining_sents, idx)
if not active:
break
# in this section, the sentences that are still active are
# compacted so that the decoder is not run on completed sentences
active_idx = self.tt.LongTensor([batch_idx[k] for k in active])
batch_idx = {beam_idx: idx for idx, beam_idx in enumerate(active)}
for j in range(self.n_models):
decoder_states[j].prune_complete_beam(active_idx, remaining_sents)
if self.opt.lm:
lm_decoder_states.prune_complete_beam(active_idx, remaining_sents)
remaining_sents = len(active)
# (4) package everything up
all_hyp, all_scores, all_attn = [], [], []
n_best = self.opt.n_best
all_lengths = []
for b in range(batch_size):
scores, ks = beam[b].sortBest()
all_scores += [scores[:n_best]]
hyps, attn, length = zip(*[beam[b].getHyp(k) for k in ks[:n_best]])
all_hyp += [hyps]
all_lengths += [length]
# if(src_data.data.dim() == 3):
if self.opt.encoder_type == 'audio':
valid_attn = decoder_states[0].original_src.narrow(2, 0, 1).squeeze(2)[:, b].ne(onmt.constants.PAD) \
.nonzero().squeeze(1)
else:
valid_attn = decoder_states[0].original_src[:, b].ne(onmt.constants.PAD) \
.nonzero().squeeze(1)
# print(valid_attn)
# for a in attn:
# print(a.shape)
attn = [a for a in attn]
all_attn += [attn]
if self.beam_accum:
self.beam_accum["beam_parent_ids"].append(
[t.tolist()
for t in beam[b].prevKs])
self.beam_accum["scores"].append([
["%4f" % s for s in t.tolist()]
for t in beam[b].all_scores][1:])
self.beam_accum["predicted_ids"].append(
[[self.tgt_dict.getLabel(id)
for id in t.tolist()]
for t in beam[b].nextYs][1:])
torch.set_grad_enabled(True)
return all_hyp, all_scores, all_attn, all_lengths, gold_scores, gold_words, allgold_scores
def translate(self, src_data, tgt_data, type="mt"):
if isinstance(src_data[0], list) and type == 'asr':
batches = list()
for src_data_ in src_data:
dataset = self.build_data(src_data_, tgt_data, type=type)
batch = dataset.get_batch(0)
batches.append(batch)
else:
dataset = self.build_data(src_data, tgt_data, type=type)
batch = dataset.get_batch(0) # this dataset has only one mini-batch
batches = [batch] * self.n_models
src_data = [src_data] * self.n_models
if self.cuda:
for i, _ in enumerate(batches):
batches[i].cuda(fp16=self.fp16)
batch_size = batches[0].size
# (2) translate
pred, pred_score, attn, pred_length, gold_score, gold_words, allgold_words = self.translate_batch(batches)
# (3) convert indexes to words
src_data = src_data[0]
pred_batch = []
for b in range(batch_size):
pred_batch.append(
[self.build_target_tokens(pred[b][n], src_data[b], attn[b][n])
for n in range(self.opt.n_best)]
)
pred_ids = pred
return pred_batch, pred_score, pred_length, pred, gold_score, gold_words, allgold_words
def translate_asr(self, src_data, tgt_data):
# (1) convert words to indexes
dataset = self.build_asr_data(src_data, tgt_data)
# src, tgt = batch
batch = dataset.get_batch(0)
if self.cuda:
batch.cuda(fp16=self.fp16)
batch_size = batch.size
# (2) translate
pred, pred_score, attn, pred_length, gold_score, gold_words, allgold_words = self.translate_batch(batch)
# (3) convert indexes to words
pred_batch = []
for b in range(batch_size):
pred_batch.append(
[self.build_target_tokens(pred[b][n], src_data[b], attn[b][n])
for n in range(self.opt.n_best)]
)
return pred_batch, pred_score, pred_length, gold_score, gold_words, allgold_words
| 19,446
| 35.485929
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/stochastic_transformer_layers.py
|
import torch
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
class StochasticEncoderLayer(EncoderLayer):
"""Wraps multi-head attentions and position-wise feed forward into one encoder layer
Args:
h: number of heads
d_model: dimension of model
p: dropout probability
d_ff: dimension of feed forward
Params:
multihead: multi-head attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
mask: batch_size x len_query x len_key or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
"""
def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, death_rate=0.0):
super().__init__(h, d_model, p, d_ff, attn_p, version)
# super(StochasticEncoderLayer, self).__init__()
self.death_rate = death_rate
def forward(self, input, attn_mask):
coin = True
if self.training:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
query = self.preprocess_attn(input)
out, _ = self.multihead(query, query, query, attn_mask)
if self.training:
out = out / ( 1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input),)
if self.training:
out = out / ( 1 - self.death_rate)
input = self.postprocess_ffn(out, input)
return input
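# Behaviour sketch: during training the whole block "dies" with probability
# death_rate, in which case both sub-layers are skipped and the residual input
# is passed through unchanged; when it survives, each sub-layer output is
# scaled by 1 / (1 - death_rate) so that its expected contribution matches the
# deterministic computation used at test time.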
class StochasticDecoderLayer(DecoderLayer):
"""Wraps multi-head attentions and position-wise feed forward into one layer of decoder
Args:
h: number of heads
d_model: dimension of model
p: dropout probability
d_ff: dimension of feed forward
Params:
multihead_tgt: multi-head self attentions layer
multihead_src: multi-head encoder-decoder attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
context: batch_size x len_src x d_model
mask_tgt: batch_size x len_query x len_key or broadcastable
mask_src: batch_size x len_query x len_src or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
coverage: batch_size x len_query x len_key
"""
def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, death_rate=0.0):
super().__init__(h, d_model, p, d_ff, attn_p, version)
self.death_rate = death_rate
def forward(self, input, context, mask_tgt, mask_src):
""" Self attention layer
layernorm > attn > dropout > residual
"""
"""
the input is 'unnormalized', so the first preprocess layer normalizes it before attention;
the output (the input stacked with the other residual outputs) is also unnormalized (it is normalized by the next layer),
so if the layer is skipped, the unnormalized input is simply propagated forward unchanged
"""
coverage = None
coin = True
if self.training:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
query = self.preprocess_attn(input)
self_context = query
out, _ = self.multihead_tgt(query, self_context, self_context, mask_tgt)
if self.training:
out = out / ( 1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_src_attn(input)
out, coverage = self.multihead_src(query, context, context, mask_src)
if self.training:
out = out / ( 1 - self.death_rate)
input = self.postprocess_src_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
# During training the surviving output is scaled by 1 / (1 - death_rate) so that its expectation matches the unscaled layer used at test time
if self.training:
out = out / ( 1 - self.death_rate)
input = self.postprocess_ffn(out, input)
return input, coverage
| 4,693
| 30.716216
| 117
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/Stats.py
|
""" Statistics calculation utility """
from __future__ import division
import time
import math
import sys
import datetime
from onmt.train_utils.Meters import AverageMeter, TimeMeter
class Logger(object):
def __init__(self, optim, scaler=None):
self.optim = optim
self.meters = dict()
self.start_time = time.time()
self.scaler = scaler
# initializing the meters
self.meters["total_loss"] = AverageMeter()
self.meters["total_words"] = AverageMeter()
self.meters["report_loss"] = AverageMeter()
self.meters["report_tgt_words"] = AverageMeter()
self.meters["report_src_words"] = AverageMeter()
self.meters["kl"] = AverageMeter()
self.meters["kl_prior"] = AverageMeter()
self.meters["gnorm"] = AverageMeter()
self.meters["oom"] = AverageMeter()
self.meters["total_sloss"] = AverageMeter()
self.meters["baseline"] = AverageMeter()
self.meters["R"] = AverageMeter()
self.meters["ce"] = AverageMeter()
self.meters["q_entropy"] = AverageMeter()
self.meters["q_mean"] = AverageMeter()
self.meters["q_var"] = AverageMeter()
self.meters["l2"] = AverageMeter()
self.meters["l2_target"] = AverageMeter()
self.meters["total_lang_correct"] = AverageMeter()
self.meters["total_sents"] = AverageMeter()
def reset(self):
for key in self.meters:
self.meters[key].reset()
self.start_time = time.time()
def reset_meter(self, key):
self.meters[key].reset()
def reset_time(self):
self.start_time = time.time()
def log(self, epoch, iteration, data_size):
ppl = math.exp(self.meters["report_loss"].sum / self.meters["report_tgt_words"].sum)
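# e.g. a summed report loss of 1386.29 nats over 1000 target tokens gives ppl = exp(1.38629) ~ 4.0 (hypothetical numbers)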
grad_norm = self.meters["gnorm"].avg
oom_count = self.meters["oom"].sum
baseline = self.meters['baseline'].avg
kl = self.meters['kl'].avg # normalized by 6 distributions and the batch_size
R = self.meters['R'].avg #
ce = self.meters['ce'].avg
q_ent = self.meters['q_entropy'].avg
q_mean = self.meters['q_mean'].avg
q_var = self.meters['q_var'].avg
kl_prior = self.meters['kl_prior'].avg
l2 = self.meters['l2'].avg if 'l2' in self.meters else None
l2_target = self.meters['l2_target'].avg if 'l2_target' in self.meters else None
log_string = (("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; lr: %.7f ; num updates: %7d "
+ "%5.0f tgt tok/s; gnorm %.3f; oom %d") %
(epoch, iteration+1, data_size,
ppl,
self.optim.getLearningRate(),
self.optim._step,
self.meters["report_tgt_words"].sum/(time.time()-self.start_time),
grad_norm if grad_norm else 0,
oom_count))
if ce is not None:
log_string += "; ce %.3f" % ce
if baseline is not None:
log_string += "; bl %.3f" % baseline
if kl is not None:
log_string += "; kl %.3f" % kl
if kl_prior is not None:
log_string += "; kl_prior %.3f" % kl_prior
if R is not None:
log_string += "; R %.3f" % R
if q_ent is not None:
log_string += "; q_ent %.3f" % q_ent
if q_mean is not None:
log_string += "; q_mean %.3f" % q_mean
if q_var is not None:
log_string += "; q_var %.3f" % q_var
if self.meters['total_lang_correct'].avg is not None:
total_lang_correct = self.meters['total_lang_correct'].sum
acc = total_lang_correct / self.meters['total_sents'].sum * 100.0
log_string += "; acc %.3f " % acc
if l2 is not None:
log_string += "; l2 %.3f" % l2
if l2_target is not None:
log_string += "; l2 target %.3f" % l2_target
# Don't forget to print this ...
print(log_string)
| 4,077
| 33.559322
| 93
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/stochastic_transformers.py
|
import numpy as np
import torch, math
import torch.nn as nn
import onmt
from onmt.models.transformer_layers import PositionalEncoding
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.legacy.stochastic_transformer_layers import StochasticEncoderLayer, StochasticDecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder
from onmt.modules.base_seq2seq import NMTModel, Reconstructor
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, PrePostProcessing
from onmt.modules.linear import FeedForward, FeedForwardSwish
def custom_layer(module):
def custom_forward(*args):
output = module(*args)
return output
return custom_forward
def expected_length(length, death_rate):
e_length = 0
for l in range(length):
survival_rate = 1.0 - (l+1)/length*death_rate
e_length += survival_rate
return e_length
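# Worked example (hypothetical setting): with length=6 and death_rate=0.5 the
# survival rates are 1 - (l+1)/6 * 0.5 for l = 0..5, i.e. 11/12 down to 6/12,
# so the expected number of executed layers is 51/12 = 4.25.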
class StochasticTransformerEncoder(TransformerEncoder):
"""Encoder in 'Attention is all you need'
Args:
opt: list of options ( see train.py )
dicts : dictionary (for source language)
"""
def __init__(self, opt, dicts, positional_encoder, encoder_type='text'):
self.death_rate = opt.death_rate
# build_modules will be called from the inherited constructor
super(StochasticTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type)
e_length = expected_length(self.layers, self.death_rate)
print("Stochastic Encoder with %.2f expected layers" % e_length)
def build_modules(self):
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = ( l + 1.0 ) / self.layers * self.death_rate
block = StochasticEncoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout, death_rate=death_r)
self.layer_modules.append(block)
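# With the linear decay above, e.g. 6 layers and death_rate=0.5 (hypothetical
# setting) give per-layer death rates from 0.5/6 ~ 0.083 for the first layer
# up to 0.5 for the last, so deeper layers are dropped more often.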
class StochasticTransformerDecoder(TransformerDecoder):
"""Encoder in 'Attention is all you need'
Args:
opt
dicts
"""
def __init__(self, opt, dicts, positional_encoder, attribute_embeddings=None, ignore_source=False):
self.death_rate = opt.death_rate
# build_modules will be called from the inherited constructor
super(StochasticTransformerDecoder, self).__init__(opt, dicts,
positional_encoder,
attribute_embeddings,
ignore_source)
e_length = expected_length(self.layers, self.death_rate)
print("Stochastic Decoder with %.2f expected layers" % e_length)
def build_modules(self):
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = ( l + 1 ) / self.layers * self.death_rate
block = StochasticDecoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout, death_rate=death_r)
self.layer_modules.append(block)
| 3,473
| 32.085714
| 143
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/Meters.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import time
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = None
self.sum = 0
self.count = 0
def is_valid(self):
return (self.count > 0)
def update(self, val, n=1):
if val is not None:
self.val = val
self.sum += val
self.count += n
self.avg = self.sum / self.count
class TimeMeter(object):
"""Computes the average occurrence of some event per second"""
def __init__(self, init=0):
self.reset(init)
def reset(self, init=0):
self.init = init
self.start = time.time()
self.n = 0
def update(self, val=1):
self.n += val
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return self.init + (time.time() - self.start)
class StopwatchMeter(object):
"""Computes the sum/avg duration of some event in seconds"""
def __init__(self):
self.reset()
def start(self):
self.start_time = time.time()
def stop(self, n=1):
if self.start_time is not None:
delta = time.time() - self.start_time
self.sum += delta
self.n += n
self.start_time = None
def reset(self):
self.sum = 0
self.n = 0
self.start_time = None
@property
def avg(self):
return self.sum / self.n
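# A minimal, self-contained usage sketch of the meters above (the numbers are
# illustrative only and not tied to any training run):
if __name__ == "__main__":
    meter = AverageMeter()
    meter.update(2.0, n=4)  # adds 2.0 to the running sum and 4 to the count
    meter.update(1.0)       # adds 1.0 to the running sum and 1 to the count
    assert abs(meter.avg - 0.6) < 1e-9  # avg = 3.0 / 5
    watch = StopwatchMeter()
    watch.start()
    watch.stop()
    print("AverageMeter avg: %.3f, StopwatchMeter events: %d" % (meter.avg, watch.n))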
| 1,840
| 22.602564
| 78
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/DynamicTransformer/Dlcl.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: Wang Qiang
@contact: wangqiangneu@gmail.com
@desc: connection schema between layers
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class DynamicLinearCombination(nn.Module):
"""Implementation of Dynamic Linear Combination of Layers (DLCL)
for pre-norm, x_{l+1} = \sum_{k=0}^{l}{W_k^{l+1}LN(y_k)}
for post-norm, x_{l+1} = LN(\sum_{k=0}^{l}{W_k^{l+1}y_k})
"""
def __init__(self, model_size, n_layers, is_encoder=True, include_sublayer=False):
super(DynamicLinearCombination, self).__init__()
self.normalize_learned_weight = True
self.normalized_weight = None
self.weight_type = 'scalar'
self.out_dropout = 0.0
self.normalize_before = True
self.dim = model_size
# transformer encoder has 2 sub-layers, decoder has 3 sub-layers
if include_sublayer:
layer_num = 1 + (2 * n_layers if is_encoder else 3 * n_layers)
else:
layer_num = 1 + n_layers
# init weights and corresponding masks
learnable = True
# combine everything from the past
self.history_window_size = -1
self.weight, self.weight_mask = self._init(layer_num, 'avg', self.weight_type,
-1, learnable)
normalize_embed = False
# init triangular layer norm
if normalize_embed:
self.layer_norms = nn.ModuleList([nn.LayerNorm(self.dim) for _ in range(layer_num)])
else:
self.layer_norms = nn.ModuleList([nn.Sequential()] + [nn.LayerNorm(self.dim) for _ in range(layer_num-1)])
# states
self.count = 0
self.layers = []
@staticmethod
def _init_mask(n_layer, window_size):
mask = np.zeros([n_layer, n_layer], dtype=np.float32)
# all preceding layers
if window_size == -1:
for i in range(mask.shape[0]):
mask[i, :(i+1)] = 1
else:
for i in range(mask.shape[0]):
mask[i, max(0, i + 1 - window_size): (i+1)] = 1
return torch.from_numpy(mask)
@staticmethod
def _init_weight(np_mask, dim=1, init_value='avg', learnable=True):
np_weight = np.copy(np_mask)
if init_value == 'avg':
np_weight = np_weight / np.sum(np_weight, axis=1, keepdims=True)
elif init_value == 'one':
np_weight[:, :] = 1.
else:
raise ValueError('unknown init_value:{}'.format(init_value))
weight_tensor = torch.from_numpy(np_weight).unsqueeze(2)
if dim > 1:
weight_tensor = weight_tensor.repeat(1, 1, dim)
weight_tensor = torch.nn.Parameter(weight_tensor, requires_grad=learnable)
return weight_tensor
def _init(self, layer_num, init_value, weight_type, window_size=-1, learnable=True):
"""
:param layer_num: total layers
:param init_value: initial weight value
:param weight_type: granularity of learned weights (scalar, scalar_X, vector)
:param window_size: past windows size of layers
:param learnable: if allow to learn weights
:return:
weight_tensor:
1. L x L x 1 if weight type='scalar'
2. L x L x X if weight type='scalar_X'
3. L x L x H if weight type='vector'
weight_mask: L x L, 0 means padding
"""
"""
weight shape is:
1. L x L x 1 for weight type='scalar'
2. L x L x X for weight type='scalar_X'
3. L x L x H for weight type='vector'
mask shape is L x L
:return:
"""
# L x L
mask_tensor = self._init_mask(layer_num, window_size)
if weight_type == 'scalar':
self.last_dim = 1
elif weight_type == 'vector':
self.last_dim = self.dim
elif weight_type.startswith('scalar_'):
n = int(weight_type.split('_')[1])
assert self.dim % n == 0
self.last_dim = n
else:
raise ValueError('unknown weight_type:{}'.format(weight_type))
weight_tensor = self._init_weight(mask_tensor.numpy(), self.last_dim, init_value,
learnable=learnable)
return weight_tensor, mask_tensor
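# Example (assumed 3 layers, window_size=-1, weight_type='scalar'): the mask is
# the lower-triangular matrix [[1,0,0],[1,1,0],[1,1,1]] and, with
# init_value='avg', the initial weights are its row-normalized version, e.g.
# the last row starts as [1/3, 1/3, 1/3].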
def push(self, layer):
self.count += 1
# first layer
if self.count == 1:
self.layers.append(self.layer_norms[0](layer))
# compatible when running on CPU
if layer.is_cuda and not self.weight_mask.is_cuda:
self.weight_mask = self.weight_mask.cuda()
if self.normalize_learned_weight:
weight = self.weight.masked_fill((self.weight_mask == 0).unsqueeze(2), float('-inf'))
self.normalized_weight = F.softmax(weight, dim=1)
return
# following layer
if self.normalize_before:
layer = self.layer_norms[self.count-1](layer)
self.layers.append(layer)
def _pick_weights(self):
weight = self.normalized_weight if self.normalize_learned_weight else self.weight
weight = weight[self.count - 1, : self.count, :].view(-1, 1, 1, self.last_dim)
return weight
def pop(self):
assert len(self.layers) > 0
# D x 1 x 1 x [1, H/G, H]
weights = self._pick_weights()
# D x T x B x H
layers = torch.stack(self.layers, 0)
# linear combination
if self.weight_type in ['scalar', 'vector']:
ret = (layers * weights).sum(0)
else:
D, T, B, H = layers.size()
layers = layers.view(D, T, B, -1, weights.size(-1))
weights = weights.unsqueeze(3)
ret = (layers * weights).sum(0).view(T, B, H)
if self.normalize_before:
if self.out_dropout > 0:
return F.dropout(ret, p=self.out_dropout, training=self.training)
else:
return ret
if self.out_dropout > 0:
return F.dropout(self.layer_norms[self.count-1](ret), p=self.out_dropout, training=self.training)
else:
return self.layer_norms[self.count-1](ret)
def clean(self):
self.count = 0
self.layers = []
def forward(self):
pass
| 6,453
| 35.88
| 118
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/DynamicTransformer/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/DynamicTransformer/Models.py
|
import math
import torch
import onmt
from onmt.legacy.DynamicTransformer.Dlcl import DynamicLinearCombination
from onmt.models.transformers import TransformerEncoder, TransformerDecoder
from onmt.modules.dropout import embedded_dropout
from torch.utils.checkpoint import checkpoint
class DlclTransformerEncoder(TransformerEncoder):
"""Transformer encoder."""
def __init__(self, opt, dicts, positional_encoder, encoder_type='text'):
super().__init__(opt, dicts, positional_encoder, encoder_type)
self.history = DynamicLinearCombination(self.model_size, self.layers, is_encoder=True)
def forward(self, input, **kwargs):
"""
Inputs Shapes:
input: batch_size x len_src
Outputs Shapes:
out: batch_size x len_src x d_model
mask_src
"""
# clean layer history
self.history.clean()
# Embedding: batch_size x len_src x d_model
if self.input_type == "text":
mask_src = input.data.eq(onmt.constants.PAD).unsqueeze(1) # batch_size x len_src x 1 for broadcasting
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
else:
mask_src = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
input = input.narrow(2, 1, input.size(2) - 1)
emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
input.size(1), -1)
# Scale the emb by sqrt(d_model)
emb = emb * math.sqrt(self.model_size)
# Adding positional encoding
emb = self.time_transformer(emb)
# Dropout
emb = self.preprocess_layer(emb)
# B x T x H -> T x B x H
context = emb.transpose(0, 1).contiguous()
self.history.push(context)
for i, layer in enumerate(self.layer_modules):
context = self.history.pop()
if len(self.layer_modules) - i <= onmt.constants.checkpointing and self.training:
context = checkpoint(custom_layer(layer), context, mask_src)
else:
context = layer(context, mask_src) # batch_size x len_src x d_model
self.history.push(context)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
context = self.history.pop()
context = self.postprocess_layer(context)
output_dict = {'context': context, 'src_mask': mask_src}
# return context, mask_src
return output_dict
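# DLCL flow sketch: the embedding is pushed into the history first; before each
# layer, pop() returns a learned, layer-specific weighted combination of all
# previously pushed (layer-normalized) representations, which is fed to the
# layer, and the layer output is pushed back; the final pop() therefore mixes
# the embedding and every layer output.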
class DlclTransformerDecoder(TransformerDecoder):
def __init__(self, opt, dicts, positional_encoder, attribute_embeddings=None, ignore_source=False):
super().__init__(opt, dicts, positional_encoder,
attribute_embeddings=attribute_embeddings, ignore_source=ignore_source)
self.history = DynamicLinearCombination(self.model_size, self.layers, is_encoder=False)
def forward(self, input, context, src, atbs=None, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
""" Embedding: batch_size x len_tgt x d_model """
self.history.clean()
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
if self.use_feature:
atb_emb = self.attribute_embeddings(atbs).unsqueeze(1).repeat(1, emb.size(1)) # B x H to 1 x B x H
emb = torch.cat([emb, atb_emb], dim=-1)
emb = torch.relu(self.feature_projector(emb))
if context is not None:
if self.encoder_type == "audio":
mask_src = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
output = emb.transpose(0, 1).contiguous()
self.history.push(output)
for i, layer in enumerate(self.layer_modules):
output = self.history.pop()
if len(self.layer_modules) - i <= onmt.constants.checkpointing and self.training:
output, coverage = checkpoint(custom_layer(layer), output, context, mask_tgt, mask_src)
# batch_size x len_src x d_model
else:
output, coverage = layer(output, context, mask_tgt, mask_src) # batch_size x len_src x d_model
# write into memory
self.history.push(output)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.history.pop()
output = self.postprocess_layer(output)
output_dict = { 'hidden': output, 'coverage': coverage }
# return output, None
return output_dict
def step(self, input, decoder_state):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
self.history.clean()
context = decoder_state.context
buffers = decoder_state.attention_buffers
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
atbs = decoder_state.tgt_atb
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
input_ = input[:,-1].unsqueeze(1)
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_)
""" Adding positional encoding """
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
emb = self.time_transformer(emb, t=input.size(1))
else:
# prev_h = buffer[0] if buffer is None else None
# emb = self.time_transformer(emb, prev_h)
# buffer[0] = emb[1]
raise NotImplementedError
if isinstance(emb, tuple):
emb = emb[0]
# emb should be batch_size x 1 x dim
if self.use_feature:
atb_emb = self.attribute_embeddings(atbs).unsqueeze(1).expand_as(emb) # B x H to 1 x B x H
emb = torch.cat([emb, atb_emb], dim=-1)
emb = torch.relu(self.feature_projector(emb))
# Preprocess layer: adding dropout
emb = self.preprocess_layer(emb)
emb = emb.transpose(0, 1)
# batch_size x 1 x len_src
if context is not None:
if self.encoder_type == "audio" and src.data.dim() == 3:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
output = emb.contiguous()
self.history.push(output)
for i, layer in enumerate(self.layer_modules):
output = self.history.pop()
buffer = buffers[i] if i in buffers else None
assert(output.size(0) == 1)
output, coverage, buffer = layer.step(output, context, mask_tgt, mask_src, buffer=buffer)
decoder_state.update_attention_buffer(buffer, i)
self.history.push(output)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.history.pop()
output = self.postprocess_layer(output)
return output, coverage
| 9,788
| 36.505747
| 114
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/UniversalTransformer/Layers.py
|
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.bottle import Bottle
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.modules.static_dropout import StaticDropout
Linear=XavierLinear
def contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class UniversalEncoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one encoder layer
Args:
h: number of heads
d_model: dimension of model
p: dropout probability
d_ff: dimension of feed forward
position encoder: adding embedding based on position
time encoder: adding embedding based on time (the loop)
Params:
multihead: multi-head attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
mask: batch_size x len_query x len_key or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
"""
def __init__(self, h, d_model, p, d_ff, pos_encoder, time_encoder, attn_p=0.1, version=1.0):
super(UniversalEncoderLayer, self).__init__()
self.version = version
# position and time embeddings are added to the input at the start of each layer
self.pos_encoder = pos_encoder
self.time_encoder = time_encoder
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.multihead = MultiHeadAttention(h, d_model, attn_p=attn_p, static=onmt.constants.static)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
self.feedforward = Bottle(feedforward)
def forward(self, input, attn_mask, t, pad_mask=None):
# apply layer normalization
query = self.preprocess_attn(input)
# add position encoding and time encoding
query = self.pos_encoder(query) + self.time_encoder(t)
out, _ = self.multihead(query, query, query, attn_mask,
query_mask=pad_mask, value_mask=pad_mask)
input = self.postprocess_attn(out, input, mask=pad_mask)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input),
mask=pad_mask)
input = self.postprocess_ffn(out, input)
return input
class UniversalDecoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one layer of decoder
Args:
h: number of heads
d_model: dimension of model
p: dropout probability
d_ff: dimension of feed forward
Params:
multihead_tgt: multi-head self attentions layer
multihead_src: multi-head encoder-decoder attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
context: batch_size x len_src x d_model
mask_tgt: batch_size x len_query x len_key or broadcastable
mask_src: batch_size x len_query x len_src or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
coverage: batch_size x len_query x len_key
"""
def __init__(self, h, d_model, p, d_ff, position_encoder, time_encoder, attn_p=0.1, version=1.0):
super(UniversalDecoderLayer, self).__init__()
self.version = version
self.position_encoder = position_encoder
self.time_encoder = time_encoder
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.preprocess_src_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_src_attn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.multihead_tgt = MultiHeadAttention(h, d_model, attn_p=attn_p, static=onmt.constants.static)
self.multihead_src = MultiHeadAttention(h, d_model, attn_p=attn_p, static=onmt.constants.static)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p, static=onmt.constants.static)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
self.feedforward = Bottle(feedforward)
def forward(self, input, context, t, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
#~ print(input.size())
#~ print(context.size())
#~ print(pad_mask_tgt.size())
query = self.preprocess_attn(input)
# add position encoding and time encoding
query = self.position_encoder(query) + self.time_encoder(t)
self_context = query
out, _ = self.multihead_tgt(query, self_context, self_context, mask_tgt,
query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
out, coverage = self.multihead_src(query, context, context, mask_src,
query_mask=pad_mask_tgt, value_mask=pad_mask_src)
input = self.postprocess_src_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
mask=pad_mask_tgt)
input = self.postprocess_ffn(out, input)
return input, coverage
def step(self, input, context, pos_step, t, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None, buffer=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_attn(input, mask=pad_mask_tgt)
# add position encoding and time encoding (before the buffer because the previous steps are already added)
query = self.position_encoder(query, t=pos_step) + self.time_encoder(t)
if buffer is not None:
buffer = torch.cat([buffer, query], dim=1)
else:
buffer = query
out, _ = self.multihead_tgt(query, buffer, buffer, mask_tgt,
query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
out, coverage = self.multihead_src(query, context, context, mask_src,
query_mask=pad_mask_tgt, value_mask=None)
input = self.postprocess_src_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
mask=pad_mask_tgt)
input = self.postprocess_ffn(out, input)
return input, coverage, buffer
class TimeEncoding(nn.Module):
"""Adds positional embeddings to standard word embeddings
This matches the original TensorFlow implementation at https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py.
Args:
d_model: dimension of model
p: dropout probability
len_max: max number of pre-computed time steps
Inputs:
t: index of the current recurrence step
Outputs Shapes:
out: 1 x d_model (shared across batch elements and positions)
"""
def __init__(self, d_model, p=0, len_max=64):
# save a fixed positional embedding matrix up to len_max,
# so that it does not need to be recreated every time
super(TimeEncoding, self).__init__()
self.len_max=len_max
self.d_model = d_model
self.renew(len_max)
self.p = p
def renew(self, new_max_len):
# delete the old variable to avoid PyTorch's error when registering a new buffer
if hasattr(self, 'time_emb'):
del self.time_emb
times = torch.arange(0,new_max_len).float()
num_timescales = self.d_model // 2
log_timescale_increment = math.log(10000) / (num_timescales-1)
inv_timescales = torch.exp(torch.arange(0, num_timescales).float() * -log_timescale_increment)
scaled_time = times.unsqueeze(1) * inv_timescales.unsqueeze(0)
time_emb = torch.cat((torch.sin(scaled_time), torch.cos(scaled_time)), 1)
# wrap in a buffer so that model can be moved to GPU
self.register_buffer('time_emb', time_emb)
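# Assuming d_model is even, the table follows the usual sinusoidal scheme:
# time_emb[t] = [sin(t * s_0), ..., sin(t * s_{N-1}), cos(t * s_0), ..., cos(t * s_{N-1})]
# with N = d_model // 2 and s_i = 10000^(-i / (N - 1)), i.e. the same
# timescales as the positional encoding but indexed by the recurrence step t.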
def forward(self, t):
# print('hello')
# out = word_emb + Variable(self.pos_emb[:len_seq, :][-1, :], requires_grad=False)
time_emb = Variable(self.time_emb[t, :], requires_grad=False) # 1 x dim
# out has size 1 x d_model; all positions and all batch elements share the same time embedding
out = time_emb.unsqueeze(0)
return out
| 11,195
| 37.740484
| 156
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/UniversalTransformer/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/UniversalTransformer/Models.py
|
import numpy as np
import torch, math
import torch.nn as nn
from onmt.models.transformer_layers import PositionalEncoding
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.legacy.UniversalTransformer.Layers import UniversalDecoderLayer, UniversalEncoderLayer
#~ from onmt.modules.ParallelTransformer.Layers import ParallelEncoderLayer
from onmt.modules.base_seq2seq import NMTModel, Reconstructor
import onmt
from onmt.modules.dropout import embedded_dropout
from onmt.modules.Checkpoint import checkpoint
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from torch.autograd import Variable
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
def custom_layer(module):
def custom_forward(*args):
output = module(*args)
return output
return custom_forward
class UniversalTransformerEncoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt: list of options ( see train.py )
dicts : dictionary (for source language)
"""
def __init__(self, opt, dicts, positional_encoder, time_encoder):
super(UniversalTransformerEncoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
self.positional_encoder = positional_encoder
self.time_encoder = time_encoder
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=onmt.constants.static)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.positional_encoder = positional_encoder
self.recurrent_layer = UniversalEncoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.positional_encoder, self.time_encoder, self.attn_dropout)
#~ self.layer_modules = nn.ModuleList([ParallelEncoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout) for _ in range(self.layers)])
def forward(self, input, **kwargs):
"""
Inputs Shapes:
input: batch_size x len_src (to be transposed)
Outputs Shapes:
out: batch_size x len_src x d_model
mask_src
"""
""" Embedding: batch_size x len_src x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
""" Scale the emb by sqrt(d_model) """
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
#~ emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = input.data.eq(onmt.constants.PAD).unsqueeze(1) # batch_size x len_src x 1 for broadcasting
pad_mask = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
#~ pad_mask = None
context = emb.contiguous()
memory_bank = list()
for t in range(self.layers):
context = self.recurrent_layer(context, mask_src, t, pad_mask) # batch_size x len_src x d_model
#~ for i, layer in enumerate(self.layer_modules):
#~
#~
#~ if len(self.layer_modules) - i <= onmt.Constants.checkpointing and self.training:
#~ context, norm_input = checkpoint(custom_layer(layer), context, mask_src, pad_mask)
#~
#~ print(type(context))
#~ else:
#~ context, norm_input = layer(context, mask_src, pad_mask) # batch_size x len_src x d_model
#~
#~ if i > 0: # don't keep the norm input of the first layer (a.k.a embedding)
#~ memory_bank.append(norm_input)
#~
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
context = self.postprocess_layer(context)
return context, mask_src
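# Note: unlike the standard Transformer encoder, a single UniversalEncoderLayer
# is applied self.layers times with a step-dependent time embedding added at
# every iteration, i.e. the layer parameters are shared across depth.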
class UniversalTransformerDecoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt
dicts
"""
def __init__(self, opt, dicts, positional_encoder, time_encoder):
super(UniversalTransformerDecoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
self.positional_encoder = positional_encoder
self.time_encoder = time_encoder
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=onmt.constants.static)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
self.positional_encoder = positional_encoder
self.recurrent_layer = UniversalDecoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.positional_encoder, self.time_encoder, self.attn_dropout)
len_max = self.positional_encoder.len_max
mask = torch.ByteTensor(np.triu(np.ones((len_max,len_max)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def renew_buffer(self, new_len):
self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len,new_len)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def mark_pretrained(self):
self.pretrained_point = self.layers
def forward(self, input, context, src, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
""" Embedding: batch_size x len_tgt x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
#~ if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
#~ """ Adding positional encoding """
#~ emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
#~ memory_bank = None
for t in range(self.layers):
output, coverage = self.recurrent_layer(output, context, t, mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
#~ for i, layer in enumerate(self.layer_modules):
#~ if len(self.layer_modules) - i <= onmt.Constants.checkpointing and self.training:
#~
#~ output, coverage = checkpoint(custom_layer(layer), output, context[i], mask_tgt, mask_src,
#~ pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
#~
#~ else:
#~ output, coverage = layer(output, context[i], mask_tgt, mask_src,
#~ pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage
def step(self, input, decoder_state):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
context = decoder_state.context.transpose(0, 1)
buffer = decoder_state.buffer
src = decoder_state.src.transpose(0, 1)
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
input_ = input[:,-1].unsqueeze(1)
output_buffer = list()
batch_size = input_.size(0)
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_)
#~ if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
#~ if self.time == 'positional_encoding':
#~ emb = self.time_transformer(emb, t=input.size(1))
pos_step = input.size(1)
# emb should be batch_size x 1 x dim
# Preprocess layer: adding dropout
emb = self.preprocess_layer(emb)
# batch_size x 1 x len_src
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
# mask_tgt = self.mask[:len_tgt, :len_tgt].unsqueeze(0).repeat(batch_size, 1, 1)
mask_tgt = torch.gt(mask_tgt, 0)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
memory_bank = None
for t in range(self.layers):
buffer_ = buffer[t] if buffer is not None else None
assert(output.size(1) == 1)
output, coverage, buffer_ = self.recurrent_layer.step(output, context, pos_step, t, mask_tgt, mask_src,
pad_mask_tgt=None, pad_mask_src=None, buffer=buffer_) # batch_size x len_src x d_model
output_buffer.append(buffer_)
#~ for i, layer in enumerate(self.layer_modules):
#~
#~ buffer_ = buffer[i] if buffer is not None else None
#~ assert(output.size(1) == 1)
#~ output, coverage, buffer_ = layer.step(output, context[i], mask_tgt, mask_src,
#~ pad_mask_tgt=None, pad_mask_src=None, buffer=buffer_) # batch_size x len_src x d_model
#~
#~ output_buffer.append(buffer_)
buffer = torch.stack(output_buffer)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
decoder_state._update_state(buffer)
return output, coverage
| 13,602
| 38.428986
| 178
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/ParallelTransformer/Layers.py
|
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.bottle import Bottle
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.modules.static_dropout import StaticDropout
Linear=XavierLinear
def contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class ParallelEncoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one encoder layer
Args:
h: number of heads
d_model: dimension of model
p: dropout probability
d_ff: dimension of feed forward
Params:
multihead: multi-head attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
mask: batch_size x len_query x len_key or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
"""
def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0):
super(ParallelEncoderLayer, self).__init__()
self.version = version
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.multihead = MultiHeadAttention(h, d_model, attn_p=attn_p, static=onmt.constants.static)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
self.feedforward = Bottle(feedforward)
def forward(self, input, attn_mask, pad_mask=None, residual_dropout=0.0):
query = self.preprocess_attn(input)
out, _ = self.multihead(query, query, query, attn_mask,
query_mask=pad_mask, value_mask=pad_mask)
if residual_dropout > 0:
input_ = F.dropout(input, residual_dropout, self.training, False)
input = self.postprocess_attn(out, input_, mask=pad_mask)
#~ input = self.postprocess_attn(out) + input
else:
input = self.postprocess_attn(out, input, mask=pad_mask)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input),
mask=pad_mask)
input = self.postprocess_ffn(out, input)
# return the query which is the normalized input
return input, query
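# Note: when residual_dropout > 0 the residual branch itself is dropped out
# before the attention output is added back, and the layer also returns the
# layer-normalized query so that callers can collect a memory bank of
# pre-attention states.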
#~
#~ class ParallelDecoderLayer(nn.Module):
#~ """Wraps multi-head attentions and position-wise feed forward into one layer of decoder
#~
#~ Args:
#~ h: number of heads
#~ d_model: dimension of model
#~ p: dropout probabolity
#~ d_ff: dimension of feed forward
#~
#~ Params:
#~ multihead_tgt: multi-head self attentions layer
#~ multihead_src: multi-head encoder-decoder attentions layer
#~ feedforward: feed forward layer
#~
#~ Input Shapes:
#~ query: batch_size x len_query x d_model
#~ key: batch_size x len_key x d_model
#~ value: batch_size x len_key x d_model
#~ context: batch_size x len_src x d_model
#~ mask_tgt: batch_size x len_query x len_key or broadcastable
#~ mask_src: batch_size x len_query x len_src or broadcastable
#~
#~ Output Shapes:
#~ out: batch_size x len_query x d_model
#~ coverage: batch_size x len_query x len_key
#~
#~ """
#~
#~ def __init__(self, h, d_model, p, d_ff, attn_p=0.1):
#~ super(FCTDecoderLayer, self).__init__()
#~
#~ self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
#~ self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=True)
#~
#~ self.preprocess_src_attn = PrePostProcessing(d_model, p, sequence='n')
#~ self.postprocess_src_attn = PrePostProcessing(d_model, p, sequence='da', static=True)
#~
#~ self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
#~ self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=True)
#~
#~
#~ self.multihead_tgt = HierarchicalMultiHeadAttention(h, d_model, attn_p=attn_p)
#~ self.multihead_tgt = UniformMultiHeadAttention(h, d_model, attn_p=attn_p)
#~ self.multihead_tgt = FlatSumMultiHeadAttention(h, d_model, attn_p=attn_p)
#~ self.multihead_src = MultiHeadAttention(h, d_model, attn_p=attn_p)
#~ self.multihead_src = UniformMultiHeadAttention(h, d_model, attn_p=attn_p)
#~ self.multihead_src = FlatSumMultiHeadAttention(h, d_model, attn_p=attn_p)
#~
#~ if onmt.Constants.activation_layer == 'linear_relu_linear':
#~ ff_p = p
#~ feedforward = FeedForward(d_model, d_ff, ff_p)
#~ elif onmt.Constants.activation_layer == 'maxout':
#~ k = int(math.ceil(d_ff / d_model))
#~ feedforward = MaxOut(d_model, d_model, k)
#~ self.feedforward = Bottle(feedforward)
#~
#~
#~ def forward(self, input, context, memory_bank, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None):
#~
#~ """ Self attention layer
#~ layernorm > attn > dropout > residual
#~ """
#~
#~ query = self.preprocess_attn(input, mask=pad_mask_tgt)
#~
#~ if memory_bank is None:
#~ memory_bank = query.unsqueeze(0)
#~
#~ else:
#~ memory_bank = query.unsqueeze(0)
#~ memory_bank = torch.cat([memory_bank, query.unsqueeze(0)], dim=0) # n_layer x batch_size x len_src x hidden
#~
#~
#~ out, _ = self.multihead_tgt(query, memory_bank, mask_tgt,
#~ query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
#~
#~ input = self.postprocess_attn(out, input)
#~
#~ """ Context Attention layer
#~ layernorm > attn > dropout > residual
#~ """
#~
#~ query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
#~ out, coverage = self.multihead_src(query, context, mask_src,
#~ query_mask=pad_mask_tgt, value_mask=pad_mask_src)
#~ input = self.postprocess_src_attn(out, input)
#~
#~ """ Feed forward layer
#~ layernorm > ffn > dropout > residual
#~ """
#~ out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
#~ mask=pad_mask_tgt)
#~ input = self.postprocess_ffn(out, input)
#~
#~ return input, memory_bank, coverage
#~
#~
#~ def step(self, input, context, memory_bank, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None, buffer=None):
#~
#~ query = self.preprocess_attn(input, mask=pad_mask_tgt)
#~
#~ if buffer is not None:
#~ buffer = torch.cat([buffer, query], dim=1)
#~ else:
#~ buffer = query
#~
#~ if memory_bank is None:
#~ memory_bank = buffer.unsqueeze(0)
#~
#~ else:
#~ memory_bank = torch.cat([memory_bank, buffer.unsqueeze(0)], dim=0) # batch_size x n_layer x len_src x hidden
#~
#~
#~ out, _ = self.multihead_tgt(query, memory_bank, mask_tgt,
#~ query_mask=None, value_mask=None)
#~
#~ input = self.postprocess_attn(out, input)
#~
#~ """ Context Attention layer
#~ layernorm > attn > dropout > residual
#~ """
#~
#~ query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
#~ out, coverage = self.multihead_src(query, context, mask_src,
#~ query_mask=None, value_mask=None)
#~ input = self.postprocess_src_attn(out, input)
#~
#~ """ Feed forward layer
#~ layernorm > ffn > dropout > residual
#~ """
#~ out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
#~ mask=pad_mask_tgt)
#~ input = self.postprocess_ffn(out, input)
#~
#~ return input, memory_bank, coverage, buffer
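# [Editor's sketch, not part of the original file.] Every sub-layer in the layers above
# follows the same pre-norm residual pattern (layernorm > sublayer > dropout > residual).
# A minimal stand-alone version of that pattern, with `sublayer` standing in for
# self-attention, source attention or the feed-forward network (names are illustrative):
#
# import torch.nn.functional as F
#
# def prenorm_residual(x, sublayer, norm, p=0.1, training=True):
#     # y = x + dropout(sublayer(layernorm(x)))
#     return x + F.dropout(sublayer(norm(x)), p=p, training=training)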
| 9,252
| 40.124444
| 123
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/ParallelTransformer/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/ParallelTransformer/Models.py
|
import numpy as np
import torch, math
import torch.nn as nn
from onmt.models.transformer_layers import PositionalEncoding
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.legacy.ParallelTransformer.Layers import ParallelEncoderLayer
from onmt.modules.base_seq2seq import NMTModel, Reconstructor
import onmt
from onmt.modules.dropout import embedded_dropout
from onmt.modules.Checkpoint import checkpoint
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from torch.autograd import Variable
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
def custom_layer(module):
def custom_forward(*args):
output = module(*args)
return output
return custom_forward
class ParallelTransformerEncoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt: list of options ( see train.py )
dicts : dictionary (for source language)
"""
def __init__(self, opt, dicts, positional_encoder):
super(ParallelTransformerEncoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
if hasattr(opt, 'grow_dropout'):
self.grow_dropout = opt.grow_dropout
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
if opt.time == 'positional_encoding':
self.time_transformer = positional_encoder
elif opt.time == 'gru':
self.time_transformer = nn.GRU(self.model_size, self.model_size, 1, batch_first=True)
elif opt.time == 'lstm':
self.time_transformer = nn.LSTM(self.model_size, self.model_size, 1, batch_first=True)
#~ self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=onmt.constants.static)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.positional_encoder = positional_encoder
self.layer_modules = nn.ModuleList([ParallelEncoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout) for _ in range(self.layers)])
def add_layers(self, n_new_layer):
self.new_modules = list()
self.layers += n_new_layer
for i in range(n_new_layer):
layer = ParallelEncoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout)
# the first layer will use the preprocessing which is the last postprocessing
if i == 0:
layer.preprocess_attn.load_state_dict(self.postprocess_layer.state_dict())
#~ layer.preprocess_attn.layer_norm.function.weight.requires_grad = False
#~ layer.preprocess_attn.layer_norm.function.bias.requires_grad = False
#~ if hasattr(layer.postprocess_attn, 'k'):
#~ layer.postprocess_attn.k.data.fill_(0.01)
# replace the last postprocessing layer with a new one
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.layer_modules.append(layer)
def mark_pretrained(self):
self.pretrained_point = self.layers
def forward(self, input, grow=False):
"""
Inputs Shapes:
input: batch_size x len_src (to be transposed)
Outputs Shapes:
out: batch_size x len_src x d_model
mask_src
"""
if grow:
return self.forward_grow(input)
""" Embedding: batch_size x len_src x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
""" Scale the emb by sqrt(d_model) """
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = input.data.eq(onmt.constants.PAD).unsqueeze(1) # batch_size x len_src x 1 for broadcasting
pad_mask = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
#~ pad_mask = None
context = emb.contiguous()
memory_bank = list()
for i, layer in enumerate(self.layer_modules):
if len(self.layer_modules) - i <= onmt.constants.checkpointing and self.training:
context, norm_input = checkpoint(custom_layer(layer), context, mask_src, pad_mask)
#~ print(type(context))
else:
context, norm_input = layer(context, mask_src, pad_mask) # batch_size x len_src x d_model
if i > 0: # don't keep the norm input of the first layer (a.k.a embedding)
memory_bank.append(norm_input)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
context = self.postprocess_layer(context)
# make a huge memory bank on the encoder side
memory_bank.append(context)
memory_bank = torch.stack(memory_bank)
return memory_bank, mask_src
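# [Editor's sketch, not part of the original encoder.] The stacked memory bank has shape
# (n_layers, batch_size, len_src, d_model): one normalized layer input per layer plus the
# final normalized output, which the parallel decoder later reads per layer as context[i].
# Hypothetical shape check (names and sizes are made up for illustration):
#
# memory_bank, mask_src = encoder(src)              # src: batch_size x len_src
# assert memory_bank.size(0) == encoder.layers
# context_for_layer_0 = memory_bank[0]              # batch_size x len_src x d_model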
def forward_grow(self, input):
"""
Inputs Shapes:
input: batch_size x len_src (to be transposed)
Outputs Shapes:
out: batch_size x len_src x d_model
mask_src
"""
with torch.no_grad():
""" Embedding: batch_size x len_src x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
""" Scale the emb by sqrt(d_model) """
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = input.data.eq(onmt.constants.PAD).unsqueeze(1) # batch_size x len_src x 1 for broadcasting
pad_mask = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
#~ pad_mask = None
context = emb.contiguous()
memory_bank = list()
for i in range(self.pretrained_point):
layer = self.layer_modules[i]
context, norm_input = layer(context, mask_src, pad_mask) # batch_size x len_src x d_model
if i > 0: # don't keep the norm input of the first layer (a.k.a embedding)
memory_bank.append(norm_input)
for i in range(self.layers - self.pretrained_point):
res_drop_rate = 0.0
if i == 0:
res_drop_rate = self.grow_dropout
layer = self.layer_modules[self.pretrained_point + i]
context, norm_input = layer(context, mask_src, pad_mask, residual_dropout=res_drop_rate) # batch_size x len_src x d_model
memory_bank.append(norm_input)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
context = self.postprocess_layer(context)
# make a huge memory bank on the encoder side
memory_bank.append(context)
memory_bank = torch.stack(memory_bank)
return memory_bank, mask_src
class ParallelTransformerDecoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt
dicts
"""
def __init__(self, opt, dicts, positional_encoder):
super(ParallelTransformerDecoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
if hasattr(opt, 'grow_dropout'):
self.grow_dropout = opt.grow_dropout
if opt.time == 'positional_encoding':
self.time_transformer = positional_encoder
elif opt.time == 'gru':
self.time_transformer = nn.GRU(self.model_size, self.model_size, 1, batch_first=True)
elif opt.time == 'lstm':
self.time_transformer = nn.LSTM(self.model_size, self.model_size, 1, batch_first=True)
#~ self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=onmt.constants.static)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
self.positional_encoder = positional_encoder
self.layer_modules = nn.ModuleList([DecoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout) for _ in range(self.layers)])
len_max = self.positional_encoder.len_max
mask = torch.ByteTensor(np.triu(np.ones((len_max,len_max)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def renew_buffer(self, new_len):
self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len,new_len)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def mark_pretrained(self):
self.pretrained_point = self.layers
def add_layers(self, n_new_layer):
self.new_modules = list()
self.layers += n_new_layer
for i in range(n_new_layer):
layer = DecoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout)
# the first layer will use the preprocessing which is the last postprocessing
if i == 0:
# layer.preprocess_attn = self.postprocess_layer
layer.preprocess_attn.load_state_dict(self.postprocess_layer.state_dict())
#~ layer.preprocess_attn.layer_norm.function.weight.requires_grad = False
#~ layer.preprocess_attn.layer_norm.function.bias.requires_grad = False
# replace the last postprocessing layer with a new one
#~ if hasattr(layer.postprocess_attn, 'k'):
#~ layer.postprocess_attn.k.data.fill_(0.01)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.layer_modules.append(layer)
def forward(self, input, context, src, grow=False):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
""" Embedding: batch_size x len_tgt x d_model """
if grow:
return self.forward_grow(input, context, src)
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_tgt
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
#~ memory_bank = None
for i, layer in enumerate(self.layer_modules):
if len(self.layer_modules) - i <= onmt.constants.checkpointing and self.training:
output, coverage = checkpoint(custom_layer(layer), output, context[i], mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
else:
output, coverage = layer(output, context[i], mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage
def forward_grow(self, input, context, src):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
""" Embedding: batch_size x len_tgt x d_model """
with torch.no_grad():
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_tgt
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
for i in range(self.pretrained_point):
layer = self.layer_modules[i]
output, coverage = layer(output, context[i], mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
for i in range(self.layers - self.pretrained_point):
res_drop_rate = 0.0
if i == 0:
res_drop_rate = self.grow_dropout
layer = self.layer_modules[self.pretrained_point + i]
output, coverage = layer(output, context[self.pretrained_point + i], mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src, residual_dropout=res_drop_rate) # batch_size x len_src x d_model
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage
#~ def step(self, input, context, src, buffer=None):
def step(self, input, decoder_state):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
# note: transpose 1-2 because the first dimension (0) is the number of layer
context = decoder_state.context.transpose(1, 2)
buffer = decoder_state.buffer
src = decoder_state.src.transpose(0, 1)
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
input_ = input[:,-1].unsqueeze(1)
output_buffer = list()
batch_size = input.size(0)
# print(input_.size())
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
if self.time == 'positional_encoding':
emb = self.time_transformer(emb, t=input.size(1))
else:
prev_h = buffer[0] if buffer is not None else None
emb = self.time_transformer(emb, prev_h)
buffer[0] = emb[1]
if isinstance(emb, tuple):
emb = emb[0] # emb should be batch_size x 1 x dim
# Preprocess layer: adding dropout
emb = self.preprocess_layer(emb)
# batch_size x 1 x len_src
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
# mask_tgt = self.mask[:len_tgt, :len_tgt].unsqueeze(0).repeat(batch_size, 1, 1)
mask_tgt = torch.gt(mask_tgt, 0)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_tgt
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
memory_bank = None
for i, layer in enumerate(self.layer_modules):
buffer_ = buffer[i] if buffer is not None else None
assert(output.size(1) == 1)
output, coverage, buffer_ = layer.step(output, context[i], mask_tgt, mask_src,
pad_mask_tgt=None, pad_mask_src=None, buffer=buffer_) # batch_size x len_src x d_model
output_buffer.append(buffer_)
buffer = torch.stack(output_buffer)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
decoder_state._update_state(buffer)
return output, coverage
class ParallelTransformerDecodingState(DecoderState):
def __init__(self, src, context, beamSize=1):
self.src = src
self.context = context
self.beamSize = beamSize
self.buffer = None
self.input_seq = None
self.context = context.transpose(1, 2)
self.context = Variable(self.context.data.repeat(1, 1, beamSize, 1))
def _update_state(self, buffer):
self.buffer = buffer
def _update_beam(self, beam, b, remainingSents, idx):
for tensor in [self.src, self.input_seq] :
t_, br = tensor.size()
sent_states = tensor.view(t_, self.beamSize, remainingSents)[:, :, idx]
if isinstance(tensor, Variable):
sent_states.data.copy_(sent_states.data.index_select(
1, beam[b].getCurrentOrigin()))
else:
sent_states.copy_(sent_states.index_select(
1, beam[b].getCurrentOrigin()))
nl, br_, t_, d_ = self.buffer.size()
sent_states = self.buffer.view(nl, self.beamSize, remainingSents, t_, d_)[:, :, idx, :, :]
sent_states.data.copy_(sent_states.data.index_select(
1, beam[b].getCurrentOrigin()))
# in this section, the sentences that are still active are
# compacted so that the decoder is not run on completed sentences
def _prune_complete_beam(self, activeIdx, remainingSents):
model_size = self.context.size(-1)
def updateActive4D_time_first(t):
# select only the remaining active sentences
nl, t_, br_, d_ = t.size()
view = t.data.view(nl, t_, -1, remainingSents, model_size)
newSize = list(t.size())
newSize[2] = newSize[2] * len(activeIdx) // remainingSents
return Variable(view.index_select(3, activeIdx)
.view(*newSize))
def updateActive2D(t):
if isinstance(t, Variable):
# select only the remaining active sentences
view = t.data.view(-1, remainingSents)
newSize = list(t.size())
newSize[-1] = newSize[-1] * len(activeIdx) // remainingSents
return Variable(view.index_select(1, activeIdx)
.view(*newSize))
else:
view = t.view(-1, remainingSents)
newSize = list(t.size())
newSize[-1] = newSize[-1] * len(activeIdx) // remainingSents
new_t = view.index_select(1, activeIdx).view(*newSize)
return new_t
def updateActive4D(t):
# select only the remaining active sentences
nl, br_, t_, d_ = t.size()
view = t.data.view(nl, -1, remainingSents, t_, model_size)
newSize = list(t.size())
newSize[1] = newSize[1] * len(activeIdx) // remainingSents
return Variable(view.index_select(2, activeIdx)
.view(*newSize))
self.context = updateActive4D_time_first(self.context)
self.input_seq = updateActive2D(self.input_seq)
self.src = updateActive2D(self.src)
self.buffer = updateActive4D(self.buffer)
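# [Editor's sketch, not part of the original file.] The beam bookkeeping above views a flat
# (time x (beam*batch)) tensor as (time x beam x batch) and re-orders the beam dimension with
# index_select. A minimal, self-contained version of that pattern (values are made up):
#
# import torch
# t, beam, batch = 4, 3, 2
# flat = torch.arange(t * beam * batch).view(t, beam * batch)
# origin = torch.tensor([2, 0, 1])                  # hypothetical back-pointers for one sentence
# per_sent = flat.view(t, beam, batch)[:, :, 0]     # states of sentence 0
# reordered = per_sent.index_select(1, origin)      # follow the surviving hypotheses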
| 25,098
| 39.417069
| 175
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/old_models/distance_transformer_layers.py
|
import math
import torch
import torch.nn as nn
import onmt
from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Linear
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.utils import flip
from onmt.modules.bottle import Bottle
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.linear import group_linear, FeedForwardSwish, FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.relative_attention import LearnableRelMultiHeadAttn
class DistanceTransformerEncoderLayer(nn.Module):
def __init__(self, h, d_model, p, d_ff, attn_p=0.1, variational=False, death_rate=0.0,
max_len=64, **kwargs):
super(DistanceTransformerEncoderLayer, self).__init__()
self.variational = variational
self.death_rate = death_rate
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
# self.multihead = MultiHeadAttention(h, d_model, attn_p=attn_p, share=2)
d_head = d_model // h
self.multihead = LearnableRelMultiHeadAttn(h, d_model, d_head, dropatt=attn_p, max_len=max_len)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p, variational=self.variational)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
elif onmt.constants.activation_layer == 'linear_swish_linear':
ff_p = p
feedforward = FeedForwardSwish(d_model, d_ff, ff_p, variational=self.variational)
else:
raise NotImplementedError
self.feedforward = Bottle(feedforward)
def forward(self, input, attn_mask, incremental=False, incremental_cache=None, mems=None):
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
query = self.preprocess_attn(input)
out, _, incremental_cache = self.multihead(query, attn_mask=attn_mask, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
if incremental:
return input, incremental_cache
return input
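# [Editor's sketch, not part of the original layer.] The coin flip above implements stochastic
# depth: during training each layer survives with probability (1 - death_rate) and its output
# is rescaled by 1 / (1 - death_rate) so the expected residual contribution stays unchanged.
# Minimal illustration of the expectation argument (values are made up):
#
# death_rate = 0.2
# survive_prob = 1.0 - death_rate
# # E[out] = survive_prob * (out / survive_prob) + death_rate * 0 = out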
class DistanceTransformerDecoderLayer(nn.Module):
def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, ignore_source=False,
variational=False, death_rate=0.0, max_len=64):
super(DistanceTransformerDecoderLayer, self).__init__()
self.version = version
self.ignore_source = ignore_source
self.variational = variational
self.death_rate = death_rate
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
if not self.ignore_source:
self.preprocess_src_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_src_attn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
self.multihead_src = MultiHeadAttention(h, d_model, attn_p=attn_p, share=2)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
d_head = d_model // h
self.multihead_tgt = LearnableRelMultiHeadAttn(h, d_model, d_head, dropatt=attn_p, max_len=max_len)
# self.multihead_tgt = MultiHeadAttention(h, d_model, attn_p=attn_p, share=1)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p, variational=self.variational)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
elif onmt.constants.activation_layer == 'linear_swish_linear':
ff_p = p
feedforward = FeedForwardSwish(d_model, d_ff, ff_p)
else:
raise NotImplementedError
self.feedforward = Bottle(feedforward)
# def forward(self, input, context, pos_emb, r_w_bias, r_r_bias, mask_tgt, mask_src):
def forward(self, input, context, mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=True, mems=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
# input and context should be time first ?
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
query = self.preprocess_attn(input)
# out, _ = self.multihead_tgt(query, pos_emb, r_w_bias, r_r_bias, attn_mask=mask_tgt)
# print(query.size(), pos_emb.size(), mask_tgt.size(), mems.size() if mems is not None else 0)
out, _, incremental_cache = self.multihead_tgt(query, attn_mask=mask_tgt, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
if not self.ignore_source:
query = self.preprocess_src_attn(input)
incremental_source = incremental and reuse_source
out, coverage = self.multihead_src(query, context, context, mask_src,
incremental=incremental_source,
incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_src_attn(out, input)
else:
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
else:
coverage = None
return input, coverage, incremental_cache
def step(self, input, context, mask_tgt, mask_src, buffer=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_attn(input)
out, _, buffer = self.multihead_tgt.step(query, attn_mask=mask_tgt, buffer=buffer)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
if not self.ignore_source:
query = self.preprocess_src_attn(input)
out, coverage, buffer = self.multihead_src.step(query, context, context, mask_src, buffer=buffer)
input = self.postprocess_src_attn(out, input)
else:
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
input = self.postprocess_ffn(out, input)
return input, coverage, buffer
| 9,073
| 40.43379
| 116
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/old_models/relative_unified_transformer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, TransformerDecodingState
import onmt
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.legacy.old_models.unified_transformer import UnifiedTransformer
from onmt.models.relative_transformer import SinusoidalPositionalEmbedding, LearnablePostionEmbedding, \
StreamState, StreamDecodingState
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import numpy as np
torch.set_printoptions(profile="full")
def seperate_tensor(input, lengths):
bsz, tgt_len = input.size(1), input.size(0)
assert (bsz == 1)
outputs = list()
# starting from the first position of the tensor
offset = 0
for length in lengths:
segment = input.narrow(0, offset, length)
offset += length
outputs.append(segment)
return outputs
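# [Editor's sketch, not part of the original file.] seperate_tensor splits a time-major
# (T x 1) stream tensor back into the per-sentence segments it was concatenated from.
# Hypothetical usage:
#
# import torch
# stream = torch.arange(10).view(10, 1)        # T = 10, batch = 1
# segments = seperate_tensor(stream, [4, 6])
# # segments[0]: rows 0..3, segments[1]: rows 4..9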
class RelativeUnifiedTransformer(UnifiedTransformer):
"""
This class combines the encoder and the decoder into one single sequence
Joined attention between encoder and decoder parts
"""
def __init__(self, opt, src_embedding, tgt_embedding, generator, positional_encoder,
language_embeddings=None, encoder_type='text', **kwargs):
self.death_rate = opt.death_rate
self.bidirectional = opt.bidirectional
self.layer_modules = []
self.learnable_position_encoding = opt.learnable_position_encoding
self.max_memory_size = opt.max_memory_size
# build_modules will be called from the inherited constructor
super(RelativeUnifiedTransformer, self).__init__(opt, tgt_embedding, src_embedding,
generator, positional_encoder,
language_embeddings=language_embeddings,
encoder_type=encoder_type)
self.src_embedding = src_embedding
self.tgt_embedding = tgt_embedding
# self.language_embedding = nn.Embedding(3, self.model_size, padding_idx=0)
self.generator = generator
self.ignore_source = True
self.encoder_type = opt.encoder_type
# learnable position encoding
if self.learnable_position_encoding:
self.max_pos_length = opt.max_pos_length
# pos_emb = self.model_size // self.n_heads
pos_emb = self.model_size
self.positional_encoder = LearnablePostionEmbedding(self.max_pos_length, pos_emb)
print("* Learnable position encoding with max %d positions" % self.max_pos_length)
else:
# or using pre-set sinusoidal
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
# self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
def gen_mask(self, src, tgt):
# generate the mask for the mini-batch data
# both src and tgt are T x B
input_seq = torch.cat([src, tgt], dim=0)
seq_len = input_seq.size(0)
if self.bidirectional:
bsz, src_len = src.size(1), src.size(0)
tgt_len = tgt.size(0)
tgt_tgt_mask = torch.triu(src.new_ones(tgt_len, tgt_len), diagonal=1)
tgt_src_mask = src.new_zeros(tgt_len, src_len)
tgt_mask = torch.cat([tgt_src_mask, tgt_tgt_mask], dim=-1)
src_src_mask = src.new_zeros(src_len, src_len)
src_tgt_mask = src.new_ones(src_len, tgt_len)
src_mask = torch.cat([src_src_mask, src_tgt_mask], dim=-1)
attn_mask = torch.cat([src_mask, tgt_mask], dim=0)
attn_mask = attn_mask.bool().unsqueeze(-1)
pad_mask = input_seq.eq(onmt.constants.PAD).unsqueeze(0)
attn_mask = attn_mask | pad_mask
else:
attn_mask = torch.triu(src.new_ones(seq_len, seq_len), diagonal=1).bool().unsqueeze(-1) # T x T x -1
pad_mask = input_seq.eq(onmt.constants.PAD).unsqueeze(0) # 1 x T x B
# attn_mask = self.mask[:seq_len, :seq_len] + input_seq.eq(onmt.constants.PAD).byte().unsqueeze(1)
attn_mask = attn_mask | pad_mask
return attn_mask
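# [Editor's sketch, not part of the original model.] In the bidirectional case the joint mask
# lets source positions attend to all source positions, blocks source->target attention, and
# gives target positions causal attention over the target plus full attention over the source.
# Hypothetical shape check (instance `model` and all sizes are made up; fill value 5 assumed
# not to be the PAD index):
#
# src = torch.full((7, 2), 5, dtype=torch.long)  # src_len=7, batch=2
# tgt = torch.full((5, 2), 5, dtype=torch.long)  # tgt_len=5
# attn_mask = model.gen_mask(src, tgt)           # (12, 12, 2) boolean, True = masked out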
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Transformer Decoder with Relative Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = (l + 1.0) / self.layers * self.death_rate
block = RelativeTransformerDecoderLayer(self.n_heads, self.model_size,
self.dropout, self.inner_size, self.attn_dropout,
ignore_source=True,
variational=self.variational_dropout, death_rate=death_r)
self.layer_modules.append(block)
def create_mask_stream(self, src, tgt, src_lengths, tgt_lengths, mem_length=0):
if self.bidirectional:
mask = None
prev_length = 0
# go through the src and tgt lengths to create mask
for i, (src_len, tgt_len) in enumerate(zip(src_lengths, tgt_lengths)):
# print("Step ", i, src_len, tgt_len)
# first, the source sentence should have full bidirectional attention to the end of itself
src_mask = src.new_zeros(src_len, src_len + prev_length)
if prev_length == 0:
mask = src_mask
else:
# everything in the past doesn't look at the future
prev_mask = src.new_ones(prev_length, src_len)
if mask is not None:
mask = torch.cat([mask, prev_mask], dim=1) # prev_len x (src_len + prev_length)
else:
mask = prev_mask
mask = torch.cat([mask, src_mask], dim=0) # (src_len + prev_length) x (src_len + prev_length)
prev_length += src_len
# the target sentence
# everything in the past doesn't look at the future
prev_mask = tgt.new_ones(prev_length, tgt_len)
# the target has unidirectional attention towards everything in the past
mlen = prev_length
qlen = tgt_len
klen = qlen + mlen
tgt_mask = torch.triu(tgt.new_ones(qlen, klen), diagonal=1 + mlen)
mask = torch.cat([mask, prev_mask], dim=1) # prev_len x (prev_len + tgt_len)
mask = torch.cat([mask, tgt_mask], dim=0) #
prev_length += tgt_len
if mem_length > 0:
past_mask = src.new_zeros(prev_length, mem_length)
mask = torch.cat([past_mask, mask], dim=1)
attn_mask = mask.bool().unsqueeze(-1)
else:
seq_len = sum(src_lengths) + sum(tgt_lengths)
mask = torch.triu(src.new_ones(seq_len, seq_len), diagonal=1)
if mem_length > 0:
past_mask = src.new_zeros(seq_len, mem_length)
mask = torch.cat([past_mask, mask], dim=1)
attn_mask = mask.bool().unsqueeze(-1)
return attn_mask
def forward_stream(self, batch, **kwargs):
streaming_state = kwargs.get('streaming_state', None)
src = batch.get('source') # src_len x batch_size
tgt = batch.get('target_input') # (len_tgt x batch_size) x 1
bsz = src.size(1)
assert bsz == 1
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
tgt_lengths = batch.tgt_lengths
# First: separate the input tensor into segments
src_segments = seperate_tensor(src, src_lengths)
tgt_segments = seperate_tensor(tgt, tgt_lengths)
# Embedding stage (and scale the embedding)
embed = self.src_embedding
if self.word_dropout > 0 and self.training:
mask = embed.weight.new().resize_((embed.weight.size(0), 1)). \
bernoulli_(1 - self.word_dropout).expand_as(embed.weight) / (1 - self.word_dropout)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
padding_idx = embed.padding_idx
if padding_idx is None:
padding_idx = -1
# Second: Embedding
src_embeddings = []
for src_segment in src_segments:
src_emb = F.embedding(
src_segment, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
src_emb.mul_(math.sqrt(self.model_size))
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb
src_embeddings.append(src_emb)
tgt_embeddings = []
for tgt_segment in tgt_segments:
tgt_emb = F.embedding(
tgt_segment, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
tgt_emb.mul_(math.sqrt(self.model_size))
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb
tgt_embeddings.append(tgt_emb)
# add src1, tgt1, src2, tgt2 .... srcn, tgtn
all_embeddings = []
for (src_emb, tgt_emb) in zip(src_embeddings, tgt_embeddings):
all_embeddings.append(src_emb)
all_embeddings.append(tgt_emb)
emb = torch.cat(all_embeddings, dim=0)
# prepare attention mask
mem_length = streaming_state.prev_tgt_mem_size
attn_mask = self.create_mask_stream(src, tgt, src_lengths, tgt_lengths, mem_length=mem_length)
klen = emb.size(0) + mem_length
if self.bidirectional:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
else:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
output = emb
# Applying dropout
output = self.preprocess_layer(output)
pos_emb = self.preprocess_layer(pos_emb)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
buffer = streaming_state.tgt_buffer[i]
output, coverage, buffer = layer(output, None, pos_emb, attn_mask, None,
incremental=True, incremental_cache=buffer)
# context and context_mask are None
streaming_state.tgt_buffer[i] = buffer
# final layer norm
output = self.postprocess_layer(output)
# update the memory and then prune
streaming_state.prev_tgt_mem_size += klen
streaming_state.prune_target_memory(self.max_memory_size)
# now we have to separate the target states from the "output" to generate translations
target_outputs = []
contexts = []
offset = 0
for (src_len, tgt_len) in zip(src_lengths, tgt_lengths):
source_output = output.narrow(0, offset, src_len)
offset += src_len
target_output = output.narrow(0, offset, tgt_len)
offset += tgt_len
target_outputs.append(target_output)
contexts.append(source_output)
context = torch.cat(contexts, dim=0)
output = torch.cat(target_outputs, dim=0)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context, 'src': src,
'target_mask': None}
output_dict = defaultdict(lambda: None, output_dict)
# final layer: computing log probabilities
logprobs = self.generator[0](output_dict)
output_dict['logprobs'] = logprobs
output_dict['streaming_state'] = streaming_state
return output_dict
def forward(self, batch, target_mask=None, streaming=False, **kwargs):
if streaming:
return self.forward_stream(batch, **kwargs)
src = batch.get('source') # src_len x batch_size
tgt = batch.get('target_input') # len_tgt x batch_size
src_pos = batch.get('source_pos')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
tgt_len = tgt.size(0)
src_len = src.size(0)
bsz = tgt.size(1)
# Embedding stage (and scale the embedding)
embed = self.src_embedding
if self.word_dropout > 0 and self.training:
mask = embed.weight.new().resize_((embed.weight.size(0), 1)). \
bernoulli_(1 - self.word_dropout).expand_as(embed.weight) / (1 - self.word_dropout)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
padding_idx = embed.padding_idx
if padding_idx is None:
padding_idx = -1
src_emb = F.embedding(
src, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
src_emb.mul_(math.sqrt(self.model_size))
tgt_emb = F.embedding(
tgt, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
tgt_emb.mul_(math.sqrt(self.model_size))
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb
else:
raise NotImplementedError
# concatenate embedding
emb = torch.cat([src_emb, tgt_emb], dim=0) # L x batch_size x H
# prepare self-attention mask
attn_mask = self.gen_mask(src, tgt)
# pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
klen = src_len + tgt_len
if self.bidirectional:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
else:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
output = emb
# Applying dropout
output = self.preprocess_layer(output)
pos_emb = self.preprocess_layer(pos_emb)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
output, coverage, _ = layer(output, None, pos_emb, attn_mask, None) # context and context_mask are None
# Final normalization
output = self.postprocess_layer(output)
# extract the "source" and "target" parts of the output
context = output[:src_len, :, :]
output = output[-tgt_len:, :, :]
output_dict = {'hidden': output, 'coverage': coverage, 'context': context, 'src': src,
'target_mask': target_mask}
# final layer: computing log probabilities
logprobs = self.generator[0](output_dict)
output_dict['logprobs'] = logprobs
return output_dict
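# [Editor's sketch, not part of the original model.] For the relative position embeddings,
# the bidirectional model enumerates distances from +(klen-1) down to -(klen-1), while the
# unidirectional model only needs the non-negative half. Example with klen = 4:
#
# klen = 4
# bidirectional_pos = torch.arange(klen - 1, -klen, -1.0)  # [3, 2, 1, 0, -1, -2, -3]
# causal_pos = torch.arange(klen - 1, -1, -1.0)            # [3, 2, 1, 0]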
def encode(self, input, decoder_state, input_pos=None, input_lang=None):
buffers = decoder_state.attention_buffers
src_lang = input_lang
input = input.transpose(0, 1)
# Embedding stage (and scale the embedding)
src_emb = embedded_dropout(self.src_embedding, input, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb
emb = src_emb
src_len = input.size(0)
bsz = input.size(1)
mask_src_src = input.eq(onmt.constants.PAD).byte() # src_len x batch_size
mask_src = mask_src_src.unsqueeze(0) # 1 x src_len x batch_size
attn_mask = mask_src.bool() # broadcast over the query dimension
output = emb
# Applying dropout and tranpose to T x B x H
output = self.preprocess_layer(output)
klen = src_len
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
# context and context_mask are None
buffer = buffers[i] if i in buffers else None
# output, coverage, buffer = layer.step(output, None, attn_mask, None, buffer)
output, coverage, buffer = layer(output, None, pos_emb, attn_mask, None,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
# Final normalization
output = self.postprocess_layer(output)
return output, decoder_state
def decode(self, batch):
"""
:param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
:return: gold_scores (torch.Tensor) log probs for each sentence
gold_words (Int) the total number of non-padded tokens
allgold_scores (list of Tensors) log probs for each word in the sentence
"""
# raise NotImplementedError
tgt_output = batch.get('target_output')
output_dict = self.forward(batch, target_mask=None)
context = output_dict['context']
logprobs = output_dict['logprobs']
batch_size = logprobs.size(1)
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
for gen_t, tgt_t in zip(logprobs, tgt_output):
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.constants.PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
gold_words += tgt_t.ne(onmt.constants.PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
def renew_buffer(self, new_len):
# This model uses pre-allocated position encoding
self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len + 1, new_len + 1)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
return
def reset_states(self):
return
def step(self, input, decoder_state):
src = decoder_state.src if decoder_state.src is not None else None
tgt = input.transpose(0, 1)
tgt_lang = decoder_state.tgt_lang
src_lang = decoder_state.src_lang
buffers = decoder_state.attention_buffers
tgt_len = tgt.size(0)
src_len = src.size(0)
bsz = tgt.size(1)
# Embedding stage (and scale the embedding)
# src_emb = embedded_dropout(self.src_embedding, src, dropout=self.word_dropout if self.training else 0) \
# * math.sqrt(self.model_size)
input_ = tgt[-1:]
tgt_emb = embedded_dropout(self.tgt_embedding, input_, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
# src_lang_emb = self.language_embeddings(src_lang)
# src_emb += src_lang_emb
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb
else:
raise NotImplementedError
# concatenate embedding
# emb = torch.cat([src_emb, tgt_emb], dim=0) # L x batch_size x H
emb = tgt_emb
# prepare self-attention mask
attn_mask = self.gen_mask(src, tgt)
# last attn_mask step
attn_mask = attn_mask[-1:, :, :]
klen = src_len + tgt_len
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
output = emb
# Applying dropout
output = self.preprocess_layer(output)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
output, coverage, buffer = layer(output, None, pos_emb, attn_mask, None,
incremental=True,
incremental_cache=buffer) # context and context_mask are None
decoder_state.update_attention_buffer(buffer, i)
# Final normalization
output = self.postprocess_layer(output)
# output = output[-1:, :, :]
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
logprobs = self.generator[0](output_dict).squeeze(0)
output_dict['src'] = decoder_state.src.transpose(0, 1)
output_dict['log_prob'] = logprobs
output_dict['coverage'] = logprobs.new(bsz, tgt_len, src_len).zero_()
return output_dict
def create_decoder_state(self, batch, beam_size=1, type=1):
src = batch.get('source')
src_pos = batch.get('source_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_transposed = src.transpose(0, 1) # B x T
decoder_state = TransformerDecodingState(src, tgt_lang, None, None,
beam_size=beam_size, model_size=self.model_size, type=type)
# forward pass through the input to get the buffer
# src_transposed = src_transposed.repeat(beam_size, 1)
encoder_output, decoder_state = self.encode(src_transposed, decoder_state, input_pos=src_pos,
input_lang=src_lang)
decoder_state.src_lang = src_lang
buffers = decoder_state.attention_buffers
bsz = src.size(1)
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src.device)
for l in buffers:
buffer_ = buffers[l]
if buffer_ is not None:
for k in buffer_.keys():
t_, br_, d_ = buffer_[k].size()
buffer_[k] = buffer_[k].index_select(1, new_order) # 1 for time first
return decoder_state
def tie_weights(self):
assert self.generator is not None, "The generator needs to be created before sharing weights"
self.generator[0].linear.weight = self.tgt_embedding.weight
def share_enc_dec_embedding(self):
self.src_embedding.weight = self.tgt_embedding.weight
def init_stream(self):
param = next(self.parameters())
layers = self.layers
streaming_state = StreamState(layers, self.max_memory_size, param.device, param.dtype)
return streaming_state
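# [Editor's sketch, not part of the original file.] tie_weights / share_enc_dec_embedding above
# make the output projection and the embedding tables share one parameter tensor, which a quick
# identity check on a hypothetical instance `model` would confirm:
#
# model.tie_weights()
# assert model.generator[0].linear.weight is model.tgt_embedding.weight
# model.share_enc_dec_embedding()
# assert model.src_embedding.weight is model.tgt_embedding.weight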
| 24,270
| 36.982786
| 116
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/old_models/memory_transformer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, TransformerDecodingState
import onmt
from onmt.modules.bottle import Bottle
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.legacy.old_models.unified_transformer import UnifiedTransformer
from onmt.models.relative_transformer import SinusoidalPositionalEmbedding, LearnablePostionEmbedding, \
StreamState, StreamDecodingState
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
def seperate_tensor(input, lengths):
bsz, tgt_len = input.size(1), input.size(0)
assert (bsz == 1)
outputs = list()
# starting from the first position of the tensor
offset = 0
for length in lengths:
segment = input.narrow(0, offset, length)
offset += length
outputs.append(segment)
return outputs
class MemoryTransformerDecoderLayer(nn.Module):
def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, ignore_source=False,
variational=False, death_rate=0.0):
super(MemoryTransformerDecoderLayer, self).__init__()
self.version = version
self.ignore_source = ignore_source
self.variational = variational
self.death_rate = death_rate
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
d_head = d_model // h
self.multihead_tgt = RelPartialLearnableMultiHeadAttn(h, d_model, d_head, dropatt=attn_p)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p, variational=self.variational)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
elif onmt.constants.activation_layer == 'linear_swish_linear':
ff_p = p
feedforward = FeedForwardSwish(d_model, d_ff, ff_p)
else:
raise NotImplementedError
self.feedforward = Bottle(feedforward)
def forward(self, input_, context, pos_emb, mask_tgt, mask_src, mems=None,
incremental=False, incremental_cache=None):
# incremental=False, incremental_cache=None, reuse_source=True):
""" Self attention layer with memory
layernorm > attn > dropout > residual
"""
assert context is None, "This model does not have a context encoder"
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
# input and context should be time first ?
query = self.preprocess_attn(input_)
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
# out, _ = self.multihead_tgt(query, pos_emb, r_w_bias, r_r_bias, attn_mask=mask_tgt)
out, _, incremental_cache = self.multihead_tgt(query, pos_emb, attn_mask=mask_tgt,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input_ = self.postprocess_attn(out, input_)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input_))
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input_ = self.postprocess_ffn(out, input_)
else:
coverage = None
if incremental:
return input_, coverage, incremental_cache
return input_, coverage
def step(self, input, context, pos_emb, mask_tgt, mask_src, buffer=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_attn(input)
out, _, buffer = self.multihead_tgt(query, pos_emb, attn_mask=mask_tgt, buffer=buffer)
input = self.postprocess_attn(out, input)
# this layer has no source attention, so there is no coverage to return
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
input = self.postprocess_ffn(out, input)
return input, coverage, buffer
class MemoryTransformer(UnifiedTransformer):
"""
This class combines the encoder and the decoder into one single sequence
Joined attention between encoder and decoder parts
"""
def __init__(self, opt, src_embedding, tgt_embedding, generator, positional_encoder,
language_embeddings=None, encoder_type='text', **kwargs):
self.death_rate = opt.death_rate
self.bidirectional = opt.bidirectional
self.layer_modules = []
self.learnable_position_encoding = opt.learnable_position_encoding
self.max_memory_size = opt.max_memory_size
self.mem_len = self.max_memory_size
self.dictionary = kwargs.get('dictionary', None)
# build_modules will be called from the inherited constructor
super(MemoryTransformer, self).__init__(opt, tgt_embedding, src_embedding,
generator, positional_encoder,
language_embeddings=language_embeddings,
encoder_type=encoder_type)
self.src_embedding = src_embedding
self.tgt_embedding = tgt_embedding
# self.language_embedding = nn.Embedding(3, self.model_size, padding_idx=0)
self.generator = generator
self.ignore_source = True
self.encoder_type = opt.encoder_type
# learnable position encoding
if self.learnable_position_encoding:
self.max_pos_length = opt.max_pos_length
# pos_emb = self.model_size // self.n_heads
pos_emb = self.model_size
self.positional_encoder = LearnablePostionEmbedding(self.max_pos_length, pos_emb)
print("* Learnable position encoding with max %d positions" % self.max_pos_length)
else:
# or using pre-set sinusoidal
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
# self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
def gen_mask(self, src, tgt):
# generate the mask for the mini-batch data
# both src and tgt are T x B
input_seq = torch.cat([src, tgt], dim=0)
seq_len = input_seq.size(0)
if self.bidirectional:
bsz, src_len = src.size(1), src.size(0)
tgt_len = tgt.size(0)
tgt_tgt_mask = torch.triu(src.new_ones(tgt_len, tgt_len), diagonal=1)
tgt_src_mask = src.new_zeros(tgt_len, src_len)
tgt_mask = torch.cat([tgt_src_mask, tgt_tgt_mask], dim=-1)
src_src_mask = src.new_zeros(src_len, src_len)
src_tgt_mask = src.new_ones(src_len, tgt_len)
src_mask = torch.cat([src_src_mask, src_tgt_mask], dim=-1)
attn_mask = torch.cat([src_mask, tgt_mask], dim=0)
attn_mask = attn_mask.bool().unsqueeze(-1)
pad_mask = input_seq.eq(onmt.constants.PAD).unsqueeze(0)
attn_mask = attn_mask | pad_mask
else:
attn_mask = torch.triu(src.new_ones(seq_len, seq_len), diagonal=1).bool().unsqueeze(-1) # T x T x -1
pad_mask = input_seq.eq(onmt.constants.PAD).unsqueeze(0) # 1 x T x B
# attn_mask = self.mask[:seq_len, :seq_len] + input_seq.eq(onmt.constants.PAD).byte().unsqueeze(1)
attn_mask = attn_mask | pad_mask
return attn_mask
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Transformer Decoder with Relative Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = (l + 1.0) / self.layers * self.death_rate
block = MemoryTransformerDecoderLayer(self.n_heads, self.model_size,
self.dropout, self.inner_size, self.attn_dropout,
ignore_source=True,
variational=self.variational_dropout, death_rate=death_r)
self.layer_modules.append(block)
def create_mask_stream(self, src, tgt, src_lengths, tgt_lengths, mem_length=0):
if self.bidirectional:
mask = None
prev_length = 0
# go through the src and tgt lengths to create mask
for i, (src_len, tgt_len) in enumerate(zip(src_lengths, tgt_lengths)):
# print("Step ", i, src_len, tgt_len)
# first, the source sentence should have full bidirectional attention to the end of itself
src_mask = src.new_zeros(src_len, src_len + prev_length)
if prev_length == 0:
mask = src_mask
else:
# everything in the past doesn't look at the future
prev_mask = src.new_ones(prev_length, src_len)
if mask is not None:
mask = torch.cat([mask, prev_mask], dim=1) # prev_len x (src_len + prev_length)
else:
mask = prev_mask
mask = torch.cat([mask, src_mask], dim=0) # (src_len + prev_length) x (src_len + prev_length)
prev_length += src_len
# the target sentence
# everything in the past doesn't look at the future
prev_mask = tgt.new_ones(prev_length, tgt_len)
# the target has unidirectional attention towards everything in the past
mlen = prev_length
qlen = tgt_len
klen = qlen + mlen
tgt_mask = torch.triu(tgt.new_ones(qlen, klen), diagonal=1 + mlen)
mask = torch.cat([mask, prev_mask], dim=1) # prev_len x (prev_len + tgt_len)
mask = torch.cat([mask, tgt_mask], dim=0) #
prev_length += tgt_len
if mem_length > 0:
past_mask = src.new_zeros(prev_length, mem_length)
mask = torch.cat([past_mask, mask], dim=1)
attn_mask = mask.bool().unsqueeze(-1)
else:
seq_len = sum(src_lengths) + sum(tgt_lengths)
# mask = torch.triu(src.new_ones(seq_len, seq_len), diagonal=1)
# if mem_length > 0:
# past_mask = src.new_zeros(seq_len, mem_length)
# mask = torch.cat([past_mask, mask], dim=1)
mask = torch.triu(src.new_ones(seq_len, seq_len + mem_length), diagonal=1 + mem_length)
attn_mask = mask.bool().unsqueeze(-1)
return attn_mask
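# [Editor's sketch, not part of the original model.] With a memory of length mlen, the causal
# mask is built over klen = qlen + mlen keys and the diagonal is shifted by mlen, so every new
# query can see the whole memory plus the non-future part of the current chunk:
#
# qlen, mlen = 3, 2
# mask = torch.triu(torch.ones(qlen, qlen + mlen), diagonal=1 + mlen)
# # row i masks keys j > mlen + i, i.e. only strictly-future positions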
def forward_stream(self, batch, **kwargs):
streaming_state = kwargs.get('streaming_state', None)
mems = streaming_state.mems
src = batch.get('source') # src_len x batch_size
tgt = batch.get('target_input') # (len_tgt x batch_size) x 1
bsz = src.size(1)
assert bsz == 1
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
tgt_lengths = batch.tgt_lengths
# First: separate the input tensor into segments
src_segments = seperate_tensor(src, src_lengths)
tgt_segments = seperate_tensor(tgt, tgt_lengths)
# if self.dictionary is not None:
# for src_, tgt_ in zip(src_segments, tgt_segments):
# src_ = src_.squeeze(1)
# tgt_ = tgt_.squeeze(1)
#
# src_words = " ".join(self.dictionary.convertToLabels(src_, onmt.constants.EOS))
# tgt_words = " ".join(self.dictionary.convertToLabels(tgt_, onmt.constants.EOS))
# print(src_words, tgt_words)
# input("Press any key to continue...")
# Embedding stage (and scale the embedding)
embed = self.src_embedding
if self.word_dropout > 0 and self.training:
mask = embed.weight.new().resize_((embed.weight.size(0), 1)). \
bernoulli_(1 - self.word_dropout).expand_as(embed.weight) / (1 - self.word_dropout)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
padding_idx = embed.padding_idx
if padding_idx is None:
padding_idx = -1
# Second: Embedding
src_embeddings = []
for src_segment in src_segments:
src_emb = F.embedding(
src_segment, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
src_emb.mul_(math.sqrt(self.model_size))
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb
src_embeddings.append(src_emb)
tgt_embeddings = []
for tgt_segment in tgt_segments:
tgt_emb = F.embedding(
tgt_segment, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
tgt_emb.mul_(math.sqrt(self.model_size))
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb
tgt_embeddings.append(tgt_emb)
# add src1, tgt1, src2, tgt2 .... srcn, tgtn
all_embeddings = []
for (src_emb, tgt_emb) in zip(src_embeddings, tgt_embeddings):
all_embeddings.append(src_emb)
all_embeddings.append(tgt_emb)
emb = torch.cat(all_embeddings, dim=0)
# prepare attention mask
mem_length = streaming_state.mems[0].size(0) if mems is not None else 0
attn_mask = self.create_mask_stream(src, tgt, src_lengths, tgt_lengths, mem_length=mem_length)
qlen = emb.size(0)
klen = emb.size(0) + mem_length
if self.bidirectional:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
else:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
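# For instance, with klen = 4 the unidirectional case enumerates the relative
# distances [3, 2, 1, 0] (past positions only), while the bidirectional case
# enumerates [3, 2, 1, 0, -1, -2, -3], i.e. 2 * klen - 1 offsets covering both
# past and future, one embedding per offset in the relative positional encoder.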
pos_emb = self.positional_encoder(pos)
output = emb
# Applying dropout
output = self.preprocess_layer(output)
pos_emb = self.preprocess_layer(pos_emb)
hids = [output]
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
mems_i = None if mems is None else mems[i]
output, coverage = layer(output, None, pos_emb, attn_mask, None, mems=mems_i)
# context and context_mask are None
hids.append(output)
# final layer norm
output = self.postprocess_layer(output)
# update the memory and then prune
streaming_state.update_mems(hids, qlen)
# now we have to separate the target states from the "output" to generate translations
target_outputs = []
contexts = []
offset = 0
for (src_len, tgt_len) in zip(src_lengths, tgt_lengths):
source_output = output.narrow(0, offset, src_len)
offset += src_len
target_output = output.narrow(0, offset, tgt_len)
offset += tgt_len
target_outputs.append(target_output)
contexts.append(source_output)
context = torch.cat(contexts, dim=0)
output = torch.cat(target_outputs, dim=0)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context, 'src': src,
'target_mask': None}
output_dict = defaultdict(lambda: None, output_dict)
# final layer: computing log probabilities
logprobs = self.generator[0](output_dict)
output_dict['logprobs'] = logprobs
output_dict['streaming_state'] = streaming_state
return output_dict
def forward(self, batch, target_mask=None, streaming=False, **kwargs):
if streaming:
return self.forward_stream(batch, **kwargs)
src = batch.get('source') # src_len x batch_size
tgt = batch.get('target_input') # len_tgt x batch_size
src_pos = batch.get('source_pos')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
tgt_len = tgt.size(0)
src_len = src.size(0)
bsz = tgt.size(1)
# Embedding stage (and scale the embedding)
embed = self.src_embedding
if self.word_dropout > 0 and self.training:
mask = embed.weight.new().resize_((embed.weight.size(0), 1)). \
bernoulli_(1 - self.word_dropout).expand_as(embed.weight) / (1 - self.word_dropout)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
padding_idx = embed.padding_idx
if padding_idx is None:
padding_idx = -1
src_emb = F.embedding(
src, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
src_emb.mul_(math.sqrt(self.model_size))
tgt_emb = F.embedding(
tgt, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
tgt_emb.mul_(math.sqrt(self.model_size))
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb
else:
raise NotImplementedError
# concatenate embedding
emb = torch.cat([src_emb, tgt_emb], dim=0) # L x batch_size x H
# prepare self-attention mask
attn_mask = self.gen_mask(src, tgt)
# pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
klen = src_len + tgt_len
if self.bidirectional:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
else:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
output = emb
# Applying dropout
output = self.preprocess_layer(output)
pos_emb = self.preprocess_layer(pos_emb)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
output, coverage, _ = layer(output, None, pos_emb, attn_mask, None) # context and context_mask are None
# Final normalization
output = self.postprocess_layer(output)
# extract the "source" and "target" parts of the output
context = output[:src_len, :, :]
output = output[-tgt_len:, :, :]
output_dict = {'hidden': output, 'coverage': coverage, 'context': context, 'src': src,
'target_mask': target_mask}
# final layer: computing log probabilities
logprobs = self.generator[0](output_dict)
output_dict['logprobs'] = logprobs
return output_dict
def encode(self, input, decoder_state, input_pos=None, input_lang=None):
buffers = decoder_state.attention_buffers
src_lang = input_lang
input = input.transpose(0, 1)
# Embedding stage (and scale the embedding)
src_emb = embedded_dropout(self.src_embedding, input, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb
emb = src_emb
src_len = input.size(0)
bsz = input.size(1)
mask_src_src = input.eq(onmt.constants.PAD).expand(src_len, src_len, bsz)
buffer = buffers[0] if 0 in buffers else None
if buffer is not None:
mem_len = buffer['k'].size(0)
else:
mem_len = 0
if mem_len > 0:
# print(mask_src_src.size())
past_mask = input.new_zeros(src_len, mem_len).bool().unsqueeze(-1).expand(src_len, mem_len, bsz)
mask_src_src = torch.cat([past_mask, mask_src_src], dim=1)
mask_src = mask_src_src
attn_mask = mask_src.bool() # L x L x batch_size
output = emb
klen = src_len + mem_len
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
# context and context_mask are None
buffer = buffers[i] if i in buffers else None
# if i == 0 and buffer is not None:
# key = next(iter(buffer))
# print(buffer[key].size())
# output, coverage, buffer = layer.step(output, None, attn_mask, None, buffer)
output, coverage, buffer = layer(output, None, pos_emb, attn_mask, None,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
# Final normalization
output = self.postprocess_layer(output)
return output, decoder_state
def decode(self, batch):
"""
:param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
:return: gold_scores (torch.Tensor) log probs for each sentence
gold_words (Int) the total number of non-padded tokens
allgold_scores (list of Tensors) log probs for each word in the sentence
"""
# raise NotImplementedError
tgt_output = batch.get('target_output')
output_dict = self.forward(batch, target_mask=None)
context = output_dict['context']
logprobs = output_dict['logprobs']
batch_size = logprobs.size(1)
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
for gen_t, tgt_t in zip(logprobs, tgt_output):
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.constants.PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
gold_words += tgt_t.ne(onmt.constants.PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
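# Example: for a gold target "w1 w2 </s>" the loop above gathers log P(w1),
# log P(w2) and log P(</s>) from the per-step distributions, zeroes the padded
# positions, and accumulates them per sentence, so gold_scores / gold_words is
# the average per-token log-likelihood under forced decoding.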
def renew_buffer(self, new_len):
# This model uses pre-allocated position encoding
self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len + 1, new_len + 1)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
return
def reset_states(self):
return
def step(self, input, decoder_state, **kwargs):
src = decoder_state.src if decoder_state.src is not None else None
tgt = input.transpose(0, 1)
tgt_lang = decoder_state.tgt_lang
src_lang = decoder_state.src_lang
buffers = decoder_state.attention_buffers
tgt_len = tgt.size(0)
src_len = src.size(0)
bsz = tgt.size(1)
# Embedding stage (and scale the embedding)
# src_emb = embedded_dropout(self.src_embedding, src, dropout=self.word_dropout if self.training else 0) \
# * math.sqrt(self.model_size)
input_ = tgt[-1:]
tgt_emb = embedded_dropout(self.tgt_embedding, input_, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
# src_lang_emb = self.language_embeddings(src_lang)
# src_emb += src_lang_emb
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb
else:
raise NotImplementedError
# concatenate embedding
emb = tgt_emb
# prepare self-attention mask
# attn_mask = self.gen_mask(src, tgt)
buffer = buffers[0] if 0 in buffers else None
if buffer is not None:
mem_len = buffer['k'].size(0)
else:
mem_len = 0
qlen = tgt_len
klen = qlen + mem_len
attn_mask = torch.triu(emb.new_ones(qlen, klen), diagonal=1+mem_len).bool().unsqueeze(-1)
# last attn_mask step
attn_mask = attn_mask[-1:, :, :]
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
output = emb
# Applying dropout
output = self.preprocess_layer(output)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
output, coverage, buffer = layer(output, None, pos_emb, attn_mask, None,
incremental=True,
incremental_cache=buffer) # context and context_mask are None
decoder_state.update_attention_buffer(buffer, i)
# Final normalization
output = self.postprocess_layer(output)
# output = output[-1:, :, :]
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
logprobs = self.generator[0](output_dict).squeeze(0)
output_dict['src'] = decoder_state.src.transpose(0, 1)
output_dict['log_prob'] = logprobs
output_dict['coverage'] = logprobs.new(bsz, tgt_len, src_len).zero_()
# pruning
max_mem_size = self.max_memory_size + tgt_len + 1
for i in range(self.layers):
buffer = buffers[i] if i in buffers else None
for k in buffer:
v = buffer[k]
buffer[k] = v[-max_mem_size:, :, :]
decoder_state.update_attention_buffer(buffer, i)
return output_dict
def create_decoder_state(self, batch, beam_size=1, type=2, streaming=False, previous_decoding_state=None):
src = batch.get('source')
src_pos = batch.get('source_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_transposed = src.transpose(0, 1) # B x T
if previous_decoding_state is None:
decoder_state = TransformerDecodingState(src, tgt_lang, None, None,
beam_size=beam_size, model_size=self.model_size, type=type,
cloning=True)
else:
src = src.repeat(1, beam_size)
decoder_state = TransformerDecodingState(src, tgt_lang, None, None,
beam_size=beam_size, model_size=self.model_size,
type=type, cloning=False)
decoder_state.attention_buffers = previous_decoding_state.attention_buffers
# forward pass through the input to get the buffer
src_transposed = src_transposed.repeat(beam_size, 1)
encoder_output, decoder_state = self.encode(src_transposed, decoder_state, input_pos=src_pos,
input_lang=src_lang)
decoder_state.src_lang = src_lang
# buffers = decoder_state.attention_buffers
# bsz = src.size(1)
# new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
# new_order = new_order.to(src.device)
#
# for l in buffers:
# buffer_ = buffers[l]
# if buffer_ is not None:
# for k in buffer_.keys():
# t_, br_, d_ = buffer_[k].size()
# buffer_[k] = buffer_[k].index_select(1, new_order) # 1 for time first
return decoder_state
def tie_weights(self):
assert self.generator is not None, "The generator needs to be created before sharing weights"
self.generator[0].linear.weight = self.tgt_embedding.weight
def share_enc_dec_embedding(self):
self.src_embedding.weight = self.tgt_embedding.weight
def init_stream(self):
param = next(self.parameters())
layers = self.layers
streaming_state = MemoryState(layers, self.max_memory_size, param.device, param.dtype)
return streaming_state
def set_memory_size(self, src_memory_size, tgt_memory_size):
self.max_memory_size = src_memory_size + tgt_memory_size
class MemoryState(object):
def __init__(self, nlayers, mem_len, device, dtype):
self.mem_len = mem_len
self.mems = []
self.nlayers = nlayers
# n+1 memory slots (embeddings and n layers)
# but maybe we don't need to store the upper layer?
for i in range(self.nlayers + 1):
empty = torch.empty(0, dtype=dtype, device=device)
self.mems.append(empty)
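# After construction, self.mems holds one (initially empty) tensor per slot:
# slot 0 caches the embedding output and slot i > 0 the output of layer i, each
# later filled by update_mems with up to mem_len cached states
# (time x batch x model_size).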
def update_mems(self, hids, qlen):
# does not deal with None
if self.mems is None:
return None
mlen = self.mems[0].size(0) if self.mems is not None else 0
# mems is not None
assert len(hids) == len(self.mems), 'len(hids) != len(mems)'
# All `mlen + qlen` hidden states of this step could be cached, but we only
# keep the most recent `self.mem_len` of them, i.e. the window
# [max(0, mlen + qlen - self.mem_len), mlen + qlen).
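# A quick worked example, assuming mem_len = 5, an existing memory of mlen = 4
# states and qlen = 3 new states: end_idx = 7 and beg_idx = max(0, 7 - 5) = 2,
# so cat[2:7] keeps the most recent 5 of the 7 available states for the next step.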
with torch.no_grad():
new_mems = []
end_idx = mlen + qlen
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([self.mems[i], hids[i]], dim=0)
new_mems.append(cat[beg_idx:end_idx].detach())
# Important:
self.mems = new_mems
# self.src_buffer = defaultdict(lambda: None)
# self.prev_src_mem_size = 0
# self.src_lengths = []
# self.tgt_buffer = defaultdict(lambda: None)
# self.prev_tgt_mem_size = 0
# self.tgt_lengths = []
#
# self.context_memory = None
# def init_mems(self):
# if self.mem_len > 0:
# mems = []
# param = next(self.parameters())
# for i in range(self.n_layer + 1):
# empty = torch.empty(0, dtype=param.dtype, device=param.device)
# mems.append(empty)
#
# return mems
# else:
# return None
| 32,849
| 37.06489
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/old_models/reformer.py
|
# coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch REFORMER model. """
import numpy as np
import inspect
import torch
from torch import nn
from torch.autograd.function import Function
from onmt.modules.lsh_attention import LSHSelfAttention
from onmt.models.transformers import PrePostProcessing
from onmt.modules.linear import FeedForward
from typing import Callable, Dict, List, Optional, Tuple
def apply_chunking_to_forward(
chunk_size: int, chunk_dim: int, forward_fn: Callable[..., torch.Tensor], *input_tensors
) -> torch.Tensor:
"""
This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size`
over the dimension `chunk_dim`.
It then applies a layer `forward_fn` to each chunk independently to save memory.
If `forward_fn` is independent across `chunk_dim`, this function yields the same
result as applying `forward_fn` to the full input directly.
Args:
chunk_size: int - the chunk size of a chunked tensor. `num_chunks` = `input_tensors[0].shape[chunk_dim] // chunk_size`
chunk_dim: int - the dimension over which the input_tensors should be chunked
forward_fn: fn - the forward fn of the model
input_tensors: tuple(torch.Tensor) - the input tensors of `forward_fn` which are chunked
Returns:
a Tensor with the same shape that forward_fn would have produced if applied directly
Examples::
# rename the usual forward() fn to forward_chunk()
def forward_chunk(self, hidden_states):
hidden_states = self.decoder(hidden_states)
return hidden_states
# implement a chunked forward function
def forward(self, hidden_states):
return apply_chunking_to_forward(self.chunk_size_lm_head, self.seq_len_dim, self.forward_chunk, hidden_states)
"""
assert len(input_tensors) > 0, "{} has to be a tuple/list of tensors".format(input_tensors)
tensor_shape = input_tensors[0].shape
assert all(
input_tensor.shape == tensor_shape for input_tensor in input_tensors
), "All input tensors have to be of the same shape"
# inspect.signature exists since Python 3.5 and is a Python method -> no problem with backward compatibility
num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
assert num_args_in_forward_chunk_fn == len(
input_tensors
), "forward_chunk_fn expects {} arguments, but only {} input tensors are given".format(
num_args_in_forward_chunk_fn, len(input_tensors)
)
if chunk_size > 0:
assert (
input_tensors[0].shape[chunk_dim] % chunk_size == 0
), "The dimension to be chunked {} has to be a multiple of the chunk size {}".format(
input_tensors[0].shape[chunk_dim], chunk_size
)
num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size
# chunk input tensor into tuples
input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
# apply forward fn to every tuple
output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
# concatenate output at same dimension
return torch.cat(output_chunks, dim=chunk_dim)
return forward_fn(*input_tensors)
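# A small numeric sketch of the chunking above, assuming chunk_size = 2,
# chunk_dim = 1 and a single input of shape (batch=8, seq=6, hidden): the assert
# passes because 6 % 2 == 0, num_chunks = 6 // 2 = 3, forward_fn is applied to
# three (8, 2, hidden) chunks independently, and the results are concatenated
# back along dim 1, matching a direct call whenever forward_fn treats the
# positions in dim 1 independently.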
class ReformerEncoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0):
self.variational = opt.variational_dropout
self.death_rate = death_rate
d_model = opt.model_size
p = opt.dropout
super(ReformerEncoderLayer, self).__init__()
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
self.self_attention = LSHSelfAttention(opt)
self.feedforward = FeedForward(opt.model_size, opt.inner_size, opt.dropout, opt.variational_dropout)
def forward(self, input, attn_mask):
coin = True
if self.training:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
query = self.preprocess_attn(input)
out, _, _ = self.self_attention(query, attn_mask)
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
return input
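# Note on the `out / (1 - self.death_rate)` rescaling used above (stochastic
# depth): during training the layer survives with probability 1 - death_rate,
# so scaling the surviving output by 1 / (1 - death_rate) keeps the expected
# residual contribution unchanged, e.g. with death_rate = 0.2:
# E[residual] = 0.8 * (out / 0.8) + 0.2 * 0 = out.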
| 5,517
| 41.446154
| 122
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/old_models/relative_universal_transformer_layers.py
|
import torch
import torch.nn as nn
import onmt
from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Linear
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.utils import flip
from onmt.modules.bottle import Bottle
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.linear import group_linear, FeedForwardSwish, FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.adaptive.relative_self_attention import AdaptiveRelativeAttn
from onmt.modules.adaptive.encdec_attention import AdaptiveEncDecAttn
from onmt.modules.adaptive.feed_forward import AdaptiveFeedForward
class RelativeUniversalEncoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0, **kwargs):
super().__init__()
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.fast_self_attention = opt.fast_self_attention
self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
d_head = opt.model_size // opt.n_heads
self.adaptive_type = opt.adaptive
self.factor_size = opt.layers
# this model defaults as fast relative self attention
if self.adaptive_type == 'universal':
self.multihead = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational)
else:
self.multihead = AdaptiveRelativeAttn(opt.model_size, opt.n_heads, self.factor_size, opt.attn_dropout)
self.feedforward = AdaptiveFeedForward(opt.model_size, opt.inner_size, self.factor_size,
opt.dropout, variational=self.variational)
def forward(self, input, pos_emb, layer_vector, attn_mask, incremental=False, incremental_cache=None, mems=None):
if self.adaptive_type == 'universal':
input = input + layer_vector
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
# if self.training and self.death_rate > 0:
# coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
query = self.preprocess_attn(input)
if self.adaptive_type == 'universal':
out, _ = self.multihead(query, pos_emb, attn_mask, None, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
else:
out, _ = self.multihead(query, pos_emb, layer_vector, attn_mask, None, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
if self.adaptive_type == 'universal':
out = self.feedforward(self.preprocess_ffn(input))
else:
out = self.feedforward(self.preprocess_ffn(input), layer_vector)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
if incremental:
return input, incremental_cache
return input
class RelativeUniversalDecoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0):
super().__init__()
self.ignore_source = opt.ignore_source
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.fast_self_attention = opt.fast_self_attention
self.factor_size = opt.layers
self.adaptive_type = opt.adaptive
self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if not self.ignore_source:
self.preprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if self.adaptive_type == 'universal':
self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)
else:
self.multihead_src = AdaptiveEncDecAttn(opt.n_heads, opt.model_size, self.factor_size, opt.attn_dropout)
self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if self.adaptive_type == 'universal':
self.multihead_tgt = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational)
else:
self.multihead_tgt = AdaptiveRelativeAttn(opt.model_size, opt.n_heads, self.factor_size,
opt.attn_dropout)
self.feedforward = AdaptiveFeedForward(opt.model_size, opt.inner_size, self.factor_size,
opt.dropout, variational=self.variational)
# def forward(self, input, context, pos_emb, r_w_bias, r_r_bias, mask_tgt, mask_src):
def forward(self, input, context, pos_emb, layer_vector, mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=True, mems=None):
# sum up input with the layer embedding
if self.adaptive_type == 'universal':
input = input + layer_vector
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
if coin:
# input and context should be time first ?
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
query = self.preprocess_attn(input)
if self.adaptive_type == 'universal':
out, _ = self.multihead_tgt(query, pos_emb, None, mask_tgt, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
else:
out, _ = self.multihead_tgt(query, pos_emb, layer_vector, None, mask_tgt, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
if not self.ignore_source:
query = self.preprocess_src_attn(input)
incremental_source = incremental and reuse_source
if self.adaptive_type == 'universal':
out, coverage = self.multihead_src(query, context, context, mask_src,
incremental=incremental_source,
incremental_cache=incremental_cache)
else:
out, coverage = self.multihead_src(query, context, context, layer_vector, mask_src,
incremental=incremental_source,
incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_src_attn(out, input)
else:
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
if self.adaptive_type == 'universal':
out = self.feedforward(self.preprocess_ffn(input))
else:
out = self.feedforward(self.preprocess_ffn(input), layer_vector)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
else:
coverage = None
return input, coverage, incremental_cache
| 10,170
| 45.231818
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/old_models/distance_transformer.py
|
import torch
import torch.nn as nn
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from onmt.modules.sinusoidal_positional_encoding import SinusoidalPositionalEmbedding  # used below; module path assumed
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.legacy.old_models.distance_transformer_layers import DistanceTransformerEncoderLayer, DistanceTransformerDecoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
torch.set_printoptions(threshold=500000)
class DistanceTransformerEncoder(TransformerEncoder):
def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
self.death_rate = opt.death_rate
self.double_position = opt.double_position
self.learnable_position_encoding = opt.learnable_position_encoding
self.layer_modules = list()
self.asynchronous = opt.asynchronous
self.max_memory_size = opt.max_memory_size
self.extra_context_size = opt.extra_context_size
self.max_pos_length = opt.max_pos_length
# build_modules will be called from the inherited constructor
super(DistanceTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type,
language_embeddings)
# learnable position encoding
self.positional_encoder = None
self.d_head = self.model_size // self.n_heads
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Transformer Encoder with Distance Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for _l in range(self.layers):
# linearly decay the death rate
death_r = (_l + 1.0) / self.layers * self.death_rate
block = DistanceTransformerEncoderLayer(self.n_heads, self.model_size,
self.dropout, self.inner_size, self.attn_dropout,
variational=self.varitional_dropout, death_rate=death_r)
self.layer_modules.append(block)
def forward(self, input, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
input: batch_size x src_len (to be transposed)
Outputs Shapes:
out: batch_size x src_len x d_model
mask_src
"""
""" Embedding: batch_size x src_len x d_model """
if self.input_type == "text":
bsz_first_input = input
input = input.transpose(0, 1)
# mask_src = input.eq(onmt.constants.PAD).unsqueeze(0) # batch_size x src_len x 1 for broadcasting
dec_attn_mask = bsz_first_input.eq(onmt.constants.PAD).unsqueeze(1)
if streaming:
raise NotImplementedError
streaming_state = kwargs.get('streaming_state', None)
mems = streaming_state.src_mems
# mem_len = streaming_state.src_mems[0].size(0)
mem_len = streaming_state.prev_src_mem_size
input_length = kwargs.get('src_lengths', None)
streaming_state = kwargs.get('streaming_state', None)
mask_src = self.create_stream_mask(input, input_length, mem_len)
mask_src = mask_src.unsqueeze(2)
else:
mem_len = 0
mask_src = input.eq(onmt.constants.PAD).unsqueeze(0) # batch_size x src_len x 1 for broadcasting
mems = None
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.double_position:
assert input_pos is not None
# flatten
src_len, bsz = input_pos.size(0), input_pos.size(1)
input_pos_ = input_pos.contiguous().view(-1).type_as(emb)
abs_pos = self.positional_encoder(input_pos_)
abs_pos = abs_pos.squeeze(1).view(src_len, bsz, -1)
else:
abs_pos = None
""" Adding language embeddings """
if self.use_language_embedding:
assert self.language_embedding is not None
# There is no "unsqueeze" here because the input is T x B x H and lang_emb is B x H
if self.language_embedding_type in ['sum', 'all_sum']:
lang_emb = self.language_embedding(input_lang)
emb = emb + lang_emb.unsqueeze(1)
else:
if streaming:
raise NotImplementedError
if not self.cnn_downsampling:
mask_src = input.narrow(2, 0, 1).squeeze(2).transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
dec_attn_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
input = input.narrow(2, 1, input.size(2) - 1)
emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
input.size(1), -1)
else:
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
# first resizing to fit the CNN format
input = input.view(input.size(0), input.size(1), -1, self.channels)
input = input.permute(0, 3, 1, 2)
input = self.audio_trans(input)
input = input.permute(0, 2, 1, 3).contiguous()
input = input.view(input.size(0), input.size(1), -1)
# print(input.size())
input = self.linear_trans(input)
mask_src = long_mask[:, 0:input.size(1) * 4:4].transpose(0, 1).unsqueeze(0)
dec_attn_mask = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
# the size seems to be B x T ?
emb = input
emb = emb.transpose(0, 1)
input = input.transpose(0, 1)
abs_pos = None
mem_len = 0
if onmt.constants.torch_version >= 1.2:
mask_src = mask_src.bool()
""" Scale the emb by sqrt(d_model) """
emb = emb * math.sqrt(self.model_size)
if self.double_position and abs_pos is not None:
# adding position encoding
emb = emb + abs_pos
""" Adding positional encoding """
qlen = input.size(0)
klen = qlen + mem_len
# Asynchronous positions: 2K+1 positions instead of K+1
# because the batch dimension is lacking
# B x T x H -> T x B x H
context = emb
# Apply dropout to both context and pos_emb
context = self.preprocess_layer(context)
for i, layer in enumerate(self.layer_modules):
# src_len x batch_size x d_model
if streaming:
buffer = streaming_state.src_buffer[i]
context, buffer = layer(context, mask_src, incremental=True, incremental_cache=buffer)
streaming_state.src_buffer[i] = buffer
else:
context = layer(context, mask_src)
# last layer norm
context = self.postprocess_layer(context)
output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask, 'src': input})
if streaming:
streaming_state.prev_src_mem_size += sum(input_length.tolist())
streaming_state.prune_source_memory(self.max_memory_size)
# streaming_state.update_src_mems(hids, qlen)
output_dict['streaming_state'] = streaming_state
return output_dict
class DistanceTransformerDecoder(TransformerDecoder):
def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):
self.death_rate = opt.death_rate
self.double_position = opt.double_position
self.max_memory_size = opt.max_memory_size
self.stream_context = opt.stream_context
self.extra_context_size = opt.extra_context_size
# build_modules will be called from the inherited constructor
super(DistanceTransformerDecoder, self).__init__(opt, dicts,
positional_encoder,
language_embeddings,
ignore_source,
allocate_positions=False)
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
# Parameters for the position biases
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
def renew_buffer(self, new_len):
return
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Transformer Decoder with Distance Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = (l + 1.0) / self.layers * self.death_rate
block = DistanceTransformerDecoderLayer(self.n_heads, self.model_size,
self.dropout, self.inner_size, self.attn_dropout,
variational=self.variational_dropout, death_rate=death_r)
self.layer_modules.append(block)
def process_embedding(self, input, input_lang=None):
return input
def create_context_mask(self, input, src, src_lengths, tgt_lengths, extra_context_length=0):
"""
Generate the mask so that part of the target attends to a part of the source
:param extra_context_length:
:param input:
:param src:
:param src_lengths:
:param tgt_lengths:
:return:
"""
mask = None
if self.stream_context == 'global':
# Global context: one target attends to everything in the source
for (src_length, tgt_length) in zip(src_lengths, tgt_lengths):
if mask is None:
prev_src_length = 0
prev_tgt_length = 0
else:
prev_src_length, prev_tgt_length = mask.size(1), mask.size(0)
# current sent attend to current src sent and all src in the past
current_mask = input.new_zeros(tgt_length, src_length + prev_src_length)
# the previous target cannot attend to the current source
if prev_tgt_length > 0:
prev_mask = input.new_ones(prev_tgt_length, src_length)
prev_mask = torch.cat([mask, prev_mask], dim=-1)
else:
prev_mask = None
# the output mask has two parts: the prev and the current
if prev_mask is not None:
mask = torch.cat([prev_mask, current_mask], dim=0)
else:
mask = current_mask
elif self.stream_context in ['local', 'limited']:
# Local context: only attends to the aligned context
for (src_length, tgt_length) in zip(src_lengths, tgt_lengths):
if mask is None:
prev_src_length = 0
prev_tgt_length = 0
else:
prev_src_length, prev_tgt_length = mask.size(1), mask.size(0)
# current tgt sent attend to only current src sent
if prev_src_length > 0:
current_mask = torch.cat([input.new_ones(tgt_length, prev_src_length - extra_context_length),
input.new_zeros(tgt_length, src_length + extra_context_length)], dim=-1)
else:
current_mask = input.new_zeros(tgt_length, src_length + extra_context_length)
# the previous target cannot attend to the current source
if prev_tgt_length > 0:
prev_mask = input.new_ones(prev_tgt_length, src_length)
prev_mask = torch.cat([mask, prev_mask], dim=-1)
else:
prev_mask = None
# the output mask has two parts: the prev and the current
if prev_mask is not None:
mask = torch.cat([prev_mask, current_mask], dim=0)
else:
mask = current_mask
mask = mask.bool()
return mask
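# A worked example of the context mask above for two (src, tgt) pairs with
# src_lengths = [2, 2], tgt_lengths = [1, 1] and extra_context_length = 0
# (rows = target positions, columns = source positions, 0 = may attend):
#
#     'global' context          'local' / 'limited' context
#     t_0 [ 0 0 1 1 ]           t_0 [ 0 0 1 1 ]
#     t_1 [ 0 0 0 0 ]           t_1 [ 1 1 0 0 ]
#
# i.e. in the global setting a target sentence also attends to every past source
# sentence, while in the local setting it only sees its aligned source segment.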
def create_self_attn_mask(self, input, tgt_lengths, prev_tgt_mem_size):
"""
Create a mask for the target words attending to the past
:param input:
:param tgt_lengths:
:param prev_tgt_mem_size:
:return:
"""
if self.stream_context in ['local', 'global']:
qlen = sum(tgt_lengths.tolist())
mlen = prev_tgt_mem_size
klen = qlen + mlen
mask = torch.triu(input.new_ones(qlen, klen), diagonal=1 + mlen).bool()[:, :, None]
elif self.stream_context in ['limited']:
# past_length = prev_tgt_mem_size
mask = None
# assert prev_tgt_mem_size == 0, "This model is limited and doesn't accept memory"
for length in tgt_lengths:
past_length = mask.size(0) if mask is not None else 0
if past_length > 0:
# don't look at the past
past_mask = input.new_ones(length, past_length)
else:
past_mask = None
# pay attention to the past words in the current sentence
current_mask = torch.triu(input.new_ones(length, length), diagonal=1)
if past_mask is not None:
current_mask = torch.cat([past_mask, current_mask], dim=1)
if mask is None:
mask = current_mask
else:
no_future_mask = input.new_ones(past_length, length)
mask = torch.cat([mask, no_future_mask], dim=1)
mask = torch.cat([mask, current_mask], dim=0)
mask = mask.bool().unsqueeze(-1)
return mask
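# For example, with tgt_lengths = [2, 2] and no memory the two stream contexts
# above produce (0 = may attend, 1 = blocked):
#
#     'local' / 'global' (plain causal)      'limited' (no attention across sentences)
#       [ 0 1 1 1 ]                            [ 0 1 1 1 ]
#       [ 0 0 1 1 ]                            [ 0 0 1 1 ]
#       [ 0 0 0 1 ]                            [ 1 1 0 1 ]
#       [ 0 0 0 0 ]                            [ 1 1 0 0 ]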
# TODO: merging forward_stream and forward
# TODO: write a step function for encoder
def forward(self, input, context, src, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
""" Embedding: batch_size x len_tgt x d_model """
input = input.transpose(0, 1) # T x B
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
emb = emb * math.sqrt(self.model_size)
if streaming:
src_lengths = kwargs.get("src_lengths", None)
tgt_lengths = kwargs.get("tgt_lengths", None)
streaming_state = kwargs.get("streaming_state")
# mems = streaming_state.tgt_mems
mem_len = streaming_state.prev_tgt_mem_size
extra_context = streaming_state.extra_context
extra_context_length = extra_context.size(0) if extra_context is not None else 0
# mem_len = mems[0].size(0) if mems is not None else 0
else:
mem_len = 0
mems = None
extra_context = None
if self.double_position:
assert input_pos is not None
tgt_len, bsz = input_pos.size(0), input_pos.size(1)
input_pos_ = input_pos.view(-1).type_as(emb)
abs_pos = self.positional_encoder(input_pos_).squeeze(1).view(tgt_len, bsz, -1)
emb = emb + abs_pos
if self.use_language_embedding:
lang_emb = self.language_embeddings(input_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
bos_emb = lang_emb.expand_as(emb[0])
emb[0] = bos_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
if streaming:
context_attn_mask = self.create_context_mask(input, src,
src_lengths, tgt_lengths,
extra_context_length)
mask_src = context_attn_mask.unsqueeze(0)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
qlen = input.size(0)
klen = qlen + mem_len
# preparing self-attention mask. The input is either left or right aligned
if streaming:
dec_attn_mask = self.create_self_attn_mask(input, tgt_lengths, mem_len)
else:
dec_attn_mask = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
pad_mask = input.eq(onmt.constants.PAD).byte() # L x B
dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
dec_attn_mask = dec_attn_mask.gt(0)
if onmt.constants.torch_version >= 1.2:
dec_attn_mask = dec_attn_mask.bool()
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
output = self.preprocess_layer(emb.contiguous())
if streaming:
hids = [output]
if extra_context is not None:
context = torch.cat([extra_context, context], dim=0)
# print(context.size(), context_attn_mask.size())
for i, layer in enumerate(self.layer_modules):
# batch_size x src_len x d_model output, coverage = layer(output, context, pos_emb, self.r_w_bias,
# self.r_r_bias, dec_attn_mask, mask_src)
# mems_i = mems[i] if mems is not None and streaming and
# self.stream_context in ['local', 'global'] else None
if streaming:
buffer = streaming_state.tgt_buffer[i]
output, coverage, buffer = layer(output, context, dec_attn_mask, context_attn_mask,
incremental=True, incremental_cache=buffer, reuse_source=False)
streaming_state.tgt_buffer[i] = buffer
else:
output, coverage, _ = layer(output, context, dec_attn_mask, mask_src)
# if streaming:
# hids.append(output)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
output_dict = defaultdict(lambda: None, output_dict)
if streaming:
streaming_state.prev_tgt_mem_size += sum(tgt_lengths.tolist())
streaming_state.prune_target_memory(self.max_memory_size)
# if we use the extra context: keep the last context
if self.extra_context_size > 0:
extra_context = context[-self.extra_context_size:].detach()
streaming_state.extra_context = extra_context
# if self.stream_context in ['local', 'global']:
# streaming_state.update_tgt_mems(hids, qlen)
output_dict['streaming_state'] = streaming_state
return output_dict
def step(self, input, decoder_state, streaming=False):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
if streaming:
return self.step_streaming(input, decoder_state)
context = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
mask_src = decoder_state.src_mask
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1) # B x T
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
# use the last value of input to continue decoding
if input.size(1) > 1:
input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
else:
input_ = input.transpose(0, 1)
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_) * math.sqrt(self.model_size)
input = input.transpose(0, 1)
klen = input.size(0)
# emb = self.word_lut(input) * math.sqrt(self.model_size)
if self.double_position:
input_pos = torch.arange(input.size(0), dtype=emb.dtype, device=emb.device)
input_pos = input_pos.unsqueeze(1).repeat(1, input.size(1))
tgt_len, bsz = input_pos.size(0), input_pos.size(1)
input_pos_ = input_pos.view(-1).type_as(emb)
abs_pos = self.positional_encoder(input_pos_).squeeze(1).view(tgt_len, bsz, -1)
emb = emb + abs_pos[-1:, :, :]
if self.use_language_embedding:
lang_emb = self.language_embeddings(lang) # B x H
if self.language_embedding_type in ['sum', 'all_sum']:
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
if input.size(0) == 1:
emb[0] = lang_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
# prepare position encoding
qlen = emb.size(0)
mlen = klen - qlen
dec_attn_mask = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mlen).byte()[:, :, None]
pad_mask = input.eq(onmt.constants.PAD).byte() # L x B
dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
dec_attn_mask = dec_attn_mask.gt(0)
if onmt.constants.torch_version >= 1.2:
dec_attn_mask = dec_attn_mask.bool()
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
output = emb.contiguous()
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
# assert (output.size(0) == 1)
# output, coverage, buffer = layer.step(output, context, pos_emb,
# dec_attn_mask, mask_src, buffer=buffer)
output, coverage, buffer = layer(output, context, dec_attn_mask, mask_src,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
output = self.postprocess_layer(output)
output = output[-1].unsqueeze(0)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = context
return output_dict
def step_streaming(self, input, decoder_state):
"""Step function in streaming case"""
raise NotImplementedError
# context = decoder_state.context
# lang = decoder_state.tgt_lang
# streaming_state = decoder_state.streaming_state
#
# # for global model: push the context in
#
# if decoder_state.concat_input_seq:
# if decoder_state.input_seq is None:
# decoder_state.input_seq = input
# else:
# # concatenate the last input to the previous input sequence
# decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
# input = decoder_state.input_seq.transpose(0, 1) # B x T
#
# src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
#
# # use the last value of input to continue decoding
# if input.size(1) > 1:
# input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
# else:
# input_ = input.transpose(0, 1)
#
# emb = self.word_lut(input_) * math.sqrt(self.model_size)
# input = input.transpose(0, 1) # B x T to T x B
# klen = input.size(0)
#
# # If we start a new sentence to decode: reset the context memory
# if klen == 1:
# streaming_state.reset_context_memory()
# if self.stream_context == 'limited':
# streaming_state.reset_target_memory()
#
# if self.use_language_embedding:
# lang_emb = self.language_embeddings(lang) # B x H or 1 x H
# if self.language_embedding_type == 'sum':
# emb = emb + lang_emb
# elif self.language_embedding_type == 'concat':
# # replace the bos embedding with the language
# bos_emb = lang_emb.expand_as(emb[0])
# emb[0] = bos_emb
#
# lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
# concat_emb = torch.cat([emb, lang_emb], dim=-1)
# emb = torch.relu(self.projector(concat_emb))
# else:
# raise NotImplementedError
#
# # need to manually definte src_lengths and tgt_lengths here
# src_lengths = torch.LongTensor([context.size(0)])
# tgt_lengths = torch.LongTensor([1])
#
# if context is not None:
# context_attn_mask = self.create_context_mask(input, src, src_lengths, tgt_lengths)
# context_attn_mask = context_attn_mask.unsqueeze(0)
# else:
# context_attn_mask = None
#
# dec_attn_mask = self.create_self_attn_mask(input, tgt_lengths, streaming_state.prev_tgt_mem_size)
#
# dec_attn_mask = dec_attn_mask[:, -1:, :]
#
# klen = 1 + streaming_state.prev_tgt_mem_size
#
# output = emb
#
# for i, layer in enumerate(self.layer_modules):
# # T x B x d_model
# buffer = streaming_state.tgt_buffer[i]
# # output, coverage = layer(output, context, pos_emb, self.r_w_bias, self.r_r_bias, dec_attn_mask, mask_src)
# # reuse_source = True if input.size(1) == 1 else False
# reuse_source = True
#
# # reuse source is True in this case because we can reuse the context ...
# output, coverage, buffer = layer(output, context, dec_attn_mask, context_attn_mask,
# incremental=True, incremental_cache=buffer, reuse_source=reuse_source)
# streaming_state.tgt_buffer[i] = buffer
#
# output = self.postprocess_layer(output)
#
# streaming_state.prev_tgt_mem_size += 1
# streaming_state.prune_target_memory(self.max_memory_size + input.size(0))
#
# extra_context = context[-self.extra_context_size:].detach()
#
# output_dict = defaultdict(lambda: None, {'hidden': output, 'coverage': coverage, 'context': context})
# output_dict['streaming_state'] = streaming_state
#
# return output_dict
| 30,203
| 41.721358
| 127
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/old_models/unified_transformer.py
|
import torch
import torch.nn as nn
import numpy as np
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, TransformerDecodingState
import onmt
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing, DecoderLayer
from onmt.legacy.old_models.universal_transformer_layers import UniversalEncoderLayer, UniversalDecoderLayer
# from onmt.models.relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
torch.set_printoptions(profile="full")
class UnifiedTransformer(TransformerDecoder):
"""
This class combines the encoder and the decoder into one single sequence
and performs joint self-attention over the concatenated encoder and decoder parts
"""
def __init__(self, opt, src_embedding, tgt_embedding, generator, positional_encoder,
language_embeddings=None, encoder_type='text', **kwargs):
self.death_rate = opt.death_rate
self.bidirectional = opt.bidirectional
self.layer_modules = []
# build_modules will be called from the inherited constructor
super(UnifiedTransformer, self).__init__(opt, tgt_embedding,
positional_encoder,
language_embeddings=language_embeddings,
allocate_positions=True)
self.src_embedding = src_embedding
self.tgt_embedding = tgt_embedding
# self.language_embedding = nn.Embedding(3, self.model_size, padding_idx=0)
self.generator = generator
self.ignore_source = True
self.encoder_type = opt.encoder_type
# self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
# self.build_modules()
def gen_mask(self, src, tgt):
input_seq = torch.cat([src, tgt], dim=-1)
seq_len = input_seq.size(1)
if self.bidirectional:
bsz, src_len = src.size(0), src.size(1)
tgt_len = tgt.size(1)
tgt_tgt_mask = torch.triu(src.new_ones(tgt_len, tgt_len), diagonal=1)
tgt_src_mask = src.new_zeros(tgt_len, src_len)
tgt_mask = torch.cat([tgt_src_mask, tgt_tgt_mask], dim=-1)
src_src_mask = src.new_zeros(src_len, src_len)
src_tgt_mask = src.new_ones(src_len, tgt_len)
src_mask = torch.cat([src_src_mask, src_tgt_mask], dim=-1)
attn_mask = torch.cat([src_mask, tgt_mask], dim=0)
attn_mask = attn_mask.bool()
pad_mask = input_seq.eq(onmt.constants.PAD).unsqueeze(1)
attn_mask = attn_mask | pad_mask
# attn_mask = attn_mask.byte() + input_seq.eq(onmt.constants.PAD).byte().unsqueeze(1)
# print(attn_mask[0])
# attn_mask = torch.gt(attn_mask, 0).bool()
else:
attn_mask = self.mask[:seq_len, :seq_len] + input_seq.eq(onmt.constants.PAD).byte().unsqueeze(1)
attn_mask = torch.gt(attn_mask, 0).bool()
return attn_mask
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Transformer Decoder with Absolute Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = (l + 1.0) / self.layers * self.death_rate
block = DecoderLayer(self.opt, death_rate=death_r)  # self.opt assumed to be set by the parent constructor
self.layer_modules.append(block)
def forward(self, batch, target_mask=None, **kwargs):
src = batch.get('source').transpose(0, 1) # src_len x batch_size -> bsz x src_len
tgt = batch.get('target_input').transpose(0, 1) # len_tgt x batch_size -> bsz x tgt_len
src_pos = batch.get('source_pos')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
tgt_len = tgt.size(1)
src_len = src.size(1)
bsz = tgt.size(0)
# Embedding stage (and scale the embedding)
src_emb = embedded_dropout(self.src_embedding, src, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
tgt_emb = embedded_dropout(self.tgt_embedding, tgt, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
# Add position encoding
src_emb = self.time_transformer(src_emb)
tgt_emb = self.time_transformer(tgt_emb)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb.unsqueeze(1)
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb.unsqueeze(1)
# concatenate embedding
emb = torch.cat([src_emb, tgt_emb], dim=1) # L x batch_size x H
# prepare self-attention mask
# For the source: we have two different parts
# [1 x src_len x batch_size]
# mask_src_src = src.eq(onmt.constants.PAD).unsqueeze(0).byte()
# src_pad_mask = mask_src_src
# # Attention from src to target: everything is padded
# mask_src_tgt = mask_src_src.new_ones(1, 1, 1).expand(src_len, tgt_len, bsz)
# # [src_len x L x batch_size]
# mask_src = torch.cat([mask_src_src.expand(src_len, src_len, bsz), mask_src_tgt], dim=1)
# mask_src = mask_src.bool()
# mask_src_src = src.eq(onmt.constants.PAD).unsqueeze(1).byte() # B x 1 x src_len
# mask_src_tgt = mask_src_src.new_ones(bsz, src_len, tgt_len) # bsz x src_len x tgt_len
#
# mask_src = torch.cat([mask_src_src.expand(bsz, src_len, src_len), mask_src_tgt], dim=-1)
#
# # For the target:
# mask_tgt_tgt = tgt.eq(onmt.constants.PAD).byte().unsqueeze(1) + self.mask[:tgt_len, :tgt_len]
# mask_tgt_tgt = torch.gt(mask_tgt_tgt, 0).byte() # bsz x tgt_len x tgt_len
#
# mask_tgt_src = mask_tgt_tgt.new_zeros(bsz, tgt_len, src_len) + src.eq(onmt.constants.PAD).unsqueeze(1).byte()
# mask_tgt = torch.cat([mask_tgt_src, mask_tgt_tgt], dim=-1) # bsz x tgt_len x T
#
# attn_mask = torch.cat([mask_src, mask_tgt], dim=1).bool() # L x L x batch_size
# lets try to use language modeling style
# input_seq = torch.cat([src, tgt], dim=-1)
# seq_len = input_seq.size(1)
#
# attn_mask = self.mask[:seq_len, :seq_len] + input_seq.eq(onmt.constants.PAD).byte().unsqueeze(1)
# attn_mask = torch.gt(attn_mask, 0).bool()
attn_mask = self.gen_mask(src, tgt)
output = emb
        # Apply dropout and transpose to T x B x H
output = self.preprocess_layer(output).transpose(0, 1)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
output, coverage = layer(output, None, attn_mask, None) # context and context_mask are None
# Final normalization
output = self.postprocess_layer(output)
# extract the "source" and "target" parts of the output
context = output[:src_len, :, :]
output = output[-tgt_len:, :, :]
output_dict = {'hidden': output, 'coverage': coverage, 'context': context, 'src': src,
'target_mask': target_mask}
# final layer: computing log probabilities
logprobs = self.generator[0](output_dict)
output_dict['logprobs'] = logprobs
return output_dict
def encode(self, input, decoder_state, input_pos=None, input_lang=None):
buffers = decoder_state.attention_buffers
src_lang = input_lang
# Embedding stage (and scale the embedding)
src_emb = embedded_dropout(self.src_embedding, input, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
# Add position encoding
src_emb = self.time_transformer(src_emb)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb.unsqueeze(1)
emb = src_emb
src_len = input.size(1)
bsz = input.size(0)
mask_src_src = input.eq(onmt.constants.PAD).unsqueeze(1).byte() # B x 1 x src_len
mask_src = mask_src_src
attn_mask = mask_src.bool() # L x L x batch_size
output = emb
        # Apply dropout and transpose to T x B x H
output = self.preprocess_layer(output).transpose(0, 1)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
# context and context_mask are None
buffer = buffers[i] if i in buffers else None
output, coverage, buffer = layer.step(output, None, attn_mask, None, buffer)
decoder_state.update_attention_buffer(buffer, i)
# Final normalization
output = self.postprocess_layer(output)
return output
def decode(self, batch):
"""
:param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
:return: gold_scores (torch.Tensor) log probs for each sentence
gold_words (Int) the total number of non-padded tokens
allgold_scores (list of Tensors) log probs for each word in the sentence
"""
# raise NotImplementedError
tgt_output = batch.get('target_output')
output_dict = self.forward(batch, target_mask=None)
context = output_dict['context']
logprobs = output_dict['logprobs']
batch_size = logprobs.size(1)
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
for gen_t, tgt_t in zip(logprobs, tgt_output):
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.constants.PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
gold_words += tgt_t.ne(onmt.constants.PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
def renew_buffer(self, new_len):
# This model uses pre-allocated position encoding
self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len + 1, new_len + 1)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
return
def reset_states(self):
return
def step(self, input, decoder_state):
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
tgt = input
tgt_lang = decoder_state.tgt_lang
src_lang = decoder_state.src_lang
# print(src.size(), tgt.size())
# print(src_lang, tgt_lang)
tgt_len = tgt.size(1)
src_len = src.size(1)
bsz = tgt.size(0)
# Embedding stage (and scale the embedding)
src_emb = embedded_dropout(self.src_embedding, src, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
tgt_emb = embedded_dropout(self.tgt_embedding, tgt, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
# Add position encoding
src_emb = self.time_transformer(src_emb)
tgt_emb = self.time_transformer(tgt_emb)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb.unsqueeze(1)
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb.unsqueeze(1)
# concatenate embedding
        emb = torch.cat([src_emb, tgt_emb], dim=1)  # bsz x (src_len + tgt_len) x H
# prepare self-attention mask
# For the source: we have two different parts
# [1 x src_len x batch_size]
# mask_src_src = src.eq(onmt.constants.PAD).unsqueeze(0).byte()
# src_pad_mask = mask_src_src
# # Attention from src to target: everything is padded
# mask_src_tgt = mask_src_src.new_ones(1, 1, 1).expand(src_len, tgt_len, bsz)
# # [src_len x L x batch_size]
# mask_src = torch.cat([mask_src_src.expand(src_len, src_len, bsz), mask_src_tgt], dim=1)
# mask_src = mask_src.bool()
# mask_src_src = src.eq(onmt.constants.PAD).unsqueeze(1).byte() # B x 1 x src_len
# mask_src_tgt = mask_src_src.new_ones(bsz, src_len, tgt_len) # bsz x src_len x tgt_len
#
# mask_src = torch.cat([mask_src_src.expand(bsz, src_len, src_len), mask_src_tgt], dim=-1)
#
# # For the target:
# mask_tgt_tgt = tgt.eq(onmt.constants.PAD).byte().unsqueeze(1) + self.mask[:tgt_len, :tgt_len]
# mask_tgt_tgt = torch.gt(mask_tgt_tgt, 0).byte() # bsz x tgt_len x tgt_len
#
# mask_tgt_src = mask_tgt_tgt.new_zeros(bsz, tgt_len, src_len) + src.eq(onmt.constants.PAD).unsqueeze(1).byte()
# mask_tgt = torch.cat([mask_tgt_src, mask_tgt_tgt], dim=-1) # bsz x tgt_len x T
# attn_mask = torch.cat([mask_src, mask_tgt], dim=1).bool() # L x L x batch_size
attn_mask = self.gen_mask(src, input)
# seq = torch.cat([src, input], dim=-1)
# seq_len = seq.size(1)
# attn_mask = self.mask[:seq_len, :seq_len] + seq.eq(onmt.constants.PAD).byte().unsqueeze(1)
# attn_mask = torch.gt(attn_mask, 0).bool()
output = emb
        # Apply dropout and transpose to T x B x H
output = self.preprocess_layer(output).transpose(0, 1)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
output, coverage = layer(output, None, attn_mask, None) # context and context_mask are None
# Final normalization
output = self.postprocess_layer(output)
output = output[-1:, :, :]
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
logprobs = self.generator[0](output_dict).squeeze(0)
output_dict['src'] = decoder_state.src.transpose(0, 1)
output_dict['log_prob'] = logprobs
output_dict['coverage'] = logprobs.new(bsz, tgt_len, src_len).zero_()
# buffers = decoder_state.attention_buffers
# tgt_lang = decoder_state.tgt_lang
# src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
#
# if decoder_state.concat_input_seq:
# if decoder_state.input_seq is None:
# decoder_state.input_seq = input
# else:
# # concatenate the last input to the previous input sequence
# decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
#
# # For Transformer, both inputs are assumed as B x T (batch first)
# input = decoder_state.input_seq.transpose(0, 1)
# src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
#
# if input.size(1) > 1:
# input_ = input[:, -1].unsqueeze(1)
# else:
# input_ = input
# """ Embedding: batch_size x 1 x d_model """
# # check = input_.gt(self.word_lut.num_embeddings)
# print(input.size())
# emb = self.tgt_embedding(input_) * math.sqrt(self.model_size)
#
# """ Adding positional encoding """
# emb = self.time_transformer(emb, t=input.size(1))
#
# if self.use_language_embedding:
# if self.language_embedding_type in ["sum", "all_sum"]:
#
# tgt_lang_emb = self.language_embeddings(tgt_lang)
# emb += tgt_lang_emb.unsqueeze(1)
#
# emb = emb.transpose(0, 1)
#
# # attention mask For the target:
# tgt_len = input.size(1)
# bsz = input.size(0)
# src_len = src.size(1)
# mask_tgt_tgt = input.eq(onmt.constants.PAD).byte().unsqueeze(1) + self.mask[:tgt_len, :tgt_len]
# mask_tgt_tgt = torch.gt(mask_tgt_tgt, 0).byte() # bsz x tgt_len x tgt_len
#
# mask_tgt_src = mask_tgt_tgt.new_zeros(bsz, tgt_len, src_len) + src.eq(onmt.constants.PAD).unsqueeze(1).byte()
#
# mask_tgt = torch.cat([mask_tgt_src, mask_tgt_tgt], dim=-1) # bsz x tgt_len x T
#
# # take the last element of the 'target sequence' for the mask
# attn_mask = mask_tgt[:, -1, :].unsqueeze(1).bool()
#
# output = emb
#
# for i, layer in enumerate(self.layer_modules):
# buffer = buffers[i] if i in buffers else None
# assert (output.size(0) == 1)
#
# output, coverage, buffer = layer.step(output, None, attn_mask, None, buffer=buffer)
#
# decoder_state.update_attention_buffer(buffer, i)
#
# # Final normalization
# output_dict = defaultdict(lambda: None)
# output_dict['hidden'] = output
#
# logprobs = self.generator[0](output_dict).squeeze(0)
#
# output_dict['src'] = decoder_state.src.transpose(0, 1)
# output_dict['log_prob'] = logprobs
# output_dict['coverage'] = logprobs.new(bsz, tgt_len, src_len).zero_()
return output_dict
def create_decoder_state(self, batch, beam_size=1, type=1):
src = batch.get('source')
src_pos = batch.get('source_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_transposed = src.transpose(0, 1) # B x T
decoder_state = TransformerDecodingState(src, tgt_lang, None, None,
beam_size=beam_size, model_size=self.model_size, type=type)
# forward pass through the input to get the buffer
# _ = self.encode(src_transposed, decoder_state, input_pos=src_pos, input_lang=src_lang)
decoder_state.src_lang = src_lang
# buffers = decoder_state.attention_buffers
# bsz = src.size(1)
# new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
# new_order = new_order.to(src.device)
#
# for l in buffers:
# buffer_ = buffers[l]
# if buffer_ is not None:
# for k in buffer_.keys():
# t_, br_, d_ = buffer_[k].size()
# buffer_[k] = buffer_[k].index_select(1, new_order) # 1 for time first
return decoder_state
def tie_weights(self):
assert self.generator is not None, "The generator needs to be created before sharing weights"
self.generator[0].linear.weight = self.tgt_embedding.weight
def share_enc_dec_embedding(self):
self.src_embedding.weight = self.tgt_embedding.weight
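# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original file): the layout of the joint
# source/target self-attention mask produced by gen_mask() above, before the
# padding mask is OR-ed in. True entries are blocked: source rows may attend
# to every source position but never to the target, while target rows may
# attend to all source positions and only causally to the target. The helper
# name and the toy sizes below are illustrative only.
# ---------------------------------------------------------------------------
def _sketch_joint_attention_mask(src_len=3, tgt_len=4):
    import torch
    tgt_tgt_mask = torch.triu(torch.ones(tgt_len, tgt_len), diagonal=1)  # causal target part
    tgt_src_mask = torch.zeros(tgt_len, src_len)                         # target sees all of the source
    src_src_mask = torch.zeros(src_len, src_len)                         # source sees all of the source
    src_tgt_mask = torch.ones(src_len, tgt_len)                          # source never sees the target
    src_rows = torch.cat([src_src_mask, src_tgt_mask], dim=-1)
    tgt_rows = torch.cat([tgt_src_mask, tgt_tgt_mask], dim=-1)
    # (src_len + tgt_len) x (src_len + tgt_len), same row order as forward()/step()
    return torch.cat([src_rows, tgt_rows], dim=0).bool()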
| 19,467
| 40.866667
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/old_models/universal_transformer.py
|
import torch
import torch.nn as nn
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.legacy.old_models.universal_transformer_layers import UniversalEncoderLayer, UniversalDecoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
torch.set_printoptions(threshold=500000)
class UniversalTransformerEncoder(TransformerEncoder):
def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
self.death_rate = opt.death_rate
self.double_position = opt.double_position
self.learnable_position_encoding = opt.learnable_position_encoding
self.layer_modules = list()
self.asynchronous = opt.asynchronous
self.max_memory_size = opt.max_memory_size
self.extra_context_size = opt.extra_context_size
self.max_pos_length = opt.max_pos_length
self.universal_layer = None
self.max_layers = opt.layers
# build_modules will be called from the inherited constructor
super(UniversalTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type,
language_embeddings)
self.positional_encoder = positional_encoder
# learnable embeddings for each layer
self.layer_embedding = nn.Embedding(opt.layers, opt.model_size)
self.d_head = self.model_size // self.n_heads
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Universal Transformer Encoder with Absolute Attention with %.2f expected layers" % e_length)
self.universal_layer = UniversalEncoderLayer(self.opt, death_rate=self.death_rate)
def forward(self, input, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
            input: batch_size x src_len (to be transposed)
Outputs Shapes:
out: batch_size x src_len x d_model
mask_src
"""
""" Embedding: batch_size x src_len x d_model """
if self.input_type == "text":
mask_src = input.eq(onmt.constants.PAD).unsqueeze(1) # batch_size x 1 x len_src for broadcasting
# apply switchout
# if self.switchout > 0 and self.training:
# vocab_size = self.word_lut.weight.size(0)
# input = switchout(input, vocab_size, self.switchout)
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
else:
if not self.cnn_downsampling:
mask_src = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
input = input.narrow(2, 1, input.size(2) - 1)
emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
input.size(1), -1)
emb = emb.type_as(input)
else:
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
# first resizing to fit the CNN format
input = input.view(input.size(0), input.size(1), -1, self.channels)
input = input.permute(0, 3, 1, 2)
input = self.audio_trans(input)
input = input.permute(0, 2, 1, 3).contiguous()
input = input.view(input.size(0), input.size(1), -1)
# print(input.size())
input = self.linear_trans(input)
mask_src = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
# the size seems to be B x T ?
emb = input
mask_src = mask_src.bool()
""" Scale the emb by sqrt(d_model) """
emb = emb * math.sqrt(self.model_size)
""" Adding language embeddings """
if self.use_language_embedding:
assert self.language_embedding is not None
if self.language_embedding_type in ['sum', 'all_sum']:
lang_emb = self.language_embedding(input_lang)
emb = emb + lang_emb.unsqueeze(1)
time_encoding = self.positional_encoder.get_positional_embeddings(emb)
# B x T x H -> T x B x H
context = self.preprocess_layer(emb.transpose(0, 1))
for i in range(self.max_layers):
layer_vector = torch.LongTensor([i]).to(emb.device)
layer_vector = self.layer_embedding(layer_vector).unsqueeze(0) # 1 x 1 x model_size
context = self.universal_layer(context, time_encoding, layer_vector, mask_src)
# last layer norm
context = self.postprocess_layer(context)
output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': mask_src, 'src': input})
if streaming:
streaming_state.prev_src_mem_size += sum(input_length.tolist())
streaming_state.prune_source_memory(self.max_memory_size)
# streaming_state.update_src_mems(hids, qlen)
output_dict['streaming_state'] = streaming_state
return output_dict
class UniversalTransformerDecoder(TransformerDecoder):
def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):
self.death_rate = opt.death_rate
self.max_memory_size = opt.max_memory_size
self.stream_context = opt.stream_context
self.extra_context_size = opt.extra_context_size
self.universal_layer = None
opt.ignore_source = ignore_source
self.max_layers = opt.layers
# build_modules will be called from the inherited constructor
super(UniversalTransformerDecoder, self).__init__(opt, dicts,
positional_encoder,
language_embeddings,
ignore_source)
self.positional_encoder = positional_encoder
# Parameters for the position biases
self.layer_embeddings = nn.Embedding(opt.layers, opt.model_size)
def renew_buffer(self, new_len):
return
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Universal Transformer Decoder with Absolute Attention with %.2f expected layers" % e_length)
self.universal_layer = UniversalDecoderLayer(self.opt, death_rate=self.death_rate)
# TODO: merging forward_stream and forward
# TODO: write a step function for encoder
def forward(self, input, context, src, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
""" Embedding: batch_size x len_tgt x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
if self.use_language_embedding:
lang_emb = self.language_embeddings(input_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
bos_emb = lang_emb.expand_as(emb[:, 0, :])
emb[:, 0, :] = bos_emb
lang_emb = lang_emb.unsqueeze(1).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
len_tgt = input.size(1)
mask_tgt = torch.triu(
emb.new_ones(len_tgt, len_tgt), diagonal=1).byte().unsqueeze(0)
mask_tgt = mask_tgt.bool()
time_embedding = self.positional_encoder.get_positional_embeddings(emb)
output = self.preprocess_layer(emb.transpose(0, 1).contiguous())
for i in range(self.max_layers):
layer_tensor = torch.LongTensor([i]).to(output.device)
layer_embedding = self.layer_embeddings(layer_tensor)
output, coverage, _ = self.universal_layer(output, time_embedding, layer_embedding, context,
mask_tgt, mask_src)
# last layer norm
output = self.postprocess_layer(output)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
output_dict = defaultdict(lambda: None, output_dict)
return output_dict
def step(self, input, decoder_state, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
context = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
mask_src = decoder_state.src_mask
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
if input.size(1) > 1:
input_ = input[:, -1].unsqueeze(1)
else:
input_ = input
""" Embedding: batch_size x 1 x d_model """
check = input_.gt(self.word_lut.num_embeddings)
emb = self.word_lut(input_)
""" Adding positional encoding """
emb = emb * math.sqrt(self.model_size)
time_embedding = self.time_transformer.get_positional_embeddings(emb, t=input.size(1))
# emb should be batch_size x 1 x dim
if self.use_language_embedding:
lang_emb = self.language_embeddings(lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
if input.size(1) == 1:
bos_emb = lang_emb.expand_as(emb[:, 0, :])
emb[:, 0, :] = bos_emb
lang_emb = lang_emb.unsqueeze(1).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
emb = emb.transpose(0, 1)
# batch_size x 1 x len_src
if context is not None:
if mask_src is None:
if self.encoder_type == "audio":
if src.data.dim() == 3:
if self.encoder_cnn_downsampling:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
elif self.encoder_cnn_downsampling:
long_mask = src.eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
len_tgt = input.size(1)
mask_tgt = torch.triu(
emb.new_ones(len_tgt, len_tgt), diagonal=1).byte().unsqueeze(0)
# # only get the final step of the mask during decoding (because the input of the network is only the last step)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
# mask_tgt = None
mask_tgt = mask_tgt.bool()
output = emb.contiguous()
for i in range(self.max_layers):
buffer = buffers[i] if i in buffers else None
layer_tensor = torch.LongTensor([i]).to(output.device)
layer_embedding = self.layer_embeddings(layer_tensor)
assert (output.size(0) == 1)
output, coverage, buffer = self.universal_layer(output, time_embedding, layer_embedding, context,
mask_tgt, mask_src,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
output = self.postprocess_layer(output)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = context
return output_dict
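# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original file) of the weight-sharing scheme
# used by the encoder and decoder above: a single shared layer is applied
# `layers` times, and a learned per-iteration embedding is added to the input
# so the shared weights can distinguish the iterations. The tiny linear
# "layer" is a stand-in for UniversalEncoderLayer/UniversalDecoderLayer, and
# all names and sizes below are illustrative only.
# ---------------------------------------------------------------------------
import torch
import torch.nn as nn
class _SketchUniversalStack(nn.Module):
    def __init__(self, model_size=8, layers=3):
        super().__init__()
        self.layers = layers
        self.layer_embedding = nn.Embedding(layers, model_size)  # one vector per iteration
        self.shared_layer = nn.Linear(model_size, model_size)    # stand-in for the shared block
    def forward(self, x):
        # x: T x B x model_size
        for i in range(self.layers):
            step = torch.LongTensor([i]).to(x.device)
            x = self.shared_layer(x + self.layer_embedding(step).unsqueeze(0))
        return x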
| 14,946
| 42.074928
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/old_models/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/old_models/universal_transformer_layers.py
|
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.bottle import Bottle
from onmt.modules.static_dropout import StaticDropout
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.linear import group_linear, FeedForwardSwish
from onmt.modules.linear import FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.self_attention import SelfMultiheadAttn
from collections import defaultdict
from onmt.models.transformers import PrePostProcessing, EncoderLayer, DecoderLayer
class UniversalEncoderLayer(EncoderLayer):
def __init__(self, opt, death_rate=0.0, **kwargs):
super().__init__(opt, death_rate=death_rate)
def forward(self, input, time_embedding, layer_vector, attn_mask):
input = input + time_embedding.unsqueeze(1) + layer_vector
coin = True
if self.training:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
query = self.preprocess_attn(input)
# print(query.size(), attn_mask.size())
if self.fast_self_attention:
out, _ = self.multihead(query, query, query, attn_mask, None)
else:
out, _ = self.multihead(query, query, query, attn_mask)
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
return input
class UniversalDecoderLayer(DecoderLayer):
def __init__(self, opt, death_rate=0.0):
super().__init__(opt, death_rate=death_rate)
def forward(self, input, time_embedding, layer_vector, context, mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=True):
"""
:param input:
:param layer_vector:
:param context:
:param mask_tgt:
:param mask_src:
:param incremental:
:param incremental_cache:
:param reuse_source:
:return:
"""
# sum up
input = input + time_embedding.unsqueeze(1) + layer_vector
assert(len(input.shape) == 3)
if incremental:
if incremental_cache is None:
incremental_cache = dict()
coverage = None
coin = True
if self.training:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
query = self.preprocess_attn(input)
if self.fast_self_attention:
out, _, = self.multihead_tgt(query, query, query, None, mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache)
else:
out, _, = self.multihead_tgt(query, query, query, mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache)
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
if not self.ignore_source:
query = self.preprocess_src_attn(input)
out, coverage = self.multihead_src(query, context, context, mask_src,
incremental=incremental,
incremental_cache=incremental_cache)
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_src_attn(out, input)
else:
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
return input, coverage, incremental_cache
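# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original file) of the stochastic-depth
# ("death_rate") pattern used inside both layer classes above: during training
# a residual branch is skipped entirely with probability death_rate, and when
# it is kept its output is rescaled by 1 / (1 - death_rate) so the expectation
# matches inference, where the branch always runs. Names are illustrative.
# ---------------------------------------------------------------------------
def _sketch_stochastic_depth(residual_branch, x, death_rate=0.5, training=True):
    import torch
    if training and torch.rand(1).item() < death_rate:
        return x                                  # branch skipped: identity only
    out = residual_branch(x)
    if training and death_rate > 0:
        out = out / (1 - death_rate)              # compensate for the skipped passes
    return x + out                                # usual residual connection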
| 4,861
| 33.48227
| 87
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/old_models/relative_universal_transformer.py
|
import torch
import torch.nn as nn
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import PrePostProcessing
from onmt.legacy.old_models.relative_universal_transformer_layers import \
RelativeUniversalEncoderLayer, RelativeUniversalDecoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
torch.set_printoptions(threshold=500000)
# Positional Embedding with discrete inputs
class SinusoidalPositionalEmbedding(nn.Module):
def __init__(self, demb):
super(SinusoidalPositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, sin_first=True, bsz=None):
"""
:param bsz:
:param pos_seq: sequences of RELATIVE position indices (can be negative for future)
        :param sin_first: in the Attention is All You Need paper, the sine comes first, then the cosine
"""
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
if sin_first:
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
else:
pos_emb = torch.cat([sinusoid_inp.cos(), sinusoid_inp.sin()], dim=-1)
if bsz is not None:
return pos_emb[:, None, :].repeat(1, bsz, 1)
else:
return pos_emb[:, None, :]
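# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file) for the module above:
# with inv_freq_i = 1 / 10000^(2i / d), every relative offset p is mapped to
# [sin(p * inv_freq), cos(p * inv_freq)], a d-dimensional embedding. The
# helper name and the toy sizes below are illustrative only.
# ---------------------------------------------------------------------------
def _sketch_sinusoidal_embedding(seq_len=5, model_size=8, bsz=2):
    import torch
    pos = torch.arange(seq_len - 1, -seq_len, -1.0)              # 2*seq_len - 1 relative offsets
    pos_emb = SinusoidalPositionalEmbedding(model_size)(pos, bsz=bsz)
    return pos_emb                                               # (2*seq_len - 1) x bsz x model_size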
class RelativeUniversalTransformerEncoder(TransformerEncoder):
def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
self.death_rate = opt.death_rate
self.double_position = opt.double_position
self.learnable_position_encoding = opt.learnable_position_encoding
self.layer_modules = list()
self.asynchronous = opt.asynchronous
self.max_memory_size = opt.max_memory_size
self.extra_context_size = opt.extra_context_size
self.max_pos_length = opt.max_pos_length
self.universal_layer = None
self.unidirectional = opt.unidirectional
self.adaptive_type = opt.adaptive
# build_modules will be called from the inherited constructor
super(RelativeUniversalTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type,
language_embeddings)
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
# learnable embeddings for each layer
self.layer_embedding = nn.Embedding(self.layers, opt.model_size)
self.d_head = self.model_size // self.n_heads
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Universal Transformer Encoder with Relative Attention with %.2f expected layers" % e_length)
self.universal_layer = RelativeUniversalEncoderLayer(self.opt, death_rate=self.death_rate)
def forward(self, input, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
            input: batch_size x src_len (to be transposed)
Outputs Shapes:
out: batch_size x src_len x d_model
mask_src
"""
""" Embedding: batch_size x src_len x d_model """
if self.input_type == "text":
mask_src = input.eq(onmt.constants.PAD) # batch_size x len_src
# apply switchout
# if self.switchout > 0 and self.training:
# vocab_size = self.word_lut.weight.size(0)
# input = switchout(input, vocab_size, self.switchout)
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
else:
if not self.cnn_downsampling:
mask_src = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
input.size(1), -1)
emb = emb.type_as(input)
else:
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
# first resizing to fit the CNN format
input = input.view(input.size(0), input.size(1), -1, self.channels)
input = input.permute(0, 3, 1, 2)
input = self.audio_trans(input)
input = input.permute(0, 2, 1, 3).contiguous()
input = input.view(input.size(0), input.size(1), -1)
# print(input.size())
input = self.linear_trans(input)
mask_src = long_mask[:, 0:input.size(1) * 4:4]
# the size seems to be B x T ?
emb = input
mask_src = mask_src.bool()
""" Scale the emb by sqrt(d_model) """
emb = emb * math.sqrt(self.model_size)
""" Adding language embeddings """
if self.use_language_embedding:
assert self.language_embedding is not None
if self.language_embedding_type in ['sum', 'all_sum']:
lang_emb = self.language_embedding(input_lang)
emb = emb + lang_emb.unsqueeze(1)
mem_len = 0
qlen = input.size(1)
klen = qlen + mem_len
if self.unidirectional:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
else:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
# pos_emb has size 2T+1 x 1 x H
time_encoding = self.positional_encoder(pos, bsz=input.size(0))
# B x T x H -> T x B x H
context = self.preprocess_layer(emb.transpose(0, 1))
time_encoding = self.preprocess_layer(time_encoding)
# print(input.size(), context.size(), pos.size(), time_encoding.size())
for i in range(self.layers):
layer_vector = torch.LongTensor([i]).to(emb.device)
layer_vector = self.layer_embedding(layer_vector).unsqueeze(0) # 1 x 1 x model_size
context = self.universal_layer(context, time_encoding, layer_vector, mask_src)
# last layer norm
context = self.postprocess_layer(context)
output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': mask_src, 'src': input})
if streaming:
streaming_state.prev_src_mem_size += sum(input_length.tolist())
streaming_state.prune_source_memory(self.max_memory_size)
# streaming_state.update_src_mems(hids, qlen)
output_dict['streaming_state'] = streaming_state
return output_dict
class RelativeUniversalTransformerDecoder(TransformerDecoder):
def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):
self.death_rate = opt.death_rate
self.max_memory_size = opt.max_memory_size
self.stream_context = opt.stream_context
self.extra_context_size = opt.extra_context_size
self.universal_layer = None
opt.ignore_source = ignore_source
# build_modules will be called from the inherited constructor
super(RelativeUniversalTransformerDecoder, self).__init__(opt, dicts,
positional_encoder,
language_embeddings,
ignore_source, allocate_positions=False)
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
# Parameters for the position biases
self.layer_embeddings = nn.Embedding(opt.layers, opt.model_size)
def renew_buffer(self, new_len):
return
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Universal Transformer Decoder with Relative Attention with %.2f expected layers" % e_length)
self.universal_layer = RelativeUniversalDecoderLayer(self.opt, death_rate=self.death_rate)
def forward(self, input, context, src, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
""" Embedding: batch_size x len_tgt x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.use_language_embedding:
lang_emb = self.language_embeddings(input_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb.unsqueeze(1)
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
bos_emb = lang_emb.expand_as(emb[:, 0, :])
emb[:, 0, :] = bos_emb
lang_emb = lang_emb.unsqueeze(1).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4]
else:
mask_src = src.data.eq(onmt.constants.PAD)
else:
mask_src = None
len_tgt = input.size(1)
mask_tgt = torch.triu(emb.new_ones(len_tgt, len_tgt), diagonal=1).byte()
mask_tgt = mask_tgt.bool()
pos = torch.arange(len_tgt - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
time_encoding = self.positional_encoder(pos, bsz=input.size(0))
output = self.preprocess_layer(emb.transpose(0, 1).contiguous())
time_encoding = self.preprocess_layer(time_encoding)
for i in range(self.layers):
layer_tensor = torch.LongTensor([i]).to(output.device)
layer_embedding = self.layer_embeddings(layer_tensor)
output, coverage, _ = self.universal_layer(output, context, time_encoding, layer_embedding,
mask_tgt, mask_src)
# last layer norm
output = self.postprocess_layer(output)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
output_dict = defaultdict(lambda: None, output_dict)
return output_dict
def step(self, input, decoder_state, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
context = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
mask_src = decoder_state.src_mask
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
if input.size(1) > 1:
input_ = input[:, -1].unsqueeze(1)
else:
input_ = input
""" Embedding: batch_size x 1 x d_model """
check = input_.gt(self.word_lut.num_embeddings)
emb = self.word_lut(input)
""" Adding positional encoding """
emb = emb * math.sqrt(self.model_size)
# emb should be batch_size x 1 x dim
if self.use_language_embedding:
lang_emb = self.language_embeddings(lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
if input.size(1) == 1:
bos_emb = lang_emb.expand_as(emb[:, 0, :])
emb[:, 0, :] = bos_emb
lang_emb = lang_emb.unsqueeze(1).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
emb = emb.transpose(0, 1)
# batch_size x 1 x len_src
if context is not None:
if mask_src is None:
if self.encoder_type == "audio":
if src.data.dim() == 3:
if self.encoder_cnn_downsampling:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
elif self.encoder_cnn_downsampling:
long_mask = src.eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
len_tgt = input.size(1)
mask_tgt = torch.triu(
emb.new_ones(len_tgt, len_tgt), diagonal=1).byte()
# # only get the final step of the mask during decoding (because the input of the network is only the last step)
# mask_tgt = mask_tgt[-1].unsqueeze(0)
# mask_tgt = None
mask_tgt = mask_tgt.bool()
output = emb.contiguous()
pos = torch.arange(len_tgt - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
time_encoding = self.positional_encoder(pos, bsz=input.size(0))
# time_encoding = time_encoding[-1].unsqueeze(0)
for i in range(self.layers):
# buffer = buffers[i] if i in buffers else None
layer_tensor = torch.LongTensor([i]).to(output.device)
layer_embedding = self.layer_embeddings(layer_tensor)
# assert (output.size(0) == 1)
output, coverage, _ = self.universal_layer(output, context, time_encoding, layer_embedding,
mask_tgt, mask_src)
# decoder_state.update_attention_buffer(buffer, i)
output = output[-1:]
output = self.postprocess_layer(output)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = context
return output_dict
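# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original file) of the relative-position
# ranges fed to SinusoidalPositionalEmbedding by the encoder above: the
# unidirectional case only needs distances to the past (klen offsets), while
# the bidirectional case also covers negative offsets to the future, giving
# 2 * klen - 1 values. The helper name and sizes are illustrative only.
# ---------------------------------------------------------------------------
def _sketch_relative_positions(klen=4, unidirectional=False):
    import torch
    if unidirectional:
        return torch.arange(klen - 1, -1, -1.0)       # klen - 1, ..., 1, 0
    return torch.arange(klen - 1, -klen, -1.0)        # klen - 1, ..., 0, ..., -(klen - 1)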
| 16,566
| 41.155216
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/FCTransformer/Layers.py
|
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.bottle import Bottle
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.modules.static_dropout import StaticDropout
# NOTE: the import below is an assumption about where MaxOut lives in this
# repository; it is needed by the 'maxout' activation branch further down.
from onmt.modules.max_out import MaxOut
Linear = XavierLinear
def contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class UniformMultiHeadAttention(nn.Module):
"""Applies multi-head attentions to inputs (query, key, value)
Args:
h: number of heads
d_model: dimension of model
        p: dropout probability
Params:
fc_query: FC layer to project query, d_model x (h x d_head)
fc_key: FC layer to project key, d_model x (h x d_head)
fc_value: FC layer to project value, d_model x (h x d_head)
fc_concat: FC layer to concat and project multiheads, d_model x (h x d_head)
Inputs Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
mask: batch_size x len_query x len_key or broadcastable
Outputs Shapes:
out: batch_size x len_query x d_model
coverage: batch_size x len_query x len_key
"""
def __init__(self, h, d_model, attn_p=0.1):
super(UniformMultiHeadAttention, self).__init__()
self.h = h
self.d = d_model
assert d_model % h == 0
self.d_head = d_model//h
# first attention layer for states
self.fc_query = Bottle(Linear(d_model, h*self.d_head, bias=False))
self.fc_key = Bottle(Linear(d_model, h*self.d_head, bias=False))
self.fc_value = Bottle(Linear(d_model, h*self.d_head, bias=False))
# second attention for layers
#~ self.fc_query_2 = Bottle(Linear(d_model, h*self.d_head, bias=False))
#~ self.fc_key_2 = Bottle(Linear(d_model, h*self.d_head, bias=False))
#~ self.fc_value_2 = Bottle(Linear(d_model, h*self.d_head, bias=False))
# for output
self.sm = nn.Softmax(dim=-1)
self.fc_concat = Bottle(Linear(h*self.d_head, d_model, bias=False))
#~ self.fc_concat_2 = Bottle(Linear(d_model, d_model, bias=False))
#~ self.attn_dropout = nn.Dropout(attn_p)
self.attn_dropout = StaticDropout(attn_p)
#~ self.attn_dropout_2 = StaticDropout(attn_p)
def _prepare_proj(self, x):
"""Reshape the projectons to apply softmax on each head
"""
b, l, d = x.size()
return contiguous(x.view(b, l, self.h, self.d_head).transpose(1,2)).view(b*self.h, l, self.d_head)
def shape(self, x):
b, l, d = x.size()
return x.view(b, l, self.h, self.d_head) \
.transpose(1, 2)
def forward(self, query, key, mask=None, query_mask=None, value_mask=None):
n_layer, b, len_key = key.size(0), key.size(1), key.size(2)
if value_mask is not None:
value_mask = value_mask.unsqueeze(0).repeat(n_layer, 1, 1)
key_mask = value_mask # B x T
b, len_query = query.size(0), query.size(1)
value = key
# project inputs to multi-heads
proj_query = self.fc_query(query, mask=query_mask) # batch_size x len_query x h*d_head
proj_key = self.fc_key(key, mask=key_mask).transpose(0,1).contiguous().view(b, -1, self.h * self.d_head) # batch_size x (n_layer x len_key) x h*d_head
proj_value = self.fc_value(value, mask=value_mask).transpose(0,1).contiguous().view(b, -1, self.h * self.d_head) # batch_size x (n_layer x len_key) x h*d_head
# prepare the shape for applying softmax
proj_query = self.shape(proj_query) # batch_size x h x len_query x d_head
proj_key = self.shape(proj_key) # batch_size x h x (n_layer * len_key) x d_head
proj_value = self.shape(proj_value) # batch_size x h x (n_layer * len_key) x d_head
proj_query = proj_query * (self.d_head**-0.5)
# get dotproduct softmax attns for each head
scores = torch.matmul(proj_query, proj_key.transpose(2,3)) # b x self.h x len_query x n_layer*len_key
# applying mask using broadcasting
mask_ = Variable(mask.unsqueeze(-3).unsqueeze(-2))
scores = scores.view(b, self.h, len_query, n_layer, len_key)
scores = scores.masked_fill_(mask_, -float('inf'))
scores = scores.view(b, self.h, len_query, n_layer*len_key)
# softmax on the last dimension (all of the previous states)
attns = self.sm(scores) # b x 1 x len_query x n_layer*lenkey
attns = self.attn_dropout(attns)
out = torch.matmul(attns, proj_value) # b x self.h x len_query x self.d_head)
out = out.transpose(1, 2).contiguous().view(b, len_query, self.h * self.d_head)
out = self.fc_concat(out, mask=query_mask)
#~ out = final_out.view(b, len_query, self.h*self.d_head)
coverage = None
return out, coverage
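# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original file) of what UniformMultiHeadAttention
# above does with its stacked memory bank: the key/value tensor of shape
# (n_layer, bsz, len_key, d) is flattened to (bsz, n_layer * len_key, d), so a
# single softmax lets each query attend jointly over the states of all layers.
# Single-head and unmasked for clarity; names and shapes are illustrative.
# ---------------------------------------------------------------------------
def _sketch_uniform_attention(query, memory_bank):
    # query: bsz x len_query x d, memory_bank: n_layer x bsz x len_key x d
    import torch
    n_layer, bsz, len_key, d = memory_bank.size()
    keys = memory_bank.transpose(0, 1).contiguous().view(bsz, n_layer * len_key, d)
    scores = torch.matmul(query, keys.transpose(1, 2)) / (d ** 0.5)  # bsz x len_query x (n_layer*len_key)
    attns = torch.softmax(scores, dim=-1)
    return torch.matmul(attns, keys)                                 # bsz x len_query x d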
class HierarchicalMultiHeadAttention(nn.Module):
"""Applies multi-head attentions to inputs (query, key, value)
Args:
h: number of heads
d_model: dimension of model
        p: dropout probability
Params:
fc_query: FC layer to project query, d_model x (h x d_head)
fc_key: FC layer to project key, d_model x (h x d_head)
fc_value: FC layer to project value, d_model x (h x d_head)
fc_concat: FC layer to concat and project multiheads, d_model x (h x d_head)
Inputs Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
mask: batch_size x len_query x len_key or broadcastable
Outputs Shapes:
out: batch_size x len_query x d_model
coverage: batch_size x len_query x len_key
"""
def __init__(self, h, d_model, attn_p=0.1):
super(HierarchicalMultiHeadAttention, self).__init__()
self.h = h
self.d = d_model
assert d_model % h == 0
self.d_head = d_model//h
# first attention layer for states
self.fc_query = Bottle(Linear(d_model, h*self.d_head, bias=False))
self.fc_key = Bottle(Linear(d_model, h*self.d_head, bias=False))
self.fc_value = Bottle(Linear(d_model, h*self.d_head, bias=False))
# second attention for layers
self.fc_query_2 = Bottle(Linear(d_model, h*self.d_head, bias=False))
#~ self.fc_key_2 = Bottle(Linear(d_model, h*self.d_head, bias=False))
#~ self.fc_value_2 = Bottle(Linear(d_model, h*self.d_head, bias=False))
# for output
self.fc_concat = Bottle(Linear(h*self.d_head, d_model, bias=False))
self.fc_concat_2 = Bottle(Linear(d_model, d_model, bias=False))
self.sm = nn.Softmax(dim=-1)
self.sm_2 = nn.Softmax(dim=-1)
#~ self.attn_dropout = nn.Dropout(attn_p)
self.attn_dropout = StaticDropout(attn_p)
self.attn_dropout_2 = StaticDropout(attn_p)
def _prepare_proj(self, x):
"""Reshape the projectons to apply softmax on each head
"""
b, l, d = x.size()
return contiguous(x.view(b, l, self.h, self.d_head).transpose(1,2)).view(b*self.h, l, self.d_head)
def shape(self, x):
b, l, d = x.size()
return x.view(b, l, self.h, self.d_head) \
.transpose(1, 2)
def forward(self, query, key, mask=None, query_mask=None, value_mask=None):
n_layer, b, len_key = key.size(0), key.size(1), key.size(2)
#~ query_mask = None
#~ value_mask = None
if value_mask is not None:
value_mask = value_mask.unsqueeze(0).repeat(n_layer, 1, 1)
key_mask = value_mask # n_layer x B x T
b, len_query = query.size(0), query.size(1)
#~ key = key.transpose(0,1).contiguous().view(b, n_layer * len_key, -1)
value = key
# FIRST ATTENTION STEP
# project inputs to multi-heads
proj_query = self.fc_query(query, mask=query_mask) # batch_size x len_query x h*d_head
proj_key = self.fc_key(key, mask=key_mask).transpose(0,1).contiguous().view(b, -1, self.h * self.d_head) # batch_size x (n_layer x len_key) x h*d_head
proj_value = self.fc_value(value, mask=value_mask).transpose(0,1).contiguous().view(b, -1, self.h * self.d_head) # batch_size x (n_layer x len_key) x h*d_head
# prepare the shape for applying softmax
proj_query = self.shape(proj_query) # batch_size x h x len_query x d_head
proj_key = self.shape(proj_key) # batch_size x h x (n_layer * len_key) x d_head
proj_value = self.shape(proj_value) # batch_size x h x (n_layer * len_key) x d_head
proj_query = proj_query * (self.d_head**-0.5)
# get dotproduct softmax attns for each head
scores = torch.matmul(proj_query, proj_key.transpose(2,3)) # b x self.h x len_query x n_layer*len_key
# unshape to softmax on only the len_key dimension
scores = scores.view(b, self.h, len_query, n_layer, len_key)
mask_ = Variable(mask.unsqueeze(1).unsqueeze(-2)) # b x 1 x len_query x 1 x len_key
#~ mask_ = Variable(mask.unsqueeze(-3))
scores = scores.masked_fill_(mask_, -float('inf'))
# softmax on the last dimension (len_key)
#~ attns = self.sm(scores) # b x self.h x len_query x n_layer x len_key
attns = F.softmax(scores, dim=-1)
attns = self.attn_dropout(attns)
# apply attns on value
proj_value = proj_value.view(b, self.h, n_layer, len_key, self.d_head)
attns = attns.transpose(2, 3) # b, self.h, n_layer, len_query, len_key
out = torch.matmul(attns, proj_value) # b x self.h x n_layer x len_query x self.d_head
out = out.transpose(1, 3).contiguous().view(b, len_query, n_layer, self.h * self.d_head)
out = self.fc_concat(out, query_mask.unsqueeze(-1).repeat(1, 1, n_layer))
# 2ND ATTENTION LAYER
new_query = self.fc_query_2(query, mask=query_mask)
new_query = new_query.view(-1, new_query.size(-1)).unsqueeze(1) # batch_size*len_query x 1 x h*d_head
proj_query = self.shape(new_query) # batch_size*len_query x h x 1 x d_head
new_key = out.view(-1, n_layer, self.h * self.d_head) # b*len_query x n_layer x h*self.d_head
proj_key = self.shape(new_key) # batch_size*len_query x h x n_layer x d_head
if query_mask is not None:
flattened_mask = query_mask.view(-1)
non_pad_indices = torch.nonzero(flattened_mask).squeeze(1)
proj_query = proj_query.index_select(0, non_pad_indices)
proj_key = proj_key.index_select(0, non_pad_indices)
proj_value = proj_key
scores_2 = torch.matmul(proj_query, proj_key.transpose(2,3)) # batch_size*len_query x h x 1 x n_layer
# no need to mask this time
attns_2 = F.softmax(scores_2, dim=-1) # batch_size*len_query x h x 1 x n_layer
#~ attns_2 = self.attn_dropout(attns_2)
out = torch.matmul(attns_2, proj_value) # batch_size*len_query x h x 1 x d_head
b_ = out.size(0)
#~ out = out.transpose(1, 2).unsqueeze(1).contiguous().view(b_, self.h * self.d_head) # batch_size x len_query x h*d_head
out = out.unsqueeze(2).view(-1, self.h * self.d_head)
out = self.fc_concat_2(out)
if query_mask is not None:
final_out = Variable(out.data.new(b*len_query, self.h * self.d_head).zero_())
final_out.index_copy_(0, non_pad_indices, out)
else:
final_out = out
out = final_out.view(b, len_query, self.h*self.d_head)
coverage = None
return out, coverage
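# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original file) of the two-stage idea behind
# HierarchicalMultiHeadAttention above: first attend over the positions inside
# each layer separately, then attend over the resulting per-layer summaries
# for every query position. Single-head, unmasked and without the learned
# projections, for clarity; names and shapes are illustrative only.
# ---------------------------------------------------------------------------
def _sketch_hierarchical_attention(query, memory_bank):
    # query: bsz x len_query x d, memory_bank: n_layer x bsz x len_key x d
    import torch
    n_layer, bsz, len_key, d = memory_bank.size()
    keys = memory_bank.permute(1, 0, 2, 3)                              # bsz x n_layer x len_key x d
    # stage 1: within-layer attention -> one summary per (query position, layer)
    scores = torch.einsum('bqd,blkd->bqlk', query, keys) / (d ** 0.5)
    within = torch.softmax(scores, dim=-1)
    summaries = torch.einsum('bqlk,blkd->bqld', within, keys)           # bsz x len_query x n_layer x d
    # stage 2: attention over the n_layer summaries of each query position
    layer_scores = torch.einsum('bqd,bqld->bql', query, summaries) / (d ** 0.5)
    across = torch.softmax(layer_scores, dim=-1)
    return torch.einsum('bql,bqld->bqd', across, summaries)             # bsz x len_query x d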
class FCTEncoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one encoder layer
Args:
h: number of heads
d_model: dimension of model
        p: dropout probability
d_ff: dimension of feed forward
Params:
multihead: multi-head attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
mask: batch_size x len_query x len_key or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
"""
def __init__(self, h, d_model, p, d_ff, attn_p=0.1):
super(FCTEncoderLayer, self).__init__()
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=True)
#~ self.multihead = HierarchicalMultiHeadAttention(h, d_model, attn_p=attn_p)
self.multihead = UniformMultiHeadAttention(h, d_model, attn_p=attn_p)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=True)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
self.feedforward = Bottle(feedforward)
def forward(self, input, memory_bank, attn_mask, pad_mask=None):
query = self.preprocess_attn(input)
if memory_bank is None:
memory_bank = query.unsqueeze(0)
else:
#~ memory_bank = query.unsqueeze(0)
memory_bank = torch.cat([memory_bank, query.unsqueeze(0)], dim=0) # batch_size x n_layer x len_src x hidden
""" Deep attention layer """
out, _ = self.multihead(query, memory_bank, attn_mask,
query_mask=pad_mask, value_mask=pad_mask)
input = self.postprocess_attn(out, input, mask=pad_mask)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask),
mask=pad_mask)
input = self.postprocess_ffn(out, input, mask=pad_mask)
return input, memory_bank
class FCTDecoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one layer of decoder
Args:
h: number of heads
d_model: dimension of model
        p: dropout probability
d_ff: dimension of feed forward
Params:
multihead_tgt: multi-head self attentions layer
multihead_src: multi-head encoder-decoder attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
context: batch_size x len_src x d_model
mask_tgt: batch_size x len_query x len_key or broadcastable
mask_src: batch_size x len_query x len_src or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
coverage: batch_size x len_query x len_key
"""
def __init__(self, h, d_model, p, d_ff, attn_p=0.1):
super(FCTDecoderLayer, self).__init__()
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=True)
self.preprocess_src_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_src_attn = PrePostProcessing(d_model, p, sequence='da', static=True)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=True)
#~ self.multihead_tgt = HierarchicalMultiHeadAttention(h, d_model, attn_p=attn_p)
self.multihead_tgt = UniformMultiHeadAttention(h, d_model, attn_p=attn_p)
#~ self.multihead_src = MultiHeadAttention(h, d_model, attn_p=attn_p)
self.multihead_src = UniformMultiHeadAttention(h, d_model, attn_p=attn_p)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
self.feedforward = Bottle(feedforward)
def forward(self, input, context, memory_bank, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_attn(input, mask=pad_mask_tgt)
if memory_bank is None:
memory_bank = query.unsqueeze(0)
else:
#~ memory_bank = query.unsqueeze(0)
memory_bank = torch.cat([memory_bank, query.unsqueeze(0)], dim=0) # n_layer x batch_size x len_src x hidden
out, _ = self.multihead_tgt(query, memory_bank, mask_tgt,
query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
out, coverage = self.multihead_src(query, context, mask_src,
query_mask=pad_mask_tgt, value_mask=pad_mask_src)
input = self.postprocess_src_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
mask=pad_mask_tgt)
input = self.postprocess_ffn(out, input)
return input, memory_bank, coverage
def step(self, input, context, memory_bank, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None, buffer=None):
query = self.preprocess_attn(input, mask=pad_mask_tgt)
if buffer is not None:
buffer = torch.cat([buffer, query], dim=1)
else:
buffer = query
if memory_bank is None:
memory_bank = buffer.unsqueeze(0)
else:
memory_bank = torch.cat([memory_bank, buffer.unsqueeze(0)], dim=0) # batch_size x n_layer x len_src x hidden
out, _ = self.multihead_tgt(query, memory_bank, mask_tgt,
query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
out, coverage = self.multihead_src(query, context, mask_src,
query_mask=pad_mask_tgt, value_mask=pad_mask_src)
input = self.postprocess_src_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
mask=pad_mask_tgt)
input = self.postprocess_ffn(out, input)
return input, memory_bank, coverage, buffer
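# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original file) of how the memory bank grows
# across the FCT layers above: every layer appends a (normalized) view of its
# input to the running bank, so layer i attends over the states of layers
# 0..i. The mean over the bank below is only a stand-in for that attention,
# and all names are illustrative.
# ---------------------------------------------------------------------------
def _sketch_memory_bank_growth(x, num_layers=3):
    # x: bsz x len x d
    import torch
    memory_bank = None
    for _ in range(num_layers):
        state = x.unsqueeze(0)                                  # 1 x bsz x len x d
        memory_bank = state if memory_bank is None else torch.cat([memory_bank, state], dim=0)
        x = x + memory_bank.mean(dim=0)                         # stand-in for attention over the bank
    return x, memory_bank                                       # memory_bank: num_layers x bsz x len x d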
| 21,054
| 38.801512
| 173
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/FCTransformer/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/FCTransformer/Models.py
|
import numpy as np
import torch, math
import torch.nn as nn
from onmt.models.transformer_layers import PositionalEncoding
from onmt.legacy.FCTransformer.Layers import FCTEncoderLayer, FCTDecoderLayer
from onmt.modules.base_seq2seq import NMTModel, Reconstructor
import onmt
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from torch.utils.checkpoint import checkpoint
def custom_layer(module):
def custom_forward(*args):
output = module(*args)
return output
return custom_forward
class FCTransformerEncoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt: list of options ( see train.py )
dicts : dictionary (for source language)
"""
def __init__(self, opt, dicts, positional_encoder):
super(FCTransformerEncoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
self.version = opt.version
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
if opt.time == 'positional_encoding':
self.time_transformer = positional_encoder
elif opt.time == 'gru':
self.time_transformer = nn.GRU(self.model_size, self.model_size, 1, batch_first=True)
elif opt.time == 'lstm':
self.time_transformer = nn.LSTM(self.model_size, self.model_size, 1, batch_first=True)
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.positional_encoder = positional_encoder
self.layer_modules = nn.ModuleList([FCTEncoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout) for _ in range(self.layers)])
def forward(self, input):
"""
Inputs Shapes:
input: batch_size x len_src (to be transposed)
Outputs Shapes:
out: batch_size x len_src x d_model
mask_src
"""
""" Embedding: batch_size x len_src x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
""" Scale the emb by sqrt(d_model) """
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = input.data.eq(onmt.constants.PAD).unsqueeze(1) # batch_size x len_src x 1 for broadcasting
pad_mask = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
#~ pad_mask = None
context = emb.contiguous()
memory_bank = None
for i, layer in enumerate(self.layer_modules):
if len(self.layer_modules) - i <= onmt.constants.checkpointing and self.training:
context, memory_bank = checkpoint(custom_layer(layer), context, memory_bank, mask_src, pad_mask)
#~ print(type(context))
else:
context, memory_bank = layer(context, memory_bank, mask_src, pad_mask) # batch_size x len_src x d_model
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
context = self.postprocess_layer(context)
# make a huge memory bank on the encoder side
memory_bank = torch.cat([memory_bank, context.unsqueeze(0)], dim=0)
return memory_bank, mask_src
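# ---------------------------------------------------------------------------
# Editor's note: a small, self-contained sketch (illustrative only) of how the
# encoder above accumulates its "memory bank": each layer's output is stacked
# along a new leading dimension with torch.cat, giving a tensor of shape
# n_layers x batch_size x len_src x d_model.
import torch
def stack_layer_outputs(layer_outputs):
    # layer_outputs: list of tensors, each batch_size x len_src x d_model
    memory_bank = None
    for out in layer_outputs:
        out = out.unsqueeze(0)  # 1 x batch x len x d_model
        memory_bank = out if memory_bank is None else torch.cat([memory_bank, out], dim=0)
    return memory_bank
if __name__ == "__main__":
    outs = [torch.randn(4, 7, 16) for _ in range(3)]
    print(stack_layer_outputs(outs).shape)   # torch.Size([3, 4, 7, 16])
# ---------------------------------------------------------------------------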
class FCTransformerDecoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt
dicts
"""
def __init__(self, opt, dicts, positional_encoder):
super(FCTransformerDecoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
self.version = opt.version
if opt.time == 'positional_encoding':
self.time_transformer = positional_encoder
elif opt.time == 'gru':
self.time_transformer = nn.GRU(self.model_size, self.model_size, 1, batch_first=True)
elif opt.time == 'lstm':
self.time_transformer = nn.LSTM(self.model_size, self.model_size, 1, batch_first=True)
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
if self.version == 1.0:
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
self.positional_encoder = positional_encoder
self.layer_modules = nn.ModuleList([FCTDecoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout) for _ in range(self.layers)])
len_max = self.positional_encoder.len_max
mask = torch.ByteTensor(np.triu(np.ones((len_max,len_max)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def renew_buffer(self, new_len):
self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len,new_len)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def forward(self, input, context, src):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
""" Embedding: batch_size x len_tgt x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
memory_bank = None
for i, layer in enumerate(self.layer_modules):
if len(self.layer_modules) - i <= onmt.constants.checkpointing and self.training:
output, memory_bank, coverage = checkpoint(custom_layer(layer), output, context, memory_bank, mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
else:
output, memory_bank, coverage = layer(output, context, memory_bank, mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage
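# ---------------------------------------------------------------------------
# Editor's note: an illustrative, standalone sketch of how the decoder above
# combines the padding mask with an upper-triangular (future) mask. The PAD
# index of 0 is an assumption made only for this example.
import torch
def make_target_mask(tgt, pad=0):
    # tgt: batch_size x len_tgt (token indices)
    len_tgt = tgt.size(1)
    pad_mask = tgt.eq(pad).unsqueeze(1)                          # batch x 1 x len_tgt
    future_mask = torch.ones(len_tgt, len_tgt).triu(1).bool()    # len_tgt x len_tgt
    return pad_mask | future_mask                                # batch x len_tgt x len_tgt
if __name__ == "__main__":
    tgt = torch.tensor([[5, 6, 7, 0]])   # one padded position at the end
    print(make_target_mask(tgt).int())
# ---------------------------------------------------------------------------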
def step(self, input, context, src, buffer=None):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
output_buffer = list()
batch_size = input.size(0)
input_ = input[:,-1].unsqueeze(1)
# print(input_.size())
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
if self.time == 'positional_encoding':
emb = self.time_transformer(emb, t=input.size(1))
else:
prev_h = buffer[0] if buffer is not None else None
emb = self.time_transformer(emb, prev_h)
buffer[0] = emb[1]
if isinstance(emb, tuple):
emb = emb[0] # emb should be batch_size x 1 x dim
# Preprocess layer: adding dropout
emb = self.preprocess_layer(emb)
# batch_size x 1 x len_src
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
# mask_tgt = self.mask[:len_tgt, :len_tgt].unsqueeze(0).repeat(batch_size, 1, 1)
mask_tgt = torch.gt(mask_tgt, 0)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
memory_bank = None
for i, layer in enumerate(self.layer_modules):
buffer_ = buffer[i] if buffer is not None else None
assert(output.size(1) == 1)
output, memory_bank, coverage, buffer_ = layer.step(output, context, memory_bank, mask_tgt, mask_src,
pad_mask_tgt=None, pad_mask_src=None, buffer=buffer_) # batch_size x len_src x d_model
output_buffer.append(buffer_)
buffer = torch.stack(output_buffer)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage, buffer
| 12,177
| 38.411003
| 170
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/LSTMLM/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/LSTMLM/Models.py
|
import numpy as np
import torch, math
import torch.nn as nn
from onmt.models.transformers import TransformerDecodingState
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
import onmt
from onmt.modules.dropout import embedded_dropout
#~ from onmt.modules.Checkpoint import checkpoint
from torch.utils.checkpoint import checkpoint
from collections import defaultdict
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.legacy.TransformerLM.Layers import LMDecoderLayer
def custom_layer(module):
def custom_forward(*args):
output = module(*args)
return output
return custom_forward
class LSTMLMDecoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt
dicts
"""
def __init__(self, opt, dicts):
super().__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
self.encoder_type = opt.encoder_type
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
self.rnn = nn.LSTM(self.model_size, self.model_size, num_layers=3, dropout=self.dropout)
self.postprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
self.h = None
self.c = None
def renew_buffer(self, new_len):
return
def forward(self, input, **kwargs):
"""
Inputs Shapes:
input: (Variable) len_tgt x batch_size
Outputs Shapes:
out: len_tgt x batch_size x d_model
"""
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
emb = self.preprocess_layer(emb)
if self.h is None:
lstm_mem = None
else:
lstm_mem = (self.h.detach(), self.c.detach())
output, (h, c) = self.rnn(emb, lstm_mem)
output = self.postprocess_layer(output)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['lstm_mem'] = (h, c)
self.h = h
self.c = c
return output_dict
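# ---------------------------------------------------------------------------
# Editor's note: a minimal sketch (not the project's API) of the stateful
# pattern used in forward() above: the LSTM hidden state is carried over
# between calls but detached, so gradients do not flow across segment
# boundaries (truncated backpropagation through time).
import torch
import torch.nn as nn
rnn = nn.LSTM(input_size=8, hidden_size=8, num_layers=1)
state = None
for _ in range(3):                        # three consecutive segments
    segment = torch.randn(5, 2, 8)        # len x batch x dim
    if state is not None:
        state = (state[0].detach(), state[1].detach())
    out, state = rnn(segment, state)
print(out.shape)                          # torch.Size([5, 2, 8])
# ---------------------------------------------------------------------------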
def step(self, input, decoder_state):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
buffers = decoder_state.attention_buffers
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
input_ = input[:,-1].unsqueeze(1)
# output_buffer = list()
# batch_size = input_.size(0)
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_)
if isinstance(emb, tuple):
emb = emb[0]
# Preprocess layer: adding dropout
emb = self.preprocess_layer(emb)
emb = emb.transpose(0, 1)
# batch_size x 1 x len_src
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
# print(mask_tgt)
output = emb.contiguous()
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
assert(output.size(0) == 1)
output, coverage, buffer = layer.step(output, mask_tgt,buffer=buffer)
decoder_state.update_attention_buffer(buffer, i)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage
class LSTMLM(NMTModel):
"""Main model in 'Attention is all you need' """
def __init__(self, encoder, decoder, generator=None):
super().__init__( encoder, decoder, generator)
self.model_size = self.decoder.model_size
def forward(self, batch):
"""
Inputs Shapes:
src: len_src x batch_size
tgt: len_tgt x batch_size
Outputs Shapes:
out: batch_size*len_tgt x model_size
"""
# we only need target for language model
tgt = batch.get('target_input') # T x B
tgt_out = batch.get('target_output') # T x B
decoder_output = self.decoder(tgt)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = decoder_output['hidden']
return output_dict
def reset_states(self):
self.decoder.h = None
self.decoder.c = None
def step(self, input_t, decoder_state):
"""
Decoding function:
generate new decoder output based on the current input and current decoder state
the decoder state is updated in the process
:param input_t: the input word index at time t
:param decoder_state: object DecoderState containing the buffers required for decoding
:return: a dictionary containing: log-prob output and the attention coverage
"""
hidden, coverage = self.decoder.step(input_t, decoder_state)
log_prob = self.generator[0](hidden.squeeze(0))
output_dict = defaultdict(lambda: None)
output_dict['log_prob'] = log_prob
return output_dict
# print a sample
def sample(self):
pass
def create_decoder_state(self, batch, beam_size=1):
return LSTMDecodingState(None, None, beam_size=beam_size, model_size=self.model_size)
class LSTMDecodingState(TransformerDecodingState):
def __init__(self, src, context, beam_size=1, model_size=512):
# if audio, only take one dimension since it is only used for the mask
self.beam_size = beam_size
self.src = src
self.context = context
self.input_seq = None
self.h = None
self.c = None
self.model_size = model_size
self.attention_buffers = {}
def update_beam(self, beam, b, remaining_sents, idx):
for tensor in [self.src, self.input_seq] :
if tensor is None:
continue
t_, br = tensor.size()
sent_states = tensor.view(t_, self.beam_size, remaining_sents)[:, :, idx]
sent_states.copy_(sent_states.index_select(
1, beam[b].getCurrentOrigin()))
for l in self.attention_buffers:
buffer_ = self.attention_buffers[l]
if buffer_ is None:
continue
for k in buffer_:
t_, br_, d_ = buffer_[k].size()
sent_states = buffer_[k].view(t_, self.beam_size, remaining_sents, d_)[:, :, idx, :]
sent_states.data.copy_(sent_states.data.index_select(
1, beam[b].getCurrentOrigin()))
# in this section, the sentences that are still active are
# compacted so that the decoder is not run on completed sentences
def prune_complete_beam(self, active_idx, remaining_sents):
model_size = self.model_size
def update_active(t):
if t is None:
return t
# select only the remaining active sentences
view = t.data.view(-1, remaining_sents, model_size)
new_size = list(t.size())
new_size[-2] = new_size[-2] * len(active_idx) // remaining_sents
return view.index_select(1, active_idx).view(*new_size)
def update_active_2d(t):
if t is None:
return t
view = t.view(-1, remaining_sents)
new_size = list(t.size())
new_size[-1] = new_size[-1] * len(active_idx) // remaining_sents
new_t = view.index_select(1, active_idx).view(*new_size)
return new_t
self.context = update_active(self.context)
self.input_seq = update_active_2d(self.input_seq)
self.src = update_active_2d(self.src)
for l in self.attention_buffers:
buffer_ = self.attention_buffers[l]
for k in buffer_:
buffer_[k] = update_active(buffer_[k])
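# ---------------------------------------------------------------------------
# Editor's note: an illustrative, standalone sketch of the compaction done in
# prune_complete_beam above: finished sentences are dropped from the flattened
# (beam * batch) dimension with index_select. All sizes are made up.
import torch
beam_size, remaining_sents, model_size = 2, 3, 4
t = torch.randn(5, beam_size * remaining_sents, model_size)    # time x (beam*batch) x dim
active_idx = torch.tensor([0, 2])                               # keep sentences 0 and 2
view = t.view(5, beam_size, remaining_sents, model_size)
pruned = view.index_select(2, active_idx).view(5, beam_size * len(active_idx), model_size)
print(pruned.shape)                                             # torch.Size([5, 4, 4])
# ---------------------------------------------------------------------------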
| 9,163
| 29.751678
| 113
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/FusionNetwork/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/FusionNetwork/Models.py
|
import numpy as np
import torch, math
import torch.nn as nn
from onmt.modules.base_seq2seq import DecoderState
from onmt.models.transformers import TransformerDecodingState
from collections import defaultdict
import torch.nn.functional as F
import onmt
class FusionNetwork(nn.Module):
"""Main model in 'Attention is all you need' """
def __init__(self, tm_model, lm_model):
super(FusionNetwork, self).__init__()
self.tm_model = tm_model
self.lm_model = lm_model
# freezing the parameters for the language model
for param in self.lm_model.parameters():
param.requires_grad = False
def forward(self, batch):
"""
Inputs Shapes:
src: len_src x batch_size
tgt: len_tgt x batch_size
Outputs Shapes:
out: batch_size*len_tgt x model_size
"""
nmt_output_dict = self.tm_model(batch)
# no gradient for the LM side
with torch.no_grad():
lm_output_dict = self.lm_model(batch)
output_dict = defaultdict(lambda: None)
output_dict['tm'] = nmt_output_dict
output_dict['lm'] = lm_output_dict
return output_dict
# a utility function to fuse two states
# return log prob
def fuse_states(self, tm_state, lm_state):
# PRENORM algorithm
# (1) generate the log P_lm
with torch.no_grad():
log_lm = self.lm_model.generator[0](lm_state, log_softmax=True)
# (2) generate the logits for tm
tm_logits = self.tm_model.generator[0](tm_state, log_softmax=False)
# (3) add the bias of lm to the logits
dists = F.log_softmax(tm_logits + log_lm, dim=-1)
# ## POSTNORM
# # (1) generate the P_lm
# with torch.no_grad():
# lm_logits = self.lm_model.generator[0](lm_state, log_softmax=False)
#
# # (2) generate the logits for tm
# tm_logits = self.tm_model.generator[0](tm_state, log_softmax=False)
#
# dists = F.log_softmax(F.softmax(tm_logits, dim=-1) * F.softmax(lm_logits, dim=-1), dim=-1)
return dists
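# ---------------------------------------------------------------------------
# Editor's note: a tiny numerical sketch (assumed vocabulary size of 5, random
# logits) of the PRENORM fusion above: the language-model log-probabilities are
# added to the translation-model logits before the final log-softmax.
import torch
import torch.nn.functional as F
tm_logits = torch.randn(2, 5)                        # batch x vocab, from the TM generator
log_lm = F.log_softmax(torch.randn(2, 5), dim=-1)    # batch x vocab, log P_lm
fused = F.log_softmax(tm_logits + log_lm, dim=-1)    # fused log-probabilities
print(fused.exp().sum(dim=-1))                       # each row sums to ~1.0
# ---------------------------------------------------------------------------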
def renew_buffer(self, new_len):
self.tm_model.decoder.renew_buffer(new_len)
self.lm_model.decoder.renew_buffer(new_len)
def decode(self, batch):
"""
:param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
:return: gold_scores (torch.Tensor) log probs for each sentence
gold_words (Int) the total number of non-padded tokens
allgold_scores (list of Tensors) log probs for each word in the sentence
"""
src = batch.get('source')
tgt_input = batch.get('target_input')
tgt_output = batch.get('target_output')
# transpose to have batch first
src = src.transpose(0, 1)
tgt_input = tgt_input.transpose(0, 1)
batch_size = tgt_input.size(0)
# (1) encode the source and decode with the translation model
context = self.tm_model.encoder(src)['context']
if (hasattr(self,
'autoencoder') and self.autoencoder and self.autoencoder.representation == "EncoderHiddenState"):
context = self.autoencoder.autocode(context)
decoder_output = self.tm_model.decoder(tgt_input, context, src)['hidden']
output = decoder_output
if (hasattr(self, 'autoencoder')
and self.autoencoder and self.autoencoder.representation == "DecoderHiddenState"):
output = self.autoencoder.autocode(output)
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
# (2) decode using the language model
lm_decoder_output = self.lm_model.decoder(tgt_input)['hidden']
for dec_t, lm_t, tgt_t in zip(decoder_output, lm_decoder_output, tgt_output):
# generate the current step distribution from both states
gen_t = self.fuse_states(dec_t, lm_t)
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.constants.PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
gold_words += tgt_t.ne(onmt.constants.PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
def step(self, input_t, decoder_state):
"""
Decoding function:
generate new decoder output based on the current input and current decoder state
the decoder state is updated in the process
:param input_t: the input word index at time t
:param decoder_state: object FusionDecoderState containing the buffers required for decoding
:return: a dictionary containing: log-prob output and the attention coverage
"""
# (1) decode using the translation model
tm_hidden, coverage = self.tm_model.decoder.step(input_t, decoder_state.tm_state)
# (2) decode using the language model
lm_hidden, _ = self.lm_model.decoder.step(input_t, decoder_state.lm_state)
log_prob = self.fuse_states(tm_hidden, lm_hidden)
# log_prob = self.tm_model.generator[0](tm_hidden)
last_coverage = coverage[:, -1, :].squeeze(1)
output_dict = defaultdict(lambda: None)
output_dict['log_prob'] = log_prob
output_dict['coverage'] = last_coverage
return output_dict
def create_decoder_state(self, batch, beam_size=1):
"""
Generate a new decoder state based on the batch input
:param batch: Batch object (may not contain target during decoding)
:param beam_size: Size of beam used in beam search
:return:
"""
tm_decoder_state = self.tm_model.create_decoder_state(batch, beam_size=beam_size)
lm_decoder_state = self.lm_model.create_decoder_state(batch, beam_size=beam_size)
decoder_state = FusionDecodingState(tm_decoder_state, lm_decoder_state)
return decoder_state
class FusionDecodingState(DecoderState):
def __init__(self, tm_state, lm_state):
self.tm_state = tm_state
self.lm_state = lm_state
self.original_src = tm_state.original_src
self.beam_size = tm_state.beam_size
def update_beam(self, beam, b, remaining_sents, idx):
self.tm_state.update_beam(beam, b, remaining_sents, idx)
self.lm_state.update_beam(beam, b, remaining_sents, idx)
# in this section, the sentences that are still active are
# compacted so that the decoder is not run on completed sentences
def prune_complete_beam(self, active_idx, remaining_sents):
self.tm_state.prune_complete_beam(active_idx, remaining_sents)
self.lm_state.prune_complete_beam(active_idx, remaining_sents)
| 6,887
| 33.964467
| 109
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/DynamicConvolution/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/MixtureModel/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/MixtureModel/Models.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/TransformerLM/Layers.py
|
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import onmt
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Bottle, FeedForward
class LMDecoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one layer of decoder
Args:
h: number of heads
d_model: dimension of model
p: dropout probability
d_ff: dimension of feed forward
Params:
multihead_tgt: multi-head self attentions layer
multihead_src: multi-head encoder-decoder attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
context: batch_size x len_src x d_model
mask_tgt: batch_size x len_query x len_key or broadcastable
mask_src: batch_size x len_query x len_src or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
coverage: batch_size x len_query x len_key
"""
def __init__(self, h, d_model, p, d_ff, attn_p=0.1, ):
super(LMDecoderLayer, self).__init__()
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.multihead_tgt = MultiHeadAttention(h, d_model, attn_p=attn_p, static=onmt.constants.static, share=1)
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p, static=onmt.constants.static)
self.feedforward = Bottle(feedforward)
def forward(self, input, mask_tgt):
""" Self attention layer
layernorm > attn > dropout > residual
"""
# input and context should be time first ?
query = self.preprocess_attn(input)
self_context = query
out, _ = self.multihead_tgt(query, self_context, self_context, mask_tgt)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
input = self.postprocess_ffn(out, input)
coverage = None
return input, coverage
def step(self, input, mask_tgt, buffer=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_attn(input)
out, _, buffer = self.multihead_tgt.step(query, query, query, mask_tgt, buffer=buffer)
input = self.postprocess_attn(out, input)
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
input = self.postprocess_ffn(out, input)
return input, coverage, buffer
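# ---------------------------------------------------------------------------
# Editor's note: a standalone sketch (illustrative only, not the project's
# MultiHeadAttention API) of the buffering idea used in step() above: at each
# decoding step only the newest position is computed, and it is concatenated
# onto a buffer of previous steps so self-attention can attend to the prefix.
import torch
buffer = None
for t in range(3):
    new_step = torch.randn(1, 2, 8)          # 1 x batch x d_model (current position only)
    buffer = new_step if buffer is None else torch.cat([buffer, new_step], dim=0)
    # self-attention at step t would use `new_step` as query and `buffer` as keys/values
    print(t, buffer.shape)                   # grows: (1,2,8) -> (2,2,8) -> (3,2,8)
# ---------------------------------------------------------------------------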
| 3,338
| 32.059406
| 113
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/TransformerLM/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/legacy/TransformerLM/Models.py
|
import numpy as np
import torch, math
import torch.nn as nn
from onmt.models.transformers import TransformerDecodingState
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
import onmt
from onmt.modules.dropout import embedded_dropout
#~ from onmt.modules.Checkpoint import checkpoint
from torch.utils.checkpoint import checkpoint
from collections import defaultdict
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.legacy.TransformerLM.Layers import LMDecoderLayer
def custom_layer(module):
def custom_forward(*args):
output = module(*args)
return output
return custom_forward
class TransformerLMDecoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt
dicts
"""
def __init__(self, opt, dicts, positional_encoder):
super(TransformerLMDecoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
self.encoder_type = opt.encoder_type
if opt.time == 'positional_encoding':
self.time_transformer = positional_encoder
else:
raise NotImplementedError
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
self.positional_encoder = positional_encoder
len_max = self.positional_encoder.len_max
mask = torch.ByteTensor(np.triu(np.ones((len_max,len_max)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
self.build_modules()
def build_modules(self):
self.layer_modules = nn.ModuleList([LMDecoderLayer(self.n_heads, self.model_size,
self.dropout, self.inner_size,
self.attn_dropout,
) for _ in range(self.layers)])
def renew_buffer(self, new_len):
print(new_len)
self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len,new_len)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def forward(self, input, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
""" Embedding: batch_size x len_tgt x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
output = emb.transpose(0, 1).contiguous()
for i, layer in enumerate(self.layer_modules):
output, coverage = layer(output, mask_tgt) # batch_size x len_src x d_model
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
output_dict = { 'hidden': output, 'coverage': coverage }
# return output, None
return output_dict
def step(self, input, decoder_state):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
buffers = decoder_state.attention_buffers
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
input_ = input[:,-1].unsqueeze(1)
# output_buffer = list()
# batch_size = input_.size(0)
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_)
""" Adding positional encoding """
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
emb = self.time_transformer(emb, t=input.size(1))
else:
# prev_h = buffer[0] if buffer is None else None
# emb = self.time_transformer(emb, prev_h)
# buffer[0] = emb[1]
raise NotImplementedError
if isinstance(emb, tuple):
emb = emb[0]
# emb should be batch_size x 1 x dim
# Preprocess layer: adding dropout
emb = self.preprocess_layer(emb)
emb = emb.transpose(0, 1)
# batch_size x 1 x len_src
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
# print(mask_tgt)
output = emb.contiguous()
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
assert(output.size(0) == 1)
output, coverage, buffer = layer.step(output, mask_tgt,buffer=buffer)
decoder_state.update_attention_buffer(buffer, i)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage
class TransformerLM(NMTModel):
"""Main model in 'Attention is all you need' """
def __init__(self, encoder, decoder, generator=None):
super().__init__( encoder, decoder, generator)
self.model_size = self.decoder.model_size
def forward(self, batch):
"""
Inputs Shapes:
src: len_src x batch_size
tgt: len_tgt x batch_size
Outputs Shapes:
out: batch_size*len_tgt x model_size
"""
# we only need target for language model
tgt = batch.get('target_input')
tgt_out = batch.get('target_output')
tgt = tgt.transpose(0, 1)
decoder_output = self.decoder(tgt)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = decoder_output['hidden']
return output_dict
def step(self, input_t, decoder_state):
"""
Decoding function:
generate new decoder output based on the current input and current decoder state
the decoder state is updated in the process
:param input_t: the input word index at time t
:param decoder_state: object DecoderState containing the buffers required for decoding
:return: a dictionary containing: log-prob output and the attention coverage
"""
hidden, coverage = self.decoder.step(input_t, decoder_state)
log_prob = self.generator[0](hidden.squeeze(0))
output_dict = defaultdict(lambda: None)
output_dict['log_prob'] = log_prob
return output_dict
# print a sample
def sample(self):
pass
def create_decoder_state(self, batch, beam_size=1):
return TransformerDecodingState(None, None, beam_size=beam_size, model_size=self.model_size)
| 8,777
| 32.632184
| 112
|
py
|
pixyz
|
pixyz-main/setup.py
|
import io
import os
import re
from setuptools import setup, find_packages
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
with io.open("README.md", "r", encoding="utf8") as fh:
long_description = fh.read()
setup(
name='pixyz',
version=find_version("pixyz", "__init__.py"),
packages=find_packages(),
url='https://github.com/masa-su/pixyz',
author='masa-su',
author_email='masa@weblab.t.u-tokyo.ac.jp',
description='Deep generative modeling library',
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=[
"torch>=1.0",
"scipy",
"numpy",
"sympy>=1.4",
"ipython",
"networkx",
],
extras_require={
'dev': ['pytest',
'flake8==3.9.2',
'pytest-cov',
'pytest-flake8',
'sphinx',
'sphinx_rtd_theme',
'twine',
"tqdm",
"torchvision",
"tensorboardX",
'sklearn'],
'test': ['pytest-cov',
'flake8==3.9.2',
'pytest-flake8',
'sphinx',
'sphinx_rtd_theme',
'tqdm',
'sklearn'],
},
license='MIT',
classifiers=[
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
"Operating System :: OS Independent",
],
)
| 2,028
| 26.053333
| 68
|
py
|
pixyz
|
pixyz-main/pixyz/utils.py
|
import functools
import torch
import sympy
from IPython.display import Math
import pixyz
_EPSILON = 1e-07
_CACHE_MAXSIZE = 2 * 10
def set_epsilon(eps):
"""Set a `epsilon` parameter.
Parameters
----------
eps : int or float
Returns
-------
Examples
--------
>>> from unittest import mock
>>> with mock.patch('pixyz.utils._EPSILON', 1e-07):
... set_epsilon(1e-06)
... epsilon()
1e-06
"""
global _EPSILON
_EPSILON = eps
def epsilon():
"""Get a `epsilon` parameter.
Returns
-------
int or float
Examples
--------
>>> from unittest import mock
>>> with mock.patch('pixyz.utils._EPSILON', 1e-07):
... epsilon()
1e-07
"""
return _EPSILON
def set_cache_maxsize(cache_maxsize):
"""Set a `cache_maxsize` parameter.
Parameters
----------
cache_maxsize : int
Returns
-------
Examples
--------
>>> from unittest import mock
>>> with mock.patch('pixyz.utils._CACHE_MAXSIZE', 100):
... set_cache_maxsize(100)
... cache_maxsize()
100
"""
global _CACHE_MAXSIZE
_CACHE_MAXSIZE = cache_maxsize
def cache_maxsize():
"""Get a `cache_maxsize` parameter.
Returns
-------
int
Examples
--------
>>> from unittest import mock
>>> with mock.patch('pixyz.utils._CACHE_MAXSIZE', 100):
... cache_maxsize()
100
"""
return _CACHE_MAXSIZE
def get_dict_values(dicts, keys, return_dict=False):
"""Get values from `dicts` specified by `keys`.
When `return_dict` is True, return values are in dictionary format.
Parameters
----------
dicts : dict
keys : list
return_dict : bool
Returns
-------
dict or list
Examples
--------
>>> get_dict_values({"a":1,"b":2,"c":3}, ["b"])
[2]
>>> get_dict_values({"a":1,"b":2,"c":3}, ["b", "d"], True)
{'b': 2}
"""
new_dicts = dict((key, dicts[key]) for key in keys if key in list(dicts.keys()))
if return_dict is False:
return list(new_dicts.values())
return new_dicts
def delete_dict_values(dicts, keys):
"""Delete values from `dicts` specified by `keys`.
Parameters
----------
dicts : dict
keys : list
Returns
-------
new_dicts : dict
Examples
--------
>>> delete_dict_values({"a":1,"b":2,"c":3}, ["b","d"])
{'a': 1, 'c': 3}
"""
new_dicts = dict((key, value) for key, value in dicts.items() if key not in keys)
return new_dicts
def detach_dict(dicts):
"""Detach all values in `dicts`.
Parameters
----------
dicts : dict
Returns
-------
dict
"""
return {k: v.detach() for k, v in dicts.items()}
def replace_dict_keys(dicts, replace_list_dict):
""" Replace values in `dicts` according to `replace_list_dict`.
Parameters
----------
dicts : dict
Dictionary.
replace_list_dict : dict
Dictionary.
Returns
-------
replaced_dicts : dict
Dictionary.
Examples
--------
>>> replace_dict_keys({"a":1,"b":2,"c":3}, {"a":"x","b":"y"})
{'x': 1, 'y': 2, 'c': 3}
>>> replace_dict_keys({"a":1,"b":2,"c":3}, {"a":"x","e":"y"}) # keys of `replace_list_dict`
{'x': 1, 'b': 2, 'c': 3}
"""
replaced_dicts = dict([(replace_list_dict[key], value) if key in list(replace_list_dict.keys())
else (key, value) for key, value in dicts.items()])
return replaced_dicts
def replace_dict_keys_split(dicts, replace_list_dict):
""" Replace values in `dicts` according to :attr:`replace_list_dict`.
Replaced dict is splitted by :attr:`replaced_dict` and :attr:`remain_dict`.
Parameters
----------
dicts : dict
Dictionary.
replace_list_dict : dict
Dictionary.
Returns
-------
replaced_dict : dict
Dictionary.
remain_dict : dict
Dictionary.
Examples
--------
>>> replace_list_dict = {'a': 'loc'}
>>> x_dict = {'a': 0, 'b': 1}
>>> print(replace_dict_keys_split(x_dict, replace_list_dict))
({'loc': 0}, {'b': 1})
"""
replaced_dict = {replace_list_dict[key]: value for key, value in dicts.items()
if key in list(replace_list_dict.keys())}
remain_dict = {key: value for key, value in dicts.items()
if key not in list(replace_list_dict.keys())}
return replaced_dict, remain_dict
# immutable dict class
class FrozenSampleDict:
def __init__(self, dict_):
self.dict = dict_
def __hash__(self):
hashes = [(hash(key), hash(value)) for key, value in self.dict.items()]
return hash(tuple(hashes))
def __eq__(self, other):
class EqTensor:
def __init__(self, tensor):
self.tensor = tensor
def __eq__(self, other):
if not torch.is_tensor(self.tensor):
return self.tensor == other.tensor
return torch.all(self.tensor.eq(other.tensor))
return {key: EqTensor(value) for key, value in self.dict.items()} ==\
{key: EqTensor(value) for key, value in other.dict.items()}
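# ---------------------------------------------------------------------------
# Editor's note: a short usage sketch of the FrozenSampleDict defined above
# (assumed to run in this module, after the class definition). Wrapping the
# same sample dict twice yields objects that compare equal and hash identically,
# which is what lets functools.lru_cache treat repeated sample dicts as hits.
if __name__ == "__main__":
    _x = torch.ones(2, 1)
    _a = FrozenSampleDict({'x': _x})
    _b = FrozenSampleDict({'x': _x})
    assert _a == _b and hash(_a) == hash(_b)
# ---------------------------------------------------------------------------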
def lru_cache_for_sample_dict():
"""
Memoize the results of the decorated method, keyed by its sample-dict arguments.
Note that dictionary arguments of the target function must be sample dicts.
Returns
-------
decorator function
Examples
--------
>>> import time
>>> import torch.nn as nn
>>> import pixyz.utils as utils
>>> utils.set_cache_maxsize(2)
>>> import pixyz.distributions as pd
>>> class LongEncoder(pd.Normal):
... def __init__(self):
... super().__init__(var=['x'], cond_var=['y'])
... self.nn = nn.Sequential(*(nn.Linear(1,1) for i in range(10000)))
... def forward(self, y):
... return {'loc': self.nn(y), 'scale': torch.ones(1,1)}
... @lru_cache_for_sample_dict()
... def get_params(self, params_dict={}, **kwargs):
... return super().get_params(params_dict, **kwargs)
>>> def measure_time(func):
... start = time.time()
... func()
... elapsed_time = time.time() - start
... return elapsed_time
>>> le = LongEncoder()
>>> y = torch.ones(1, 1)
>>> t_sample1 = measure_time(lambda:le.sample({'y': y}))
>>> print ("sample1:{0}".format(t_sample1) + "[sec]") # doctest: +SKIP
>>> t_log_prob = measure_time(lambda:le.get_log_prob({'x': y, 'y': y}))
>>> print ("log_prob:{0}".format(t_log_prob) + "[sec]") # doctest: +SKIP
>>> t_sample2 = measure_time(lambda:le.sample({'y': y}))
>>> print ("sample2:{0}".format(t_sample2) + "[sec]") # doctest: +SKIP
>>> assert t_sample1 > t_sample2, "processing time increases: {0}".format(t_sample2 - t_sample1)
"""
maxsize = cache_maxsize()
raw_decorating_function = functools.lru_cache(maxsize=maxsize, typed=False)
def decorating_function(user_function):
def wrapped_user_function(sender, *args, **kwargs):
new_args = list(args)
new_kwargs = dict(kwargs)
for i in range(len(args)):
if isinstance(args[i], FrozenSampleDict):
new_args[i] = args[i].dict
for key in kwargs.keys():
if isinstance(kwargs[key], FrozenSampleDict):
new_kwargs[key] = kwargs[key].dict
return user_function(sender, *new_args, **new_kwargs)
def frozen(wrapper):
def frozen_wrapper(sender, *args, **kwargs):
new_args = list(args)
new_kwargs = dict(kwargs)
for i in range(len(args)):
if isinstance(args[i], list):
new_args[i] = tuple(args[i])
elif isinstance(args[i], dict):
new_args[i] = FrozenSampleDict(args[i])
for key in kwargs.keys():
if isinstance(kwargs[key], list):
new_kwargs[key] = tuple(kwargs[key])
elif isinstance(kwargs[key], dict):
new_kwargs[key] = FrozenSampleDict(kwargs[key])
result = wrapper(sender, *new_args, **new_kwargs)
return result
return frozen_wrapper
return frozen(raw_decorating_function(wrapped_user_function))
return decorating_function
def tolist(a):
"""Convert a given input to the dictionary format.
Parameters
----------
a : list or other
Returns
-------
list
Examples
--------
>>> tolist(2)
[2]
>>> tolist([1, 2])
[1, 2]
>>> tolist([])
[]
"""
if type(a) is list:
return a
return [a]
def sum_samples(samples, sum_dims=None):
"""Sum a given sample across the axes.
Parameters
----------
samples : torch.Tensor
Input sample.
sum_dims : torch.Size or list of int or None
Dimensions to reduce. If it is None, all dimensions are summed except for the first dimension.
Returns
-------
torch.Tensor
Summed sample.
Examples
--------
>>> a = torch.ones([2])
>>> sum_samples(a).size()
torch.Size([2])
>>> a = torch.ones([2, 3])
>>> sum_samples(a).size()
torch.Size([2])
>>> a = torch.ones([2, 3, 4])
>>> sum_samples(a).size()
torch.Size([2])
"""
if sum_dims is not None:
if len(sum_dims) == 0:
return samples
return torch.sum(samples, dim=sum_dims)
dim = samples.dim()
if dim == 1:
return samples
dim_list = list(torch.arange(samples.dim()))
samples = torch.sum(samples, dim=dim_list[1:])
return samples
def print_latex(obj):
"""Print formulas in latex format.
Parameters
----------
obj : pixyz.distributions.distributions.Distribution, pixyz.losses.losses.Loss or pixyz.models.model.Model.
"""
if isinstance(obj, pixyz.distributions.distributions.Distribution):
latex_text = obj.prob_joint_factorized_and_text
elif isinstance(obj, pixyz.distributions.distributions.DistGraph):
latex_text = obj.prob_joint_factorized_and_text
elif isinstance(obj, pixyz.losses.losses.Loss):
latex_text = obj.loss_text
elif isinstance(obj, pixyz.models.model.Model):
latex_text = obj.loss_cls.loss_text
return Math(latex_text)
def convert_latex_name(name):
return sympy.latex(sympy.Symbol(name))
| 10,567
| 24.965602
| 111
|
py
|
pixyz
|
pixyz-main/pixyz/__init__.py
|
name = "pixyz"
__version__ = "0.3.3"
| 37
| 11.666667
| 21
|
py
|
pixyz
|
pixyz-main/pixyz/distributions/distributions.py
|
from __future__ import print_function
import torch
import re
import networkx as nx
from torch import nn
from ..utils import get_dict_values, replace_dict_keys, delete_dict_values,\
tolist, sum_samples, convert_latex_name, lru_cache_for_sample_dict
from ..losses import LogProb, Prob
def _make_prob_text(dist_name, var, cond_var):
var_text = ','.join(convert_latex_name(var_name) for var_name in var)
cond_text = '' if len(cond_var) == 0 else \
'|' + ','.join(convert_latex_name(var_name) for var_name in cond_var)
return f"{dist_name}({var_text}{cond_text})"
def _make_prob_equality_text(prob_text, prob_factorized_text):
if prob_factorized_text == prob_text:
return prob_text
else:
return f"{prob_text} = {prob_factorized_text}"
def _make_distribution_text(prob_joint_factorized_and_text, network_text):
# Distribution
text = f"Distribution:\n {prob_joint_factorized_and_text}\n"
# Network architecture (`repr`)
network_text = re.sub('^', ' ' * 2, str(network_text), flags=re.MULTILINE)
text += f"Network architecture:\n{network_text}"
return text
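# ---------------------------------------------------------------------------
# Editor's note: a brief, illustrative usage of the text helpers above (guarded
# so it does not run on import; convert_latex_name is imported at the top of
# this module). The strings below are just an example, not library output.
if __name__ == "__main__":
    _p = _make_prob_text("p", ["x"], ["y"])             # 'p(x|y)'
    print(_p)
    print(_make_prob_equality_text(_p, "p(x|y)p(y)"))   # 'p(x|y) = p(x|y)p(y)'
# ---------------------------------------------------------------------------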
class Factor:
"""
This class wraps an atomic distribution as a factor node of a DistGraph.
It allocates a new instance even if the same atomic distribution is specified.
This class assumes that its lifespan is covered by the lifespan of the DistGraph.
"""
def __init__(self, atom_dist):
self.dist = atom_dist
self.name_dict = {}
self.option = {}
def copy(self):
inst = Factor(self.dist)
inst.name_dict = dict(self.name_dict)
inst.option = dict(self.option)
return inst
def rename_var(self, replace_dict):
name_dict = self.name_dict
# name_dict:global->local + replace:global->new_global = name_dict:new_global->local
for var_name, new_var_name in replace_dict.items():
if var_name in name_dict:
local_var = name_dict[var_name]
del name_dict[var_name]
name_dict[new_var_name] = local_var
else:
name_dict[new_var_name] = var_name
@property
def _reversed_name_dict(self):
return {value: key for key, value in self.name_dict.items()}
@staticmethod
def __apply_dict(dict, var):
return [dict[var_name] if var_name in dict else var_name for var_name in var]
def _get_local_input_dict(self, values, input_var=None):
if not input_var:
input_var = self.dist.input_var
global_input_var = self.__apply_dict(self._reversed_name_dict, input_var)
if any(var_name not in values for var_name in global_input_var):
raise ValueError("lack of some variables")
input_dict = get_dict_values(values, global_input_var, return_dict=True)
local_input_dict = replace_dict_keys(input_dict, self.name_dict)
return local_input_dict
def sample(self, values, sample_option):
local_input_dict = self._get_local_input_dict(values)
# Overwrite log_prob_option with self.option to give priority to local settings such as batch_n
option = dict(sample_option)
option.update(self.option)
local_output_dict = self.dist.sample(local_input_dict, **option)
# TODO: the return_hidden option changes the graphical model, which is an undesirable side effect.
ignore_hidden = ('return_hidden' in sample_option and sample_option['return_hidden'])
ignore_hidden |= ('return_hidden' in self.option and self.option['return_hidden'])
if not ignore_hidden and set(local_output_dict) != set(self.dist.var):
raise Exception(f"The sample method of {self.dist.distribution_name} returns different variables."
f" Expected:{list(self.dist.var)}, Got:{list(local_output_dict)}")
sample = replace_dict_keys(local_output_dict, self._reversed_name_dict)
return sample
def get_log_prob(self, values, log_prob_option):
local_input_dict = self._get_local_input_dict(values, list(self.dist.var) + list(self.dist.cond_var))
# Overwrite log_prob_option with self.option to give priority to local settings such as batch_n
option = dict(log_prob_option)
option.update(self.option)
log_prob = self.dist.get_log_prob(local_input_dict, **option)
return log_prob
def get_params(self, params_dict={}, **kwargs):
orig_params_dict = self._get_local_input_dict(params_dict)
params = self.dist.get_params(orig_params_dict, **kwargs)
return params
def sample_mean(self, values={}):
local_input_dict = self._get_local_input_dict(values)
result = self.dist.sample_mean(local_input_dict)
return result
def sample_variance(self, values={}):
local_input_dict = self._get_local_input_dict(values)
result = self.dist.sample_variance(local_input_dict)
return result
def get_entropy(self, values={}, sum_features=True, feature_dims=None):
local_input_dict = self._get_local_input_dict(values)
result = self.dist.get_entropy(local_input_dict, sum_features, feature_dims)
return result
@property
def input_var(self):
return self.__apply_dict(self._reversed_name_dict, self.dist.input_var)
@property
def var(self):
return self.__apply_dict(self._reversed_name_dict, self.dist.var)
@property
def cond_var(self):
return self.__apply_dict(self._reversed_name_dict, self.dist.cond_var)
@property
def prob_text(self):
return _make_prob_text(self.dist.name, self.var, self.cond_var)
def __str__(self):
prob_node_text = self.prob_text
factorized_text = self.dist.prob_factorized_text
if prob_node_text == factorized_text:
header_text = f"{prob_node_text}:\n"
else:
header_text = f"{prob_node_text} -> {self.dist.prob_joint_factorized_and_text}:\n"
return header_text + repr(self.dist)
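# ---------------------------------------------------------------------------
# Editor's note: a standalone sketch (plain dicts, no pixyz objects) of the
# mapping composition performed by Factor.rename_var above: name_dict maps
# global -> local names, a replace dict maps global -> new-global names, and
# composing them gives a new-global -> local mapping.
if __name__ == "__main__":
    name_dict = {}                 # starts empty: global and local names coincide
    replace = {'x': 'z'}           # rename the global variable 'x' to 'z'
    for old, new in replace.items():
        # 'z' now refers to the factor's local variable 'x'
        name_dict[new] = name_dict.pop(old) if old in name_dict else old
    print(name_dict)               # {'z': 'x'}
# ---------------------------------------------------------------------------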
class DistGraph(nn.Module):
"""
Graphical model class. This manages the graph structure of a distribution's graphical model.
It is used from the Distribution class.
"""
def __init__(self, original=None):
super().__init__()
self.graph = nx.DiGraph()
self.global_option = {}
self.marginalize_list = set()
self.name = ''
if original:
self._override_module(original)
self.graph = nx.relabel_nodes(original.graph,
{factor: factor.copy() for factor in original.factors()})
self.global_option.update(original.global_option)
self.marginalize_list.update(original.marginalize_list)
self.name = original.name
def _override_module(self, original: nn.Module):
name_offset = len(list(self.named_children()))
for i, (_, module) in enumerate(original.named_children()):
self.add_module(str(name_offset + i), module)
def appended(self, atom_dist):
""" Return new graph appended one node.
Parameters
----------
atom_dist : Distribution
Returns
-------
DistGraph
"""
new_instance = DistGraph(self)
if not new_instance.name:
new_instance.name = atom_dist.name
# factor node of an atomic distribution
factor = Factor(atom_dist)
new_instance.add_module(str(len(list(new_instance.factors()))), atom_dist)
new_instance.graph.add_node(factor)
for var_name in atom_dist.var:
if var_name in new_instance.graph:
raise ValueError(f"A new variable name '{var_name}' is already used in this graph.")
new_instance.graph.add_edge(factor, var_name)
for cond in atom_dist.cond_var:
new_instance.graph.add_edge(cond, factor)
return new_instance
def set_option(self, option_dict, var=[]):
""" Set option arguments which used when you call `sample` or `get_log_prob` methods.
Parameters
----------
option_dict: dict of str and any object
var: list of string
Examples
--------
>>> from pixyz.distributions import Normal
>>> dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * Normal(var=['y'], loc=0, scale=1)
>>> # Set options only on the sampling start node
>>> dist.graph.set_option(dict(batch_n=4, sample_shape=(2, 3)), ['y'])
>>> sample = dist.sample()
>>> sample['y'].shape
torch.Size([2, 3, 4])
>>> sample['x'].shape
torch.Size([2, 3, 4])
"""
if not var:
self.global_option = option_dict
else:
for var_name in var:
for factor in self._factors_from_variable(var_name):
factor.option = option_dict
def united(self, other):
if not set(self.var + list(self.marginalize_list)).isdisjoint(set(other.var + list(other.marginalize_list))):
raise ValueError("There is var-name conflicts between two graphs.")
if not set(self.factors()).isdisjoint(set(other.factors())):
raise ValueError("The same instances of a distribution are used between two graphs.")
scg = DistGraph(self)
scg._override_module(other)
scg.graph.update(other.graph)
scg.global_option.update(other.global_option)
scg.marginalize_list.update(other.marginalize_list)
return scg
def marginalized(self, marginalize_list):
""" Return new graph marginalized some variables
Parameters
----------
marginalize_list : iterative of str
Returns
-------
DistGraph
Examples
--------
>>> import pixyz.distributions as pd
>>> dist = pd.Normal(var=['x']).marginalize_var(['x'])
Traceback (most recent call last):
...
ValueError: marginalize_list has unknown variables or it has all of variables of `p`.
>>> dist = (pd.Normal(var=['x'])*pd.Normal(var=['y'])).marginalize_var(['x'])
>>> dist.graph.marginalize_list
{'x'}
>>> dist.var
['y']
>>> dist.cond_var
[]
"""
marginalize_list = set(marginalize_list)
if len(marginalize_list) == 0:
raise ValueError("Length of `marginalize_list` must be at least 1, got 0.")
if not marginalize_list < set(self.var):
raise ValueError("marginalize_list has unknown variables or it has all of variables of `p`.")
new_graph = DistGraph(self)
new_graph.marginalize_list.update(marginalize_list)
return new_graph
def var_replaced(self, replace_dict):
r""" Returns new graph whose variables are replaced.
Parameters
----------
replace_dict: dict of str and str
Returns
-------
DistGraph
Examples
--------
>>> from pixyz.distributions.distributions import DistGraph
>>> import pixyz.distributions as pd
>>> normal = pd.Normal(var=['x'], loc=torch.zeros(1), scale=torch.ones(1))
>>> normal2 = pd.Normal(var=['y'], loc=torch.zeros(1), scale=torch.ones(1))
>>> multi_dist = normal * normal2
>>> normal3 = pd.Normal(var=['z'], cond_var=['y'], loc='y', scale=torch.ones(1))
>>> multi_dist2 = multi_dist * normal3
>>> # renaming to a marginalized variable is not allowed
>>> dist3 = multi_dist2.marginalize_var(['y']).replace_var(z='y')
Traceback (most recent call last):
...
ValueError: ['y', 'z'] are conflicted after replaced.
>>> dist3 = multi_dist2.marginalize_var(['y']).replace_var(z='w', x='z')
>>> sample = dist3.sample()
>>> sample # doctest: +SKIP
{'w': tensor([[2.3206]]), 'z': tensor([[-0.5381]])}
>>> dist4 = multi_dist2.marginalize_var(['y']).replace_var(z='w', x='z').replace_var(z='a')
>>> print(dist4)
Distribution:
p(w,a) = \int p(a)p(w|y)p(y)dy
Network architecture:
p(y):
Normal(
name=p, distribution_name=Normal,
var=['y'], cond_var=[], input_var=[], features_shape=torch.Size([1])
(loc): torch.Size([1, 1])
(scale): torch.Size([1, 1])
)
p(w|y) -> p(z|y):
Normal(
name=p, distribution_name=Normal,
var=['z'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([1])
(scale): torch.Size([1, 1])
)
p(a) -> p(x):
Normal(
name=p, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([1])
(loc): torch.Size([1, 1])
(scale): torch.Size([1, 1])
)
>>> print(repr(dist4))
DistGraph(
(0): Normal(
name=p, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([1])
(loc): torch.Size([1, 1])
(scale): torch.Size([1, 1])
)
(1): Normal(
name=p, distribution_name=Normal,
var=['y'], cond_var=[], input_var=[], features_shape=torch.Size([1])
(loc): torch.Size([1, 1])
(scale): torch.Size([1, 1])
)
(2): Normal(
name=p, distribution_name=Normal,
var=['z'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([1])
(scale): torch.Size([1, 1])
)
)
"""
# check replace_dict
if not (set(replace_dict) <= set(self.all_var)):
unknown_var = [var_name for var_name in replace_dict.keys() if var_name not in self.all_var]
raise ValueError(f"replace_dict has unknown variables: {unknown_var}")
replaced_vars = [replace_dict[var_name] if var_name in replace_dict else var_name for var_name in self.all_var]
if len(self.all_var) != len(set(replaced_vars)):
duplicated_vars = [var_name for var_name in self.all_var
if replaced_vars.count(replace_dict[var_name]
if var_name in replace_dict else var_name) > 1]
raise ValueError(f"{duplicated_vars} are conflicted after replaced.")
result = DistGraph(original=self)
result.graph = nx.relabel_nodes(result.graph, replace_dict, copy=False)
result.marginalize_list = {replace_dict[var] if var in replace_dict else var for var in self.marginalize_list}
result.global_option = dict(self.global_option)
for factor in result.factors():
if set(replace_dict.values()).isdisjoint(list(result.graph.pred[factor]) + list(result.graph.succ[factor])):
continue
factor.rename_var(replace_dict)
return result
def _factors_from_variable(self, var_name):
return list(self.graph.pred[var_name])
def factors(self, sorted=False):
""" get factors of the DistGraph.
Parameters
----------
sorted: bool
whether the factors are yielded in topologically sorted order.
Returns
-------
iter of Factor
"""
nodes = nx.topological_sort(self.graph) if sorted else self.graph
for node in nodes:
if isinstance(node, Factor):
yield node
def distribution(self, var_name):
""" An atomic distribution of the specified variable.
Parameters
----------
var_name: str
Returns
-------
Distribution
"""
factors = self._factors_from_variable(var_name)
if len(factors) == 0:
raise ValueError(f"There is no distirbution about {var_name}.")
if len(factors) != 1:
raise NotImplementedError("multiple factors are not supported now.")
return factors[0].dist
@property
def all_var(self):
""" All variables in the DistGraph.
Returns
-------
list of str
"""
return [var_name for var_name in self.graph if isinstance(var_name, str)]
@property
def input_var(self):
""" conditional variables and observation variables in the DistGraph.
Returns
-------
list of str
"""
def is_input_var_node(var_name):
if not isinstance(var_name, str):
return False
if not self.graph.pred[var_name]:
return True
if var_name in self._factors_from_variable(var_name)[0].input_var:
return True
else:
return False
return [var_name for var_name in self.graph if is_input_var_node(var_name)]
@property
def cond_var(self):
""" conditional variables in the DistGraph.
Returns
-------
list of str
"""
return [var_name for var_name in self.graph if isinstance(var_name, str) and not self.graph.pred[var_name]]
@property
def var(self):
""" hidden variables in the DistGraph.
Returns
-------
list of str
"""
def is_var_node(var_name):
if not isinstance(var_name, str):
return False
if self.graph.pred[var_name] and var_name not in self.marginalize_list:
return True
else:
return False
return [var_name for var_name in self.graph if is_var_node(var_name)]
def forward(self, mode, kwargs):
if mode == 'sample':
return self._sample(**kwargs)
elif mode == 'get_log_prob':
return self._get_log_prob(**kwargs)
else:
raise ValueError()
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
sample_mean=False, **kwargs):
_kwargs = dict(x_dict=x_dict, batch_n=batch_n, sample_shape=sample_shape,
return_all=return_all, reparam=reparam, sample_mean=sample_mean)
_kwargs.update(kwargs)
return self('sample', kwargs=_kwargs)
def _sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
sample_mean=False, **kwargs):
"""
Sample variables of this distribution.
If :attr:`cond_var` is not empty, you should set inputs as :obj:`dict`.
Parameters
----------
x_dict : :obj:`torch.Tensor`, :obj:`list`, or :obj:`dict`, defaults to {}
Input variables.
batch_n : :obj:`int`, defaults to None.
Set batch size of parameters.
sample_shape : :obj:`list` or :obj:`NoneType`, defaults to torch.Size()
Shape of generating samples.
return_all : :obj:`bool`, defaults to True
Choose whether the output contains input variables.
reparam : :obj:`bool`, defaults to False.
Choose whether we sample variables with re-parameterized trick.
Returns
-------
output : dict
Samples of this distribution.
Examples
--------
>>> from pixyz.distributions.distributions import DistGraph
>>> import pixyz.distributions as pd
>>> # The graph is not used when accessing an atomic distribution.
>>> normal = pd.Normal(var=['x'], loc=torch.zeros(1), scale=torch.ones(1))
>>> normal.sample(batch_n=2, sample_shape=torch.Size((3, 4)),
... return_all=True, reparam=True)['x'].shape
torch.Size([3, 4, 2, 1])
>>> normal2 = pd.Normal(var=['y'], loc=torch.zeros(1), scale=torch.ones(1))
>>> multi_dist = normal * normal2
>>> sample = multi_dist.sample()
>>> sample # doctest: +SKIP
{'y': tensor([[0.6635]]), 'x': tensor([[0.3966]])}
>>> sample = multi_dist.sample(batch_n=2)
>>> normal3 = pd.Normal(var=['z'], cond_var=['y'], loc='y', scale=torch.ones(1))
>>> wrong_dist = multi_dist * normal2
Traceback (most recent call last):
...
ValueError: There is var-name conflicts between two graphs.
>>> multi_dist2 = multi_dist * normal3
>>> # TODO: this issue will be solved in another pull request; distributions with cond_var have this problem.
>>> multi_dist2.sample(batch_n=2, sample_shape=(3, 4))
Traceback (most recent call last):
...
ValueError: Batch shape mismatch. batch_shape from parameters: torch.Size([3, 4, 2, 1])
specified batch size:2
>>> sample = multi_dist2.sample(batch_n=2)
>>> sample # doctest: +SKIP
{'y': tensor([[1.6723], [0.1929]]), 'z': tensor([[ 0.8572], [-0.5933]]), 'x': tensor([[-0.4255], [-0.4793]])}
>>> sample = multi_dist2.sample(sample_shape=(1,))
>>> sample # doctest: +SKIP
{'y': tensor([[[-0.8537]]]), 'z': tensor([[[[-2.1819]]]]), 'x': tensor([[[-0.0797]]])}
>>> # With return_all=False, the result excludes conditioning variables and unused inputs.
>>> normal4 = pd.Normal(var=['a'], cond_var=['b'], loc='b', scale=torch.ones(1))
>>> dist3 = multi_dist2.marginalize_var(['y']).replace_var(z='w').replace_var(x='z').replace_var(z='x')*normal4
>>> sample = dist3.sample(x_dict={'b': torch.ones(2, 1), 'c': torch.zeros(1)}, return_all=False)
>>> sample.keys()
dict_keys(['a', 'w', 'x'])
>>> from pixyz.distributions import Normal, Categorical
>>> from pixyz.distributions.mixture_distributions import MixtureModel
>>> z_dim = 3 # the number of mixture
>>> x_dim = 2 # the input dimension.
>>> distributions = [] # the list of distributions
>>> for i in range(z_dim):
... loc = torch.randn(x_dim) # initialize the value of location (mean)
... scale = torch.empty(x_dim).fill_(1.) # initialize the value of scale (variance)
... distributions.append(Normal(loc=loc, scale=scale, var=["y"], name="p_%d" %i))
>>> probs = torch.empty(z_dim).fill_(1. / z_dim) # initialize the value of probabilities
>>> prior = Categorical(probs=probs, var=["z"], name="prior")
>>> p = MixtureModel(distributions=distributions, prior=prior)
>>> dist = normal*p
>>> dist.graph.set_option({'return_hidden': True}, var=['y'])
>>> list(dist.sample().keys())
['y', 'z', 'x']
"""
sample_option = dict(self.global_option)
sample_option.update(dict(batch_n=batch_n, sample_shape=sample_shape,
return_all=False, reparam=reparam, sample_mean=sample_mean))
sample_option.update(kwargs)
# return_all is forced to False for the factors; merging the inputs back into the result is handled below at the graph level.
if not(set(x_dict) >= set(self.input_var)):
raise ValueError(f"Input keys are not valid, expected {set(self.input_var)} but got {set(x_dict)}.")
values = get_dict_values(x_dict, self.input_var, return_dict=True)
for factor in self.factors(sorted=True):
sample = factor.sample(values, sample_option)
values.update(sample)
result_dict = delete_dict_values(values, self.marginalize_list)
if return_all:
output_dict = dict(delete_dict_values(x_dict, self.input_var))
output_dict.update(result_dict)
return output_dict
else:
return delete_dict_values(result_dict, self.input_var)
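# The loop above performs ancestral sampling over the factor graph: factors are visited
# in topological order, each one samples its variables conditioned on the values
# gathered so far, and marginalized variables are dropped at the end. Rough sketch of
# the idea in plain code (illustrative only, not the actual API):
# values = dict(observed_inputs)
# for factor in graph.factors(sorted=True):
#     values.update(factor.sample(values, option))  # parents are already in `values`
# samples = {k: v for k, v in values.items() if k not in graph.marginalize_list}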
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
return self(mode='get_log_prob', kwargs={'x_dict': x_dict, 'sum_features': sum_features,
'feature_dims': feature_dims})
def _get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
""" Giving variables, this method returns values of log-pdf.
Parameters
----------
x_dict : dict
Input variables.
sum_features : :obj:`bool`, defaults to True
Whether the output is summed across some dimensions which are specified by `feature_dims`.
feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
Set dimensions to sum across the output.
Returns
-------
log_prob : torch.Tensor
Values of log-probability density/mass function.
Examples
--------
>>> from pixyz.distributions.distributions import DistGraph
>>> import torch
>>> import pixyz.distributions as pd
>>> # The graph is not used when accessing an atomic distribution.
>>> pd.Normal(var=['x'], loc=torch.zeros(1), scale=torch.ones(1)).get_log_prob({'x': torch.zeros(1, 1)})
tensor([-0.9189])
>>> # DistGraph is used for joint distributions and the like.
>>> dist = pd.Normal(var=['x'], loc=torch.zeros(1), scale=torch.ones(1))
>>> dist *= pd.Normal(var=['y'], loc=torch.zeros(1), scale=torch.ones(1))
>>> dist = dist.replace_var(y='z')
>>> dist.get_log_prob({'x': torch.zeros(1, 1), 'z': torch.zeros(1, 1)})
tensor([-1.8379])
>>> # When variables are marginalized, the log-likelihood cannot be computed.
>>> m_dist = dist.marginalize_var(['z'])
>>> m_dist.get_log_prob({'x': torch.zeros(1, 1)})
Traceback (most recent call last):
...
NotImplementedError
"""
# """
# >>> # When stochastic variables are marginalized, the log-likelihood cannot be computed.
# >>> m_dist = dist.marginalize_var(['z'])
# >>> m_dist.get_log_prob({'x': torch.zeros(1, 1)})
# Traceback (most recent call last):
# ...
# ValueError: This distribution is marginalized by the stochastic variables '['z']'. Its log probability cannot be calculated.
# >>> # When deterministic variables are marginalized, the log-likelihood is computed under the assumption that the deterministic values match.
# >>> class MyDeterministic(pd.Deterministic):
# ... def forward(self):
# ... return {'x': torch.zeros(1, 1)}
# >>> dist = MyDeterministic(var=['x'])
# >>> dist *= pd.Normal(var=['y'], cond_var=['x'], loc='x', scale=torch.ones(1))
# >>> dist.get_log_prob({'y': torch.zeros(1, 1), 'x': torch.zeros(1, 1)})
# Traceback (most recent call last):
# ...
# NotImplementedError: Log probability of deterministic distribution is not defined.
# >>> m_dist = dist.marginalize_var(['x'])
# >>> m_dist.get_log_prob({'y': torch.zeros(1, 1)})
# tensor([-0.9189])
# """
sample_option = dict(self.global_option)
# sample_option.update(dict(batch_n=batch_n, sample_shape=sample_shape, return_all=False))
if len(self.marginalize_list) != 0:
raise NotImplementedError()
log_prob_option = dict(self.global_option)
log_prob_option.update(dict(sum_features=sum_features, feature_dims=feature_dims))
log_prob_option.update(kwargs)
require_var = self.var + self.cond_var
if not(set(x_dict) >= set(require_var)):
raise ValueError(f"Input keys are not valid, expected {set(require_var)}"
f" but got {set(x_dict)}.")
values = get_dict_values(x_dict, require_var, return_dict=True)
log_prob = None
prev_dist = None
for factor in self.factors(sorted=True):
local_var = self.graph.succ[factor]
local_marginalized_var = [var_name for var_name in local_var if var_name in self.marginalize_list]
if len(local_marginalized_var) != 0:
if any(var_name in values for var_name in local_marginalized_var):
raise ValueError(f"The marginalized variables '{local_marginalized_var}'"
f" appears in the dictionary: {x_dict}.")
if factor.dist.distribution_name != "Deterministic":
raise ValueError(f"This distribution is marginalized by the stochastic variables '{local_marginalized_var}'."
f" Log probability of it can not be calcurated.")
if set(local_var) != set(local_marginalized_var):
raise ValueError("Some deterministic variables are not marginalized.")
# batch_n can be inferred from the values given to downstream variables; sample_shape could be resolved with named shapes.
sample = factor.sample(values, sample_option)
values.update(sample)
continue
new_log_prob = factor.get_log_prob(values, log_prob_option)
if log_prob is None:
log_prob = new_log_prob
else:
if log_prob.size() != new_log_prob.size():
raise ValueError(f"Two PDFs, {prev_dist.prob_text} and {factor.dist.prob_text}, have different sizes,"
" so you must modify these tensor sizes.")
log_prob += new_log_prob
prev_dist = factor.dist
if log_prob is None:
return 0
return log_prob
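# The log-density of the factorized joint is the sum of per-factor terms, e.g.
# log p(x, z) = log p(x | z) + log p(z); factors whose (deterministic) outputs are
# marginalized are run forward to supply values instead of being scored. Illustrative
# check with hypothetical single-variable distributions p_x, p_y and a dict `xy`
# containing both variables:
# >>> torch.allclose((p_x * p_y).get_log_prob(xy),
# ...                p_x.get_log_prob(xy) + p_y.get_log_prob(xy))  # doctest: +SKIP
# True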
def get_params(self, params_dict={}, **kwargs):
if len(self.var) != 1:
raise NotImplementedError()
for factor in self.factors():
result = factor.get_params(params_dict, **kwargs)
return result
def sample_mean(self, x_dict={}):
if len(self.var) != 1:
raise NotImplementedError()
for factor in self.factors():
result = factor.sample_mean(x_dict)
return result
def sample_variance(self, x_dict={}):
if len(self.var) != 1:
raise NotImplementedError()
for factor in self.factors():
result = factor.sample_variance(x_dict)
return result
def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):
if len(self.var) != 1:
raise NotImplementedError()
for factor in self.factors():
result = factor.get_entropy(x_dict, sum_features, feature_dims)
return result
@property
def has_reparam(self):
return all(factor.dist.has_reparam for factor in self.factors())
def __str__(self):
network_text = "\n".join(str(factor) for factor in self.factors(sorted=True))
return _make_distribution_text(self.prob_joint_factorized_and_text, network_text)
@property
def prob_text(self):
return _make_prob_text(self.name, self.var, self.cond_var)
@property
def prob_factorized_text(self):
text = ""
for factor in self.factors(sorted=True):
text = factor.prob_text + text
if self.marginalize_list:
integral_symbol = len(self.marginalize_list) * "\\int "
integral_variables = ["d" + convert_latex_name(var) for var in self.marginalize_list]
integral_variables = "".join(integral_variables)
return f"{integral_symbol}{text}{integral_variables}"
return text
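# Example of the produced strings (illustrative; the exact text depends on factor
# order and convert_latex_name): a graph built as p(z) * p(x|z) factorizes to
# "p(x|z)p(z)" because the text is assembled right-to-left over the sorted factors,
# and marginalizing z yields roughly "\int p(x|z)p(z)dz".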
@property
def prob_joint_factorized_and_text(self):
return _make_prob_equality_text(self.prob_text, self.prob_factorized_text)
def visible_graph(self, dotmode=False):
visible_graph = nx.DiGraph()
def dont_esc(name: str):
return f"${name}$"
for factor in self.factors():
for var_name in factor.var:
for cond_var_name in factor.cond_var:
if dotmode:
visible_graph.add_edge(cond_var_name, var_name)
else:
visible_graph.add_edge(dont_esc(cond_var_name), dont_esc(var_name))
if dotmode:
for var_name in visible_graph:
visible_graph.add_node(var_name, texlbl=dont_esc(var_name))
return visible_graph
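# visible_graph collapses the factor nodes and keeps only variable-to-variable edges
# (cond_var -> var), i.e. the DAG a user would draw for the model; in non-dot mode the
# names are wrapped as $name$ so graph renderers display them as math labels.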
class Distribution(nn.Module):
"""Distribution class. In Pixyz, all distributions are required to inherit this class.
Examples
--------
>>> import torch
>>> from torch.nn import functional as F
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[64], name="p1")
>>> print(p1)
Distribution:
p_{1}(x)
Network architecture:
Normal(
name=p_{1}, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([64])
(loc): torch.Size([1, 64])
(scale): torch.Size([1, 64])
)
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[64], name="p2")
>>> print(p2)
Distribution:
p_{2}(x|y)
Network architecture:
Normal(
name=p_{2}, distribution_name=Normal,
var=['x'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([64])
(scale): torch.Size([1, 64])
)
>>> # Conditional distribution (by neural networks)
>>> class P(Normal):
... def __init__(self):
... super().__init__(var=["x"],cond_var=["y"],name="p3")
... self.model_loc = nn.Linear(128, 64)
... self.model_scale = nn.Linear(128, 64)
... def forward(self, y):
... return {"loc": self.model_loc(y), "scale": F.softplus(self.model_scale(y))}
>>> p3 = P()
>>> print(p3)
Distribution:
p_{3}(x|y)
Network architecture:
P(
name=p_{3}, distribution_name=Normal,
var=['x'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([])
(model_loc): Linear(in_features=128, out_features=64, bias=True)
(model_scale): Linear(in_features=128, out_features=64, bias=True)
)
"""
def __init__(self, var, cond_var=[], name="p", features_shape=torch.Size(), atomic=True):
"""
Parameters
----------
var : :obj:`list` of :obj:`str`
Variables of this distribution.
cond_var : :obj:`list` of :obj:`str`, defaults to []
Conditional variables of this distribution.
If cond_var is not empty, the corresponding inputs must be given in order to sample variables.
name : :obj:`str`, defaults to "p"
Name of this distribution.
This name is displayed in :attr:`prob_text` and :attr:`prob_factorized_text`.
features_shape : :obj:`torch.Size` or :obj:`list`, defaults to torch.Size()
Shape of dimensions (features) of this distribution.
"""
super().__init__()
_vars = cond_var + var
if len(_vars) != len(set(_vars)):
raise ValueError("There are conflicted variables.")
self._cond_var = cond_var
self._var = var
self._name = convert_latex_name(name)
self._atomic = atomic
if atomic and len(var) == 0:
raise ValueError("At least one variable is required for an atomic distribution.")
self._graph = None
self._features_shape = torch.Size(features_shape)
@property
def graph(self):
if self._atomic:
if not self._graph:
# (graph,) for escaping meta-language of nn.Module
self._graph = (DistGraph().appended(atom_dist=self),)
return self._graph[0]
else:
return self._graph
@property
def distribution_name(self):
"""str: Name of this distribution class."""
return ""
@property
def name(self):
"""str: Name of this distribution displayed in :obj:`prob_text` and :obj:`prob_factorized_text`."""
return self._name
@name.setter
def name(self, name):
if type(name) is str:
self._name = name
if self._atomic:
self.graph.name = name
return
raise ValueError("Name of the distribution class must be a string type.")
@property
def var(self):
"""list: Variables of this distribution."""
return self._var if self._atomic else self.graph.var
@property
def cond_var(self):
"""list: Conditional variables of this distribution."""
return self._cond_var if self._atomic else self.graph.cond_var
@property
def input_var(self):
"""list: Input variables of this distribution.
Normally, it has the same values as :attr:`cond_var`.
"""
return self._cond_var if self._atomic else self.graph.input_var
@property
def prob_text(self):
"""str: Return a formula of the (joint) probability distribution."""
if not self._atomic:
return self.graph.prob_text
return _make_prob_text(self._name, self.var, self.cond_var)
@property
def prob_factorized_text(self):
"""str: Return a formula of the factorized probability distribution."""
if not self._atomic:
return self.graph.prob_factorized_text
return self.prob_text
@property
def prob_joint_factorized_and_text(self):
"""str: Return a formula of the factorized and the (joint) probability distributions."""
if not self._atomic:
return self.graph.prob_joint_factorized_and_text
return _make_prob_equality_text(self.prob_text, self.prob_factorized_text)
@property
def features_shape(self):
"""torch.Size or list: Shape of features of this distribution."""
return self._features_shape
def _get_input_dict(self, input, var=None):
"""Check the type of given input.
If the input type is :obj:`dict`, this method checks whether the input keys contain the :attr:`var` list.
If its type is :obj:`list` or :obj:`torch.Tensor`, it returns the output formatted as :obj:`dict`.
Parameters
----------
input : :obj:`torch.Tensor`, :obj:`list`, or :obj:`dict`
Input variables.
var : :obj:`list` or :obj:`NoneType`, defaults to None
Variables to check if given input contains them.
This is set to None by default.
Returns
-------
input_dict : dict
Variables checked in this method.
Raises
------
ValueError
Raises `ValueError` if the type of input is neither :obj:`torch.Tensor`, :obj:`list`, nor :obj:`dict`.
"""
if var is None:
var = self.input_var
if type(input) is torch.Tensor:
input_dict = {var[0]: input}
elif type(input) is list:
# TODO: we need to check if all the elements contained in this list are torch.Tensor.
input_dict = dict(zip(var, input))
elif type(input) is dict:
if not (set(input) >= set(var)):
raise ValueError(f"Input keys are not valid, expected {set(var)} but got {set(input)}.")
input_dict = get_dict_values(input, var, return_dict=True)
else:
raise ValueError("The type of input is not valid, got %s." % type(input))
return input_dict
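# Accepted input forms, assuming a hypothetical distribution `p` whose input_var is
# ['y'] (illustrative sketch only):
# >>> p._get_input_dict(torch.zeros(1, 10))                 # doctest: +SKIP
# {'y': tensor([[0., ...]])}
# >>> p._get_input_dict([torch.zeros(1, 10)])               # list maps onto var order  # doctest: +SKIP
# >>> p._get_input_dict({'y': torch.zeros(1, 10), 'a': 1})  # extra keys are filtered out  # doctest: +SKIP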
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True,
reparam=False, sample_mean=False, **kwargs):
"""Sample variables of this distribution.
If :attr:`cond_var` is not empty, you should set inputs as :obj:`dict`.
Parameters
----------
x_dict : :obj:`torch.Tensor`, :obj:`list`, or :obj:`dict`, defaults to {}
Input variables.
batch_n : :obj:`int`, defaults to None.
Set batch size of parameters.
sample_shape : :obj:`list` or :obj:`NoneType`, defaults to torch.Size()
Shape of generating samples.
return_all : :obj:`bool`, defaults to True
Choose whether the output contains input variables.
reparam : :obj:`bool`, defaults to False.
Choose whether we sample variables with re-parameterized trick.
Returns
-------
output : dict
Samples of this distribution.
Examples
--------
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p = Normal(loc=0, scale=1, var=["x"], features_shape=[10, 2])
>>> print(p)
Distribution:
p(x)
Network architecture:
Normal(
name=p, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([10, 2])
(loc): torch.Size([1, 10, 2])
(scale): torch.Size([1, 10, 2])
)
>>> p.sample()["x"].shape # (batch_n=1, features_shape)
torch.Size([1, 10, 2])
>>> p.sample(batch_n=20)["x"].shape # (batch_n, features_shape)
torch.Size([20, 10, 2])
>>> p.sample(batch_n=20, sample_shape=[40, 30])["x"].shape # (sample_shape, batch_n, features_shape)
torch.Size([40, 30, 20, 10, 2])
>>> # Conditional distribution
>>> p = Normal(loc="y", scale=1., var=["x"], cond_var=["y"], features_shape=[10])
>>> print(p)
Distribution:
p(x|y)
Network architecture:
Normal(
name=p, distribution_name=Normal,
var=['x'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([10])
(scale): torch.Size([1, 10])
)
>>> sample_y = torch.randn(1, 10) # Pseudo data
>>> sample_a = torch.randn(1, 10) # Pseudo data
>>> sample = p.sample({"y": sample_y})
>>> print(sample) # input_var + var # doctest: +SKIP
{'y': tensor([[-0.5182, 0.3484, 0.9042, 0.1914, 0.6905,
-1.0859, -0.4433, -0.0255, 0.8198, 0.4571]]),
'x': tensor([[-0.7205, -1.3996, 0.5528, -0.3059, 0.5384,
-1.4976, -0.1480, 0.0841,0.3321, 0.5561]])}
>>> sample = p.sample({"y": sample_y, "a": sample_a}) # Redundant input ("a")
>>> print(sample) # input_var + var + "a" (redundant input) # doctest: +SKIP
{'y': tensor([[ 1.3582, -1.1151, -0.8111, 1.0630, 1.1633,
0.3855, 2.6324, -0.9357, -0.8649, -0.6015]]),
'a': tensor([[-0.1874, 1.7958, -1.4084, -2.5646, 1.0868,
-0.7523, -0.0852, -2.4222, -0.3914, -0.9755]]),
'x': tensor([[-0.3272, -0.5222, -1.3659, 1.8386, 2.3204,
0.3686, 0.6311, -1.1208, 0.3656, -0.6683]])}
"""
if self.graph:
return self.graph.sample(x_dict, batch_n, sample_shape, return_all, reparam, sample_mean, **kwargs)
raise NotImplementedError()
@property
def has_reparam(self):
if self.graph:
return self.graph.has_reparam
raise NotImplementedError()
def sample_mean(self, x_dict={}):
"""Return the mean of the distribution.
Parameters
----------
x_dict : :obj:`dict`, defaults to {}
Parameters of this distribution.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10], name="p1")
>>> mean = p1.sample_mean()
>>> print(mean)
tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[10], name="p2")
>>> sample_y = torch.randn(1, 10) # Pseudo data
>>> mean = p2.sample_mean({"y": sample_y})
>>> print(mean) # doctest: +SKIP
tensor([[-0.2189, -1.0310, -0.1917, -0.3085, 1.5190, -0.9037, 1.2559, 0.1410,
1.2810, -0.6681]])
"""
if self.graph:
return self.graph.sample_mean(x_dict)
raise NotImplementedError()
def sample_variance(self, x_dict={}):
"""Return the variance of the distribution.
Parameters
----------
x_dict : :obj:`dict`, defaults to {}
Parameters of this distribution.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10], name="p1")
>>> var = p1.sample_variance()
>>> print(var)
tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]])
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[10], name="p2")
>>> sample_y = torch.randn(1, 10) # Pseudo data
>>> var = p2.sample_variance({"y": sample_y})
>>> print(var) # doctest: +SKIP
tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]])
"""
if self.graph:
return self.graph.sample_variance(x_dict)
raise NotImplementedError()
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
"""Giving variables, this method returns values of log-pdf.
Parameters
----------
x_dict : dict
Input variables.
sum_features : :obj:`bool`, defaults to True
Whether the output is summed across some dimensions which are specified by `feature_dims`.
feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
Set dimensions to sum across the output.
Returns
-------
log_prob : torch.Tensor
Values of log-probability density/mass function.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10], name="p1")
>>> sample_x = torch.randn(1, 10) # Pseudo data
>>> log_prob = p1.log_prob({"x": sample_x})
>>> print(log_prob) # doctest: +SKIP
tensor([-16.1153])
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[10], name="p2")
>>> sample_y = torch.randn(1, 10) # Pseudo data
>>> log_prob = p2.log_prob({"x": sample_x, "y": sample_y})
>>> print(log_prob) # doctest: +SKIP
tensor([-21.5251])
"""
if self.graph:
return self.graph.get_log_prob(x_dict, sum_features, feature_dims, **kwargs)
raise NotImplementedError()
def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):
"""Giving variables, this method returns values of entropy.
Parameters
----------
x_dict : dict, defaults to {}
Input variables.
sum_features : :obj:`bool`, defaults to True
Whether the output is summed across some dimensions which are specified by :attr:`feature_dims`.
feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
Set dimensions to sum across the output.
Returns
-------
entropy : torch.Tensor
Values of entropy.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10], name="p1")
>>> entropy = p1.get_entropy()
>>> print(entropy)
tensor([14.1894])
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[10], name="p2")
>>> sample_y = torch.randn(1, 10) # Pseudo data
>>> entropy = p2.get_entropy({"y": sample_y})
>>> print(entropy)
tensor([14.1894])
"""
if self.graph:
return self.graph.get_entropy(x_dict, sum_features, feature_dims)
raise NotImplementedError()
def get_params(self, params_dict={}, **kwargs):
if self.graph:
return self.graph.get_params(params_dict, **kwargs)
raise NotImplementedError()
def log_prob(self, sum_features=True, feature_dims=None):
"""Return an instance of :class:`pixyz.losses.LogProb`.
Parameters
----------
sum_features : :obj:`bool`, defaults to True
Whether the output is summed across some axes (dimensions) which are specified by :attr:`feature_dims`.
feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
Set axes to sum across the output.
Returns
-------
pixyz.losses.LogProb
An instance of :class:`pixyz.losses.LogProb`
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10], name="p1")
>>> sample_x = torch.randn(1, 10) # Pseudo data
>>> log_prob = p1.log_prob().eval({"x": sample_x})
>>> print(log_prob) # doctest: +SKIP
tensor([-16.1153])
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[10], name="p2")
>>> sample_y = torch.randn(1, 10) # Pseudo data
>>> log_prob = p2.log_prob().eval({"x": sample_x, "y": sample_y})
>>> print(log_prob) # doctest: +SKIP
tensor([-21.5251])
"""
return LogProb(self, sum_features=sum_features, feature_dims=feature_dims)
def prob(self, sum_features=True, feature_dims=None):
"""Return an instance of :class:`pixyz.losses.Prob`.
Parameters
----------
sum_features : :obj:`bool`, defaults to True
Choose whether the output is summed across some axes (dimensions)
which are specified by :attr:`feature_dims`.
feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
Set dimensions to sum across the output. (Note: this parameter is not used for now.)
Returns
-------
pixyz.losses.Prob
An instance of :class:`pixyz.losses.Prob`
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10], name="p1")
>>> sample_x = torch.randn(1, 10) # Pseudo data
>>> prob = p1.prob().eval({"x": sample_x})
>>> print(prob) # doctest: +SKIP
tensor([4.0933e-07])
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[10], name="p2")
>>> sample_y = torch.randn(1, 10) # Pseudo data
>>> prob = p2.prob().eval({"x": sample_x, "y": sample_y})
>>> print(prob) # doctest: +SKIP
tensor([2.9628e-09])
"""
return Prob(self, sum_features=sum_features, feature_dims=feature_dims)
def forward(self, *args, **kwargs):
"""When this class is inherited by DNNs, this method should be overrided."""
raise NotImplementedError()
def replace_var(self, **replace_dict):
"""Return an instance of :class:`pixyz.distributions.ReplaceVarDistribution`.
Parameters
----------
replace_dict : dict
Dictionary mapping original variable names to replaced names.
Returns
-------
pixyz.distributions.ReplaceVarDistribution
An instance of :class:`pixyz.distributions.ReplaceVarDistribution`
"""
return ReplaceVarDistribution(self, replace_dict)
def marginalize_var(self, marginalize_list):
"""Return an instance of :class:`pixyz.distributions.MarginalizeVarDistribution`.
Parameters
----------
marginalize_list : :obj:`list` or other
Variables to marginalize.
Returns
-------
pixyz.distributions.MarginalizeVarDistribution
An instance of :class:`pixyz.distributions.MarginalizeVarDistribution`
"""
marginalize_list = tolist(marginalize_list)
return MarginalizeVarDistribution(self, marginalize_list)
def __mul__(self, other):
return MultiplyDistribution(self, other)
def __str__(self):
if not self._atomic:
return str(self.graph)
network_text = self.__repr__()
return _make_distribution_text(self.prob_joint_factorized_and_text, network_text)
def extra_repr(self):
# parameters
parameters_text = f'name={self.name}, distribution_name={self.distribution_name},\n' \
f'var={self.var}, cond_var={self.cond_var}, input_var={self.input_var}, ' \
f'features_shape={self.features_shape}'
if len(self._buffers) != 0:
# add buffers to repr
buffers = [f"({key}): {value.shape}" for key, value in self._buffers.items()]
return parameters_text + "\n" + "\n".join(buffers)
return parameters_text
class DistributionBase(Distribution):
"""Distribution class with PyTorch. In Pixyz, all distributions are required to inherit this class."""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), **kwargs):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape)
self._set_buffers(**kwargs)
self._dist = None
def _set_buffers(self, **params_dict):
"""Format constant parameters of this distribution as buffers.
Parameters
----------
params_dict : dict
Constant parameters of this distribution set at initialization.
If a value in this dictionary is given as a string, the parameter is treated as a `variable`, and the
correspondence between the variable name and the parameter name it stands for is stored as
:obj:`dict` (:attr:`replace_params_dict`).
"""
self.replace_params_dict = {}
for key, value in params_dict.items():
if type(value) is str:
if value in self._cond_var:
if value not in self.replace_params_dict:
self.replace_params_dict[value] = []
self.replace_params_dict[value].append(key)
else:
raise ValueError(f"parameter setting {key}:{value} is not valid"
f" because cond_var does not contains {value}.")
elif isinstance(value, torch.Tensor) \
or isinstance(value, float) or isinstance(value, int):
if not isinstance(value, torch.Tensor):
features = torch.tensor(value, dtype=torch.float)
else:
features = value
features_checked = self._check_features_shape(features)
# clone features to make it contiguous & to make it independent.
self.register_buffer(key, features_checked.clone())
else:
raise ValueError(f"The types that can be specified as parameters of distribution"
f" are limited to str & torch.Tensor. Got: {type(value)}")
def _check_features_shape(self, features):
# scalar
if features.size() == torch.Size():
features = features.expand(self.features_shape)
if self.features_shape == torch.Size():
self._features_shape = features.shape
if features.size() == self.features_shape:
batches = features.unsqueeze(0)
return batches
raise ValueError(f"the shape of a given parameter {features.size()}"
f" and features_shape {self.features_shape} do not match.")
@property
def params_keys(self):
"""list: Return the list of parameter names for this distribution."""
raise NotImplementedError()
@property
def distribution_torch_class(self):
"""Return the class of PyTorch distribution."""
raise NotImplementedError()
@property
def dist(self):
"""Return the instance of PyTorch distribution."""
return self._dist
def set_dist(self, x_dict={}, batch_n=None, **kwargs):
"""Set :attr:`dist` as PyTorch distributions given parameters.
This requires that :attr:`params_keys` and :attr:`distribution_torch_class` are set.
Parameters
----------
x_dict : :obj:`dict`, defaults to {}.
Parameters of this distribution.
batch_n : :obj:`int`, defaults to None.
Set batch size of parameters.
**kwargs
Arbitrary keyword arguments.
Returns
-------
"""
params = self.get_params(x_dict, **kwargs)
if set(self.params_keys) != set(params.keys()):
raise ValueError(f"{type(self)} class requires following parameters: {set(self.params_keys)}\n"
f"but got {set(params.keys())}")
self._dist = self.distribution_torch_class(**params)
# expand batch_n
if batch_n:
batch_shape = self._dist.batch_shape
if batch_shape[0] == 1:
self._dist = self._dist.expand(torch.Size([batch_n]) + batch_shape[1:])
elif batch_shape[0] == batch_n:
return
else:
raise ValueError(f"Batch shape mismatch. batch_shape from parameters: {batch_shape}\n"
f" specified batch size:{batch_n}")
def get_sample(self, reparam=False, sample_shape=torch.Size()):
"""Get a sample_shape shaped sample from :attr:`dist`.
Parameters
----------
reparam : :obj:`bool`, defaults to False.
Choose whether to sample using the re-parameterization trick.
sample_shape : :obj:`tuple` or :obj:`torch.Size`, defaults to torch.Size().
Set the shape of a generated sample.
Returns
-------
samples_dict : dict
Generated sample formatted by :obj:`dict`.
"""
if reparam and self.dist.has_rsample:
_samples = self.dist.rsample(sample_shape=sample_shape)
else:
_samples = self.dist.sample(sample_shape=sample_shape)
samples_dict = {self._var[0]: _samples}
return samples_dict
@property
def has_reparam(self):
raise NotImplementedError()
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
_x_dict = get_dict_values(x_dict, self._cond_var, return_dict=True)
self.set_dist(_x_dict)
x_targets = get_dict_values(x_dict, self._var)
if len(x_targets) == 0:
raise ValueError(f"x_dict has no value of the stochastic variable. x_dict: {x_dict}")
log_prob = self.dist.log_prob(*x_targets)
if sum_features:
log_prob = sum_samples(log_prob, feature_dims)
return log_prob
@lru_cache_for_sample_dict()
def get_params(self, params_dict={}, **kwargs):
"""This method aims to get parameters of this distributions from constant parameters set in initialization
and outputs of DNNs.
Parameters
----------
params_dict : :obj:`dict`, defaults to {}
Input parameters.
Returns
-------
output_dict : dict
Output parameters.
Examples
--------
>>> from pixyz.distributions import Normal
>>> dist_1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[1])
>>> print(dist_1)
Distribution:
p(x)
Network architecture:
Normal(
name=p, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([1])
(loc): torch.Size([1, 1])
(scale): torch.Size([1, 1])
)
>>> dist_1.get_params()
{'loc': tensor([[0.]]), 'scale': tensor([[1.]])}
>>> dist_2 = Normal(loc=torch.tensor(0.), scale="z", cond_var=["z"], var=["x"])
>>> print(dist_2)
Distribution:
p(x|z)
Network architecture:
Normal(
name=p, distribution_name=Normal,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
(loc): torch.Size([1])
)
>>> dist_2.get_params({"z": torch.tensor(1.)})
{'scale': tensor(1.), 'loc': tensor([0.])}
"""
replaced_params_dict = {}
for key, value in params_dict.items():
if key in self.replace_params_dict:
for replaced_key in self.replace_params_dict[key]:
replaced_params_dict[replaced_key] = value
vars_dict = {key: value for key, value in params_dict.items() if key not in self.replace_params_dict}
output_dict = self(**vars_dict)
output_dict.update(replaced_params_dict)
# append constant parameters to output_dict
constant_params_dict = get_dict_values(dict(self.named_buffers()), self.params_keys,
return_dict=True)
output_dict.update(constant_params_dict)
return output_dict
def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):
_x_dict = get_dict_values(x_dict, self._cond_var, return_dict=True)
self.set_dist(_x_dict)
entropy = self.dist.entropy()
if sum_features:
entropy = sum_samples(entropy, feature_dims)
return entropy
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
sample_mean=False, **kwargs):
# check whether the input is valid or convert it to valid dictionary.
input_dict = self._get_input_dict(x_dict)
self.set_dist(input_dict, batch_n=batch_n)
if sample_mean:
mean = self.dist.mean
if sample_shape != torch.Size():
unsqueeze_shape = torch.Size([1] * len(sample_shape))
unrepeat_shape = torch.Size([1] * mean.ndim)
mean = mean.reshape(unsqueeze_shape + mean.shape).repeat(sample_shape + unrepeat_shape)
output_dict = {self._var[0]: mean}
else:
output_dict = self.get_sample(reparam=reparam, sample_shape=sample_shape)
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
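# sample_mean=True returns the distribution mean rather than a random draw; when
# sample_shape is given, the mean is tiled to match, e.g. a mean of shape (1, 10) with
# sample_shape=(3,) is reshaped to (1, 1, 10) and repeated to (3, 1, 10) (illustrative
# description of the branch above).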
def sample_mean(self, x_dict={}):
self.set_dist(x_dict)
return self.dist.mean
def sample_variance(self, x_dict={}):
self.set_dist(x_dict)
return self.dist.variance
def forward(self, **params):
return params
@property
def prob_factorized_text(self):
"""str: Return a formula of the factorized probability distribution."""
return self.graph.prob_text
class MultiplyDistribution(Distribution):
"""Multiply by given distributions, e.g, :math:`p(x,y|z) = p(x|z,y)p(y|z)`.
This class checks whether the two given distributions can be multiplied.
p(x|z)p(z|y) -> Valid
p(x|z)p(y|z) -> Valid
p(x|z)p(y|a) -> Valid
p(x|z)p(z|x) -> Invalid (recursive)
p(x|z)p(x|y) -> Invalid (conflict)
Examples
--------
>>> a = DistributionBase(var=["x"],cond_var=["z"])
>>> b = DistributionBase(var=["z"],cond_var=["y"])
>>> p_multi = MultiplyDistribution(a, b)
>>> print(p_multi)
Distribution:
p(x,z|y) = p(x|z)p(z|y)
Network architecture:
p(z|y):
DistributionBase(
name=p, distribution_name=,
var=['z'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([])
)
p(x|z):
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
>>> b = DistributionBase(var=["y"],cond_var=["z"])
>>> p_multi = MultiplyDistribution(a, b)
>>> print(p_multi)
Distribution:
p(x,y|z) = p(x|z)p(y|z)
Network architecture:
p(y|z):
DistributionBase(
name=p, distribution_name=,
var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
p(x|z):
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
>>> b = DistributionBase(var=["y"],cond_var=["a"])
>>> p_multi = MultiplyDistribution(a, b)
>>> print(p_multi)
Distribution:
p(x,y|z,a) = p(x|z)p(y|a)
Network architecture:
p(y|a):
DistributionBase(
name=p, distribution_name=,
var=['y'], cond_var=['a'], input_var=['a'], features_shape=torch.Size([])
)
p(x|z):
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
"""
def __init__(self, a, b):
"""
Parameters
----------
a : pixyz.Distribution
Distribution.
b : pixyz.Distribution
Distribution.
"""
super().__init__(var=[], atomic=False)
self._graph = a.graph.united(b.graph)
def __repr__(self):
return repr(self.graph)
class ReplaceVarDistribution(Distribution):
"""Replace names of variables in Distribution.
Examples
--------
>>> p = DistributionBase(var=["x"],cond_var=["z"])
>>> print(p)
Distribution:
p(x|z)
Network architecture:
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
>>> replace_dict = {'x': 'y'}
>>> p_repl = ReplaceVarDistribution(p, replace_dict)
>>> print(p_repl)
Distribution:
p(y|z)
Network architecture:
p(y|z) -> p(x|z):
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
"""
def __init__(self, p, replace_dict):
"""
Parameters
----------
p : :class:`pixyz.distributions.Distribution` (not :class:`pixyz.distributions.MultiplyDistribution`)
Distribution.
replace_dict : dict
Dictionary mapping original variable names to replaced names.
"""
super().__init__(var=[], cond_var=[], name=p.name, features_shape=p.features_shape, atomic=False)
self._graph = p.graph.var_replaced(replace_dict)
self.p = p
def __repr__(self):
return repr(self.graph)
def forward(self, *args, **kwargs):
return self.p(*args, **kwargs)
@property
def distribution_name(self):
return self.p.distribution_name
def __getattr__(self, item):
try:
return super().__getattr__(item)
except AttributeError:
import warnings
warnings.warn("this magic method will be deprecated.")
return self.p.__getattribute__(item)
class MarginalizeVarDistribution(Distribution):
r"""Marginalize variables in Distribution.
.. math::
p(x) = \int p(x,z) dz
Examples
--------
>>> a = DistributionBase(var=["x"],cond_var=["z"])
>>> b = DistributionBase(var=["y"],cond_var=["z"])
>>> p_multi = a * b
>>> print(p_multi)
Distribution:
p(x,y|z) = p(x|z)p(y|z)
Network architecture:
p(y|z):
DistributionBase(
name=p, distribution_name=,
var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
p(x|z):
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
>>> p_marg = MarginalizeVarDistribution(p_multi, ["y"])
>>> print(p_marg)
Distribution:
p(x|z) = \int p(x|z)p(y|z)dy
Network architecture:
p(y|z):
DistributionBase(
name=p, distribution_name=,
var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
p(x|z):
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
"""
def __init__(self, p: Distribution, marginalize_list):
"""
Parameters
----------
p : :class:`pixyz.distributions.Distribution` (not :class:`pixyz.distributions.DistributionBase`)
Distribution.
marginalize_list : list
Variables to marginalize.
"""
marginalize_list = tolist(marginalize_list)
super().__init__(var=[], cond_var=[], name=p.name, features_shape=p.features_shape, atomic=False)
self._graph = p.graph.marginalized(marginalize_list)
self.p = p
def __repr__(self):
return repr(self.graph)
def forward(self, *args, **kwargs):
return self.p(*args, **kwargs)
def sample_mean(self, x_dict={}):
return self.p.sample_mean(x_dict)
def sample_variance(self, x_dict={}):
return self.p.sample_variance(x_dict)
def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):
return self.p.get_entropy(x_dict, sum_features, feature_dims)
@property
def distribution_name(self):
return self.p.distribution_name
def __getattr__(self, item):
try:
return super().__getattr__(item)
except AttributeError:
import warnings
warnings.warn("this magic method will be deprecated.")
return self.p.__getattribute__(item)
| 70,384
| 36.800752
| 137
|
py
|
pixyz
|
pixyz-main/pixyz/distributions/exponential_distributions.py
|
import torch
from torch.distributions import Normal as NormalTorch
from torch.distributions import Bernoulli as BernoulliTorch
from torch.distributions import RelaxedBernoulli as RelaxedBernoulliTorch
from torch.distributions import RelaxedOneHotCategorical as RelaxedOneHotCategoricalTorch
from torch.distributions.one_hot_categorical import OneHotCategorical as CategoricalTorch
from torch.distributions import Multinomial as MultinomialTorch
from torch.distributions import Dirichlet as DirichletTorch
from torch.distributions import Beta as BetaTorch
from torch.distributions import Laplace as LaplaceTorch
from torch.distributions import Gamma as GammaTorch
from torch.distributions.utils import broadcast_all
from torch.nn.functional import binary_cross_entropy_with_logits
from ..utils import get_dict_values, sum_samples
from .distributions import DistributionBase
def _valid_param_dict(raw_dict):
return {var_name: value for var_name, value in raw_dict.items() if value is not None}
class Normal(DistributionBase):
"""Normal distribution parameterized by :attr:`loc` and :attr:`scale`. """
def __init__(self, var=['x'], cond_var=[], name='p', features_shape=torch.Size(), loc=None, scale=None):
super().__init__(var, cond_var, name, features_shape, **_valid_param_dict({'loc': loc, 'scale': scale}))
@property
def params_keys(self):
return ["loc", "scale"]
@property
def distribution_torch_class(self):
return NormalTorch
@property
def distribution_name(self):
return "Normal"
@property
def has_reparam(self):
return True
class BernoulliTorchOld(BernoulliTorch):
def log_prob(self, value):
logits, value = broadcast_all(self.logits, value)
return -binary_cross_entropy_with_logits(logits, value, reduction='none')
class Bernoulli(DistributionBase):
"""Bernoulli distribution parameterized by :attr:`probs`."""
def __init__(self, var=['x'], cond_var=[], name='p', features_shape=torch.Size(), probs=None):
super().__init__(var, cond_var, name, features_shape, **_valid_param_dict({'probs': probs}))
@property
def params_keys(self):
return ["probs"]
@property
def distribution_torch_class(self):
return BernoulliTorchOld
@property
def distribution_name(self):
return "Bernoulli"
@property
def has_reparam(self):
return False
class RelaxedBernoulli(Bernoulli):
"""Relaxed (re-parameterizable) Bernoulli distribution parameterized by :attr:`probs` and :attr:`temperature`."""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), temperature=torch.tensor(0.1),
probs=None):
super(Bernoulli, self).__init__(var, cond_var, name, features_shape, **_valid_param_dict({
'probs': probs, 'temperature': temperature}))
@property
def params_keys(self):
return ["probs", "temperature"]
@property
def distribution_torch_class(self):
"""Use relaxed version only when sampling"""
return RelaxedBernoulliTorch
@property
def distribution_name(self):
return "RelaxedBernoulli"
def set_dist(self, x_dict={}, batch_n=None, sampling=False, **kwargs):
"""Set :attr:`dist` as PyTorch distributions given parameters.
This requires that :attr:`params_keys` and :attr:`distribution_torch_class` are set.
Parameters
----------
x_dict : :obj:`dict`, defaults to {}.
Parameters of this distribution.
batch_n : :obj:`int`, defaults to None.
Set batch size of parameters.
sampling : :obj:`bool`, defaults to False.
If it is false, the distribution will not be relaxed to compute log_prob.
**kwargs
Arbitrary keyword arguments.
Returns
-------
"""
params = self.get_params(x_dict, **kwargs)
if set(self.params_keys) != set(params.keys()):
raise ValueError("{} class requires following parameters: {}\n"
"but got {}".format(type(self), set(self.params_keys), set(params.keys())))
if sampling:
self._dist = self.distribution_torch_class(**params)
else:
hard_params_keys = ["probs"]
self._dist = BernoulliTorchOld(**get_dict_values(params, hard_params_keys, return_dict=True))
# expand batch_n
if batch_n:
batch_shape = self._dist.batch_shape
if batch_shape[0] == 1:
self._dist = self._dist.expand(torch.Size([batch_n]) + batch_shape[1:])
elif batch_shape[0] == batch_n:
return
else:
raise ValueError()
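# Design note: the relaxed (temperature-controlled) distribution is only instantiated
# when sampling, so gradients can flow through rsample(); log_prob and entropy fall
# back to the hard Bernoulli built from the same `probs`, which is why `temperature`
# is stripped from the parameters in the non-sampling branch above.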
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
sample_mean=False, **kwargs):
# check whether the input is valid or convert it to valid dictionary.
input_dict = self._get_input_dict(x_dict)
self.set_dist(input_dict, batch_n=batch_n, sampling=True)
if sample_mean:
mean = self.dist.mean
if sample_shape != torch.Size():
unsqueeze_shape = torch.Size([1] * len(sample_shape))
unrepeat_shape = torch.Size([1] * mean.ndim)
mean = mean.reshape(unsqueeze_shape + mean.shape).repeat(sample_shape + unrepeat_shape)
output_dict = {self._var[0]: mean}
else:
output_dict = self.get_sample(reparam=reparam, sample_shape=sample_shape)
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
@property
def has_reparam(self):
return True
class FactorizedBernoulli(Bernoulli):
"""
Factorized Bernoulli distribution parameterized by :attr:`probs`.
References
----------
[Vedantam+ 2017] Generative Models of Visually Grounded Imagination
"""
def __init__(self, var=['x'], cond_var=[], name='p', features_shape=torch.Size(), probs=None):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape, probs=probs)
@property
def distribution_name(self):
return "FactorizedBernoulli"
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
log_prob = super().get_log_prob(x_dict, sum_features=False, **kwargs)
[_x] = get_dict_values(x_dict, self._var)
log_prob[_x == 0] = 0
if sum_features:
log_prob = sum_samples(log_prob, feature_dims)
return log_prob
class CategoricalTorchOld(CategoricalTorch):
def log_prob(self, value):
indices = value.max(-1)[1]
return self._categorical.log_prob(indices)
class Categorical(DistributionBase):
"""Categorical distribution parameterized by :attr:`probs`."""
def __init__(self, var=['x'], cond_var=[], name='p', features_shape=torch.Size(), probs=None):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape,
**_valid_param_dict({'probs': probs}))
@property
def params_keys(self):
return ["probs"]
@property
def distribution_torch_class(self):
return CategoricalTorchOld
@property
def distribution_name(self):
return "Categorical"
@property
def has_reparam(self):
return False
class RelaxedCategorical(Categorical):
"""
Relaxed (re-parameterizable) categorical distribution parameterized by :attr:`probs` and :attr:`temperature`.
Note: the shape of temperature should contain the event shape of this Categorical distribution.
"""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), temperature=torch.tensor(0.1),
probs=None):
super(Categorical, self).__init__(var, cond_var, name, features_shape,
**_valid_param_dict({'probs': probs, 'temperature': temperature}))
@property
def params_keys(self):
return ['probs', 'temperature']
@property
def distribution_torch_class(self):
"""Use relaxed version only when sampling"""
return RelaxedOneHotCategoricalTorch
@property
def distribution_name(self):
return "RelaxedCategorical"
def set_dist(self, x_dict={}, batch_n=None, sampling=False, **kwargs):
"""Set :attr:`dist` as PyTorch distributions given parameters.
This requires that :attr:`params_keys` and :attr:`distribution_torch_class` are set.
Parameters
----------
x_dict : :obj:`dict`, defaults to {}.
Parameters of this distribution.
batch_n : :obj:`int`, defaults to None.
Set batch size of parameters.
sampling : :obj:`bool`, defaults to False.
If it is false, the distribution will not be relaxed to compute log_prob.
**kwargs
Arbitrary keyword arguments.
Returns
-------
"""
params = self.get_params(x_dict, **kwargs)
if set(self.params_keys) != set(params.keys()):
raise ValueError("{} class requires following parameters: {}\n"
"but got {}".format(type(self), set(self.params_keys), set(params.keys())))
if sampling:
self._dist = self.distribution_torch_class(**params)
else:
hard_params_keys = ["probs"]
self._dist = CategoricalTorchOld(**get_dict_values(params, hard_params_keys, return_dict=True))
# expand batch_n
if batch_n:
batch_shape = self._dist.batch_shape
if batch_shape[0] == 1:
self._dist = self._dist.expand(torch.Size([batch_n]) + batch_shape[1:])
elif batch_shape[0] == batch_n:
return
else:
raise ValueError()
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
sample_mean=False, **kwargs):
# check whether the input is valid or convert it to valid dictionary.
input_dict = self._get_input_dict(x_dict)
self.set_dist(input_dict, batch_n=batch_n, sampling=True)
if sample_mean:
mean = self.dist.mean
if sample_shape != torch.Size():
unsqueeze_shape = torch.Size([1] * len(sample_shape))
unrepeat_shape = torch.Size([1] * mean.ndim)
mean = mean.reshape(unsqueeze_shape + mean.shape).repeat(sample_shape + unrepeat_shape)
output_dict = {self._var[0]: mean}
else:
output_dict = self.get_sample(reparam=reparam, sample_shape=sample_shape)
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
@property
def has_reparam(self):
return True
class Multinomial(DistributionBase):
"""Multinomial distribution parameterized by :attr:`total_count` and :attr:`probs`."""
def __init__(self, total_count=1, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), probs=None):
self._total_count = total_count
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape,
**_valid_param_dict({'probs': probs}))
@property
def total_count(self):
return self._total_count
@property
def params_keys(self):
return ["probs"]
@property
def distribution_torch_class(self):
return MultinomialTorch
@property
def distribution_name(self):
return "Multinomial"
@property
def has_reparam(self):
return False
class Dirichlet(DistributionBase):
"""Dirichlet distribution parameterized by :attr:`concentration`."""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), concentration=None):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape,
**_valid_param_dict({'concentration': concentration}))
@property
def params_keys(self):
return ["concentration"]
@property
def distribution_torch_class(self):
return DirichletTorch
@property
def distribution_name(self):
return "Dirichlet"
@property
def has_reparam(self):
return True
class Beta(DistributionBase):
"""Beta distribution parameterized by :attr:`concentration1` and :attr:`concentration0`."""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), concentration1=None,
concentration0=None):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape,
**_valid_param_dict({'concentration1': concentration1, 'concentration0': concentration0}))
@property
def params_keys(self):
return ["concentration1", "concentration0"]
@property
def distribution_torch_class(self):
return BetaTorch
@property
def distribution_name(self):
return "Beta"
@property
def has_reparam(self):
return True
class Laplace(DistributionBase):
"""
Laplace distribution parameterized by :attr:`loc` and :attr:`scale`.
"""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), loc=None, scale=None):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape,
**_valid_param_dict({'loc': loc, 'scale': scale}))
@property
def params_keys(self):
return ["loc", "scale"]
@property
def distribution_torch_class(self):
return LaplaceTorch
@property
def distribution_name(self):
return "Laplace"
@property
def has_reparam(self):
return True
class Gamma(DistributionBase):
"""
Gamma distribution parameterized by :attr:`concentration` and :attr:`rate`.
"""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), concentration=None, rate=None):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape,
**_valid_param_dict({'concentration': concentration, 'rate': rate}))
@property
def params_keys(self):
return ["concentration", "rate"]
@property
def distribution_torch_class(self):
return GammaTorch
@property
def distribution_name(self):
return "Gamma"
@property
def has_reparam(self):
return True
| 14,788
| 33.154734
| 117
|
py
|
pixyz
|
pixyz-main/pixyz/distributions/poe.py
|
from __future__ import print_function
import torch
from torch import nn
from ..utils import tolist, get_dict_values
from ..distributions import Normal
class ProductOfNormal(Normal):
r"""Product of normal distributions.
.. math::
p(z|x,y) \propto p(z)p(z|x)p(z|y)
In this model, :math:`p(z|x)` and :math:`p(z|y)` act as `experts` and :math:`p(z)` corresponds to
the prior of the `experts`.
References
----------
[Vedantam+ 2017] Generative Models of Visually Grounded Imagination
[Wu+ 2018] Multimodal Generative Models for Scalable Weakly-Supervised Learning
Examples
--------
>>> pon = ProductOfNormal([p_x, p_y]) # doctest: +SKIP
>>> pon.sample({"x": x, "y": y}) # doctest: +SKIP
{'x': tensor([[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]],),
'y': tensor([[0., 0., 0., ..., 0., 0., 1.],
[0., 0., 1., ..., 0., 0., 0.],
[0., 1., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 1., 0.],
[1., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 1.]]),
'z': tensor([[ 0.6611, 0.3811, 0.7778, ..., -0.0468, -0.3615, -0.6569],
[-0.0071, -0.9178, 0.6620, ..., -0.1472, 0.6023, 0.5903],
[-0.3723, -0.7758, 0.0195, ..., 0.8239, -0.3537, 0.3854],
...,
[ 0.7820, -0.4761, 0.1804, ..., -0.5701, -0.0714, -0.5485],
[-0.1873, -0.2105, -0.1861, ..., -0.5372, 0.0752, 0.2777],
[-0.2563, -0.0828, 0.1605, ..., 0.2767, -0.8456, 0.7364]])}
>>> pon.sample({"y": y}) # doctest: +SKIP
{'y': tensor([[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 1.],
[0., 0., 0., ..., 1., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 0.],
[0., 1., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]]),
'z': tensor([[-0.3264, -0.4448, 0.3610, ..., -0.7378, 0.3002, 0.4370],
[ 0.0928, -0.1830, 1.1768, ..., 1.1808, -0.7226, -0.4152],
[ 0.6999, 0.2222, -0.2901, ..., 0.5706, 0.7091, 0.5179],
...,
[ 0.5688, -1.6612, -0.0713, ..., -0.1400, -0.3903, 0.2533],
[ 0.5412, -0.0289, 0.6365, ..., 0.7407, 0.7838, 0.9218],
[ 0.0299, 0.5148, -0.1001, ..., 0.9938, 1.0689, -1.1902]])}
>>> pon.sample() # same as sampling from unit Gaussian. # doctest: +SKIP
{'z': tensor(-0.4494)}
"""
def __init__(self, p=[], weight_modalities=None, name="p", features_shape=torch.Size()):
"""
Parameters
----------
p : :obj:`list` of :class:`pixyz.distributions.Normal`.
List of experts.
name : :obj:`str`, defaults to "p"
Name of this distribution.
This name is displayed in prob_text and prob_factorized_text.
features_shape : :obj:`torch.Size` or :obj:`list`, defaults to torch.Size()
Shape of dimensions (features) of this distribution.
Examples
--------
>>> p_x = Normal(cond_var=['z'], loc='z', scale=torch.ones(1, 1))
>>> pon = ProductOfNormal([p_x])
>>> sample = pon.sample({'z': torch.zeros(1, 1)})
>>> sample # doctest: +SKIP
"""
p = tolist(p)
if len(p) == 0:
raise ValueError()
if weight_modalities is not None:
if len(weight_modalities) != len(p) + 1:
raise ValueError()
var = p[0].var
cond_var = []
for _p in p:
if _p.var != var:
raise ValueError()
if _p.distribution_name != "Normal":
raise ValueError()
cond_var += _p.cond_var
self.input_ids = [[] for _ in p]
self.save_output_dict = 0
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape)
self.p = nn.ModuleList(p)
if weight_modalities is None:
self.weight_modalities = [1. for _ in range(len(self.p) + 1)]
else:
self.weight_modalities = weight_modalities
@property
def prob_factorized_text(self):
prob_text = "p({})".format(
','.join(self._var)
)
if len(self._cond_var) != 0:
prob_text += "".join([p.prob_text for p in self.p])
return prob_text
@property
def prob_joint_factorized_and_text(self):
"""str: Return a formula of the factorized probability distribution."""
if self.prob_factorized_text == self.prob_text:
prob_text = self.prob_text
else:
prob_text = "{} \\propto {}".format(self.prob_text, self.prob_factorized_text)
return prob_text
def _get_expert_params(self, params_dict={}, **kwargs):
"""Get the output parameters of all experts.
Parameters
----------
params_dict : dict
**kwargs
Arbitrary keyword arguments.
Returns
-------
loc : torch.Tensor
Concatenation of mean vectors for specified experts. (n_expert, n_batch, output_dim)
scale : torch.Tensor
Concatenation of the square root of a diagonal covariance matrix for specified experts.
(n_expert, n_batch, output_dim)
weight : torch.Tensor
Weights of the prior and each specified expert, unsqueezed for broadcasting. (n_expert + 1, 1, 1)
"""
loc = []
scale = []
weight = [self.weight_modalities[0]]
for i, _p in enumerate(self.p):
inputs_dict = get_dict_values(params_dict, _p.cond_var, True)
if len(inputs_dict) != 0:
outputs = _p.get_params(inputs_dict, **kwargs)
loc.append(outputs["loc"])
scale.append(outputs["scale"])
weight.append(self.weight_modalities[i + 1])
loc = torch.stack(loc)
scale = torch.stack(scale)
weight = torch.Tensor(weight).to(scale.device)
# expand weight
for i in range(len(loc.shape) - 1):
weight = weight.unsqueeze(-1)
return loc, scale, weight
def get_params(self, params_dict={}, **kwargs):
_input_ids = [id(v) for v in list(params_dict.values())]
if _input_ids == self.input_ids:
return self.save_output_dict
else:
# experts
if len(params_dict) > 0:
loc, scale, weight = self._get_expert_params(params_dict, **kwargs) # (n_expert, n_batch, output_dim)
else:
loc = torch.zeros(1)
scale = torch.zeros(1)
weight = torch.ones(1).to(scale.device)
output_loc, output_scale = self._compute_expert_params(loc, scale, weight)
output_dict = {"loc": output_loc, "scale": output_scale}
self.save_output_dict = output_dict
self.input_ids = _input_ids
return output_dict
@staticmethod
def _compute_expert_params(loc, scale, weight):
"""Compute parameters for the product of experts.
It is assumed that unspecified experts are excluded from the inputs.
Parameters
----------
loc : torch.Tensor
Concatenation of mean vectors for specified experts. (n_expert, n_batch, output_dim)
scale : torch.Tensor
Concatenation of the square root of a diagonal covariance matrix for specified experts.
(n_expert, n_batch, output_dim)
Returns
-------
output_loc : torch.Tensor
Mean vectors for this distribution. (n_batch, output_dim)
output_scale : torch.Tensor
The square root of diagonal covariance matrices for this distribution. (n_batch, output_dim)
"""
variance = scale ** 2
# parameter for prior
prior_prec = 1 # prior_loc is not specified because it is equal to 0.
# compute the diagonal precision matrix.
prec = torch.zeros_like(variance).type(scale.dtype)
prec[variance != 0] = 1. / variance[variance != 0]
# compute the square root of a diagonal covariance matrix for the product of distributions.
output_prec = torch.sum(weight[1:] * prec, dim=0) + weight[0] * prior_prec
output_variance = 1. / output_prec # (n_batch, output_dim)
# compute the mean vectors for the product of normal distributions.
output_loc = torch.sum(weight[1:] * prec * loc, dim=0) # (n_batch, output_dim)
output_loc = output_loc * output_variance
return output_loc, torch.sqrt(output_variance)
def _get_input_dict(self, x, var=None):
if var is None:
var = self.input_var
if type(x) is torch.Tensor:
checked_x = {var[0]: x}
elif type(x) is list:
# TODO: we need to check if all the elements contained in this list are torch.Tensor.
checked_x = dict(zip(var, x))
elif type(x) is dict:
# point of modification
checked_x = x
else:
raise ValueError("The type of input is not valid, got %s." % type(x))
return get_dict_values(checked_x, var, return_dict=True)
def log_prob(self, sum_features=True, feature_dims=None):
raise NotImplementedError()
def prob(self, sum_features=True, feature_dims=None):
raise NotImplementedError()
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None):
raise NotImplementedError()
class ElementWiseProductOfNormal(ProductOfNormal):
r"""Product of normal distributions.
In this distribution, each element of the input vector on the given distribution is considered as
a different expert.
.. math::
p(z|x) = p(z|x_1, x_2) \propto p(z)p(z|x_1)p(z|x_2)
Examples
--------
>>> pon = ElementWiseProductOfNormal(p) # doctest: +SKIP
>>> pon.sample({"x": x}) # doctest: +SKIP
{'x': tensor([[0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0., 0.]]),
'z': tensor([[-0.3572, -0.0632, 0.4872, 0.2269, -0.1693, -0.0160, -0.0429, 0.2017,
-0.1589, -0.3380, -0.9598, 0.6216, -0.4296, -1.1349, 0.0901, 0.3994,
0.2313, -0.5227, -0.7973, 0.3968, 0.7137, -0.5639, -0.4891, -0.1249,
0.8256, 0.1463, 0.0801, -1.2202, 0.6984, -0.4036, 0.4960, -0.4376,
0.3310, -0.2243, -0.2381, -0.2200, 0.8969, 0.2674, 0.4681, 1.6764,
0.8127, 0.2722, -0.2048, 0.1903, -0.1398, 0.0099, 0.4382, -0.8016,
0.9947, 0.7556, -0.2017, -0.3920, 1.4212, -1.2529, -0.1002, -0.0031,
0.1876, 0.4267, 0.3622, 0.2648, 0.4752, 0.0843, -0.3065, -0.4922],
[ 0.3770, -0.0413, 0.9102, 0.2897, -0.0567, 0.5211, 1.5233, -0.3539,
0.5163, -0.2271, -0.1027, 0.0294, -1.4617, 0.1640, 0.2025, -0.2190,
0.0555, 0.5779, -0.2930, -0.2161, 0.2835, -0.0354, -0.2569, -0.7171,
0.0164, -0.4080, 1.1088, 0.3947, 0.2720, -0.0600, -0.9295, -0.0234,
0.5624, 0.4866, 0.5285, 1.1827, 0.2494, 0.0777, 0.7585, 0.5127,
0.7500, -0.3253, 0.0250, 0.0888, 1.0340, -0.1405, -0.8114, 0.4492,
0.2725, -0.0270, 0.6379, -0.8096, 0.4259, 0.3179, -0.1681, 0.3365,
0.6305, 0.5203, 0.2384, 0.0572, 0.4804, 0.9553, -0.3244, 1.5373]])}
>>> pon.sample({"x": torch.zeros_like(x)}) # same as sampling from unit Gaussian. # doctest: +SKIP
{'x': tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]),
'z': tensor([[-0.7777, -0.5908, -1.5498, -0.7505, 0.6201, 0.7218, 1.0045, 0.8923,
-0.8030, -0.3569, 0.2932, 0.2122, 0.1640, 0.7893, -0.3500, -1.0537,
-1.2769, 0.6122, -1.0083, -0.2915, -0.1928, -0.7486, 0.2418, -1.9013,
1.2514, 1.3035, -0.3029, -0.3098, -0.5415, 1.1970, -0.4443, 2.2393,
-0.6980, 0.2820, 1.6972, 0.6322, 0.4308, 0.8953, 0.7248, 0.4440,
2.2770, 1.7791, 0.7563, -1.1781, -0.8331, 0.1825, 1.5447, 0.1385,
-1.1348, 0.0257, 0.3374, 0.5889, 1.1231, -1.2476, -0.3801, -1.4404,
-1.3066, -1.2653, 0.5958, -1.7423, 0.7189, -0.7236, 0.2330, 0.3117],
[ 0.5495, 0.7210, -0.4708, -2.0631, -0.6170, 0.2436, -0.0133, -0.4616,
-0.8091, -0.1592, 1.3117, 0.0276, 0.6625, -0.3748, -0.5049, 1.8260,
-0.3631, 1.1546, -1.0913, 0.2712, 1.5493, 1.4294, -2.1245, -2.0422,
0.4976, -1.2785, 0.5028, 1.4240, 1.1983, 0.2468, 1.1682, -0.6725,
-1.1198, -1.4942, -0.3629, 0.1325, -0.2256, 0.4280, 0.9830, -1.9427,
-0.2181, 1.1850, -0.7514, -0.8172, 2.1031, -0.1698, -0.3777, -0.7863,
1.0936, -1.3720, 0.9999, 1.3302, -0.8954, -0.5999, 2.3305, 0.5702,
-1.0767, -0.2750, -0.3741, -0.7026, -1.5408, 0.0667, 1.2550, -0.5117]])}
"""
def __init__(self, p, name="p", features_shape=torch.Size()):
r"""
Parameters
----------
p : pixyz.distributions.Normal
Each element of this input vector is considered as a different expert.
When some elements are 0, experts corresponding to these elements are considered not to be specified.
:math:`p(z|x) = p(z|x_1, x_2=0) \propto p(z)p(z|x_1)`
name : str, defaults to "p"
Name of this distribution.
This name is displayed in prob_text and prob_factorized_text.
features_shape : :obj:`torch.Size` or :obj:`list`, defaults to torch.Size()
Shape of dimensions (features) of this distribution.
"""
if len(p.cond_var) != 1:
raise ValueError("p must have exactly one conditional variable.")
super().__init__(p=p, name=name, features_shape=features_shape)
def _get_input_dict(self, x, var=None):
return super(ProductOfNormal, self)._get_input_dict(x, var)
@staticmethod
def _get_mask(inputs, index):
"""Get a mask to the input to specify an expert identified by index.
Parameters
----------
inputs : torch.Tensor
index : int
Returns
-------
torch.Tensor
"""
mask = torch.zeros_like(inputs).type(inputs.dtype)
mask[:, index] = 1
return mask
def _get_params_with_masking(self, inputs, index, **kwargs):
"""Get the output parameters of the index-specified expert.
Parameters
----------
inputs : torch.Tensor
index : int
**kwargs
Arbitrary keyword arguments.
Returns
-------
outputs : torch.Tensor
Examples
--------
>>> # pon = ElementWiseProductOfNormal(p)
>>> # a = torch.tensor([[1, 0, 0], [0, 1, 0]])
>>> # pon._get_params_with_masking(a, 0)
tensor([[[0.01, 0.0131],
[0, 0]], # loc
[[0.42, 0.39],
[1, 1]], # scale
])
>>> # pon._get_params_with_masking(a, 1)
tensor([[[0, 0],
[0.021, 0.11]], # loc
[[1, 1],
[0.293, 0.415]], # scale
])
>>> # self._get_params_with_masking(a, 2)
tensor([[[0, 0],
[0, 0]], # loc
[[1, 1],
[1, 1]], # scale
])
"""
mask = self._get_mask(inputs, index) # (n_batch, n_expert)
outputs_dict = self.p.get_params({self.cond_var[0]: inputs * mask}, **kwargs)
outputs = torch.stack([outputs_dict["loc"], outputs_dict["scale"]]) # (2, n_batch, output_dim)
# For samples whose index-th input element is zero (i.e. that expert is not specified), zero out loc and scale.
outputs[:, inputs[:, index] == 0, :] = 0
return outputs
def _get_expert_params(self, params_dict={}, **kwargs):
"""Get the output parameters of all experts.
Parameters
----------
params_dict : dict
**kwargs
Arbitrary keyword arguments.
Returns
-------
torch.Tensor
torch.Tensor
"""
inputs = get_dict_values(params_dict, self.cond_var)[0] # (n_batch, n_expert=input_dim)
n_expert = inputs.size()[1]
outputs = [self._get_params_with_masking(inputs, i) for i in range(n_expert)]
outputs = torch.stack(outputs) # (n_expert, 2, n_batch, output_dim)
return outputs[:, 0, :, :], outputs[:, 1, :, :] # (n_expert, n_batch, output_dim)
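# --- Illustrative sketch (not part of the original pixyz source) ---------------------------
# The precision-weighted fusion performed by ProductOfNormal._compute_expert_params above,
# rewritten with plain torch: a product of Gaussians has precision equal to the sum of the
# (weighted) expert precisions plus the unit-Gaussian prior, and a mean equal to the
# precision-weighted average of the expert means. The helper name and the expert values
# below are made up for demonstration only.
def _poe_fusion_sketch():
    import torch
    loc = torch.tensor([[[1.0, -2.0]], [[3.0, 0.5]]])    # (n_expert=2, n_batch=1, output_dim=2)
    scale = torch.tensor([[[0.5, 1.0]], [[2.0, 1.0]]])   # expert standard deviations
    weight = torch.ones(3).reshape(3, 1, 1)              # prior weight + one weight per expert
    prec = 1.0 / scale ** 2                              # expert precisions
    out_prec = (weight[1:] * prec).sum(0) + weight[0] * 1.0   # add the unit-Gaussian prior (precision 1)
    out_var = 1.0 / out_prec
    out_loc = (weight[1:] * prec * loc).sum(0) * out_var      # precision-weighted mean
    return out_loc, out_var.sqrt()                            # (n_batch, output_dim) loc and scale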
| 16,619 | 38.856115 | 118 | py |
| pixyz | pixyz-main/pixyz/distributions/mixture_distributions.py |
import torch
from torch import nn
from ..distributions.distributions import Distribution
from ..utils import convert_latex_name
class MixtureModel(Distribution):
r"""Mixture models.
.. math::
p(x) = \sum_i p(x|z=i)p(z=i)
Examples
--------
>>> from pixyz.distributions import Normal, Categorical
>>> from pixyz.distributions.mixture_distributions import MixtureModel
>>> z_dim = 3 # the number of mixture
>>> x_dim = 2 # the input dimension.
>>> distributions = [] # the list of distributions
>>> for i in range(z_dim):
... loc = torch.randn(x_dim) # initialize the value of location (mean)
... scale = torch.empty(x_dim).fill_(1.) # initialize the value of scale (variance)
... distributions.append(Normal(loc=loc, scale=scale, var=["x"], name="p_%d" %i))
>>> probs = torch.empty(z_dim).fill_(1. / z_dim) # initialize the value of probabilities
>>> prior = Categorical(probs=probs, var=["z"], name="prior")
>>> p = MixtureModel(distributions=distributions, prior=prior)
>>> print(p)
Distribution:
p(x) = p_{0}(x|z=0)prior(z=0) + p_{1}(x|z=1)prior(z=1) + p_{2}(x|z=2)prior(z=2)
Network architecture:
MixtureModel(
name=p, distribution_name=Mixture Model,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([])
(distributions): ModuleList(
(0): Normal(
name=p_{0}, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([2])
(loc): torch.Size([1, 2])
(scale): torch.Size([1, 2])
)
(1): Normal(
name=p_{1}, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([2])
(loc): torch.Size([1, 2])
(scale): torch.Size([1, 2])
)
(2): Normal(
name=p_{2}, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([2])
(loc): torch.Size([1, 2])
(scale): torch.Size([1, 2])
)
)
(prior): Categorical(
name=prior, distribution_name=Categorical,
var=['z'], cond_var=[], input_var=[], features_shape=torch.Size([3])
(probs): torch.Size([1, 3])
)
)
"""
def __init__(self, distributions, prior, name="p"):
"""
Parameters
----------
distributions : list
List of distributions.
prior : pixyz.Distribution.Categorical
Prior distribution of latent variable (i.e., a contribution rate).
This should be a categorical distribution and
the number of its category should be the same as the length of :attr:`distributions`.
name : :obj:`str`, defaults to "p"
Name of this distribution.
This name is displayed in :attr:`prob_text` and :attr:`prob_factorized_text`.
"""
if not isinstance(distributions, list):
raise ValueError("distributions must be a list of distributions.")
else:
distributions = nn.ModuleList(distributions)
if prior.distribution_name != "Categorical":
raise ValueError("The prior must be the categorical distribution.")
# check the number of mixture
if prior.get_params()["probs"].shape[-1] != len(distributions):
raise ValueError("The number of its category must be the same as the length of the distribution list.")
# check whether all distributions have the same variable.
var_list = []
for d in distributions:
var_list += d.var
var_list = list(set(var_list))
if len(var_list) != 1:
raise ValueError("All distributions must have the same variable.")
hidden_var = prior.var
super().__init__(var=var_list, name=name)
self.distributions = distributions
self.prior = prior
self._hidden_var = hidden_var
@property
def hidden_var(self):
"""list: Hidden variables of this distribution."""
return self._hidden_var
@property
def prob_factorized_text(self):
_mixture_prob_text = []
for i, d in enumerate(self.distributions):
_mixture_prob_text.append("{}({}|{}={}){}({}={})".format(
d.name, self.var[0], self._hidden_var[0], i,
self.prior.name, self._hidden_var[0], i
))
_prob_text = ' + '.join(_mixture_prob_text)
return _prob_text
@property
def distribution_name(self):
return "Mixture Model"
def posterior(self, name=None):
return PosteriorMixtureModel(self, name=name)
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, return_hidden=False,
sample_mean=False, **kwargs):
input_dict = self._get_input_dict(x_dict)
# sample from prior
hidden_output = self.prior.sample(input_dict, batch_n=batch_n,
sample_mean=sample_mean, return_all=False, **kwargs)[self._hidden_var[0]]
var_output = []
for _hidden_output in hidden_output:
var_output.append(self.distributions[_hidden_output.argmax(dim=-1)].sample(
input_dict, sample_mean=sample_mean, return_all=False, **kwargs)[self._var[0]])
var_output = torch.cat(var_output, dim=0)
output_dict = {self._var[0]: var_output}
if return_hidden:
output_dict.update({self._hidden_var[0]: hidden_output})
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
@property
def has_reparam(self):
return False
def get_log_prob(self, x_dict, return_hidden=False, **kwargs):
"""Evaluate log-pdf, log p(x) (if return_hidden=False) or log p(x, z) (if return_hidden=True).
Parameters
----------
x_dict : dict
Input variables (including `var`).
return_hidden : :obj:`bool`, defaults to False
Returns
-------
log_prob : torch.Tensor
The log-pdf value of x.
If return_hidden is False:
dim=0: the size of the batch
If return_hidden is True:
dim=0: the number of mixture components
dim=1: the size of the batch
"""
log_prob_all = []
_device = x_dict[self._var[0]].device
eye_tensor = torch.eye(len(self.distributions)).to(_device) # for prior
for i, d in enumerate(self.distributions):
# p(z=i)
prior_log_prob = self.prior.log_prob().eval({self._hidden_var[0]: eye_tensor[i]})
# p(x|z=i)
log_prob = d.log_prob().eval(x_dict)
# p(x, z=i)
log_prob_all.append(log_prob + prior_log_prob)
log_prob_all = torch.stack(log_prob_all, dim=0) # (num_mix, batch_size)
if return_hidden:
return log_prob_all
return torch.logsumexp(log_prob_all, 0)
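# --- Illustrative sketch (not part of the original pixyz source) ---------------------------
# Ancestral sampling from a Gaussian mixture with plain torch, mirroring MixtureModel.sample
# above: first draw a component index from the categorical prior, then draw x from the
# selected component. The helper name and the component parameters are made up for
# demonstration only.
def _mixture_sampling_sketch(n_samples=5):
    import torch
    probs = torch.tensor([0.2, 0.3, 0.5])        # categorical prior p(z=i) over 3 components
    locs = torch.tensor([[-2.0], [0.0], [3.0]])  # per-component means (x_dim=1)
    scales = torch.ones(3, 1)                    # per-component standard deviations
    z = torch.multinomial(probs, n_samples, replacement=True)   # (n_samples,) component indices
    x = locs[z] + scales[z] * torch.randn(n_samples, 1)         # sample x given z via reparameterization
    return z, x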
class PosteriorMixtureModel(Distribution):
def __init__(self, p, name=None):
if name is None:
name = p.name
super().__init__(var=p.var, name=name)
self.p = p
self._hidden_var = p.hidden_var
@property
def hidden_var(self):
"""list: Hidden variables of this distribution."""
return self._hidden_var
@property
def prob_text(self):
_prob_text = "{}({}|{})".format(
self._name, convert_latex_name(self._hidden_var[0]), convert_latex_name(self._var[0])
)
return _prob_text
@property
def prob_factorized_text(self):
numerator = "{" + "{}({},{})".format(self._name, self._hidden_var[0], self._var[0]) + "}"
denominator = "{" + "{}({})".format(self._name, self._var[0]) + "}"
_prob_text = "\\frac{}{}".format(numerator, denominator)
return _prob_text
@property
def distribution_name(self):
return "Mixture Model (Posterior)"
def sample(self, *args, **kwargs):
raise NotImplementedError()
@property
def has_reparam(self):
return False
def get_log_prob(self, x_dict, **kwargs):
# log p(z|x) = log p(x, z) - log p(x)
log_prob = self.p.get_log_prob(x_dict, return_hidden=True, **kwargs) - self.p.get_log_prob(x_dict, **kwargs)
return log_prob # (num_mix, batch_size)
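# --- Illustrative sketch (not part of the original pixyz source) ---------------------------
# The log-sum-exp marginal likelihood computed by MixtureModel.get_log_prob and the posterior
# responsibilities log p(z=i|x) = log p(x, z=i) - log p(x) computed by
# PosteriorMixtureModel.get_log_prob, written with plain torch for a 1-D Gaussian mixture.
# The helper name and the parameters are made up for demonstration; x is a 1-D tensor of
# scalar observations.
def _mixture_log_prob_sketch(x):
    import torch
    probs = torch.tensor([0.2, 0.3, 0.5])    # prior p(z=i)
    locs = torch.tensor([-2.0, 0.0, 3.0])    # component means
    scales = torch.ones(3)                   # component standard deviations
    comp = torch.distributions.Normal(locs, scales)
    log_joint = comp.log_prob(x.unsqueeze(-1)).T + probs.log().unsqueeze(-1)  # (num_mix, batch)
    log_marginal = torch.logsumexp(log_joint, dim=0)   # log p(x), shape (batch,)
    log_posterior = log_joint - log_marginal           # log p(z=i|x), shape (num_mix, batch)
    return log_marginal, log_posterior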
| 8,520 | 32.415686 | 116 | py |
| pixyz | pixyz-main/pixyz/distributions/custom_distributions.py |
from ..utils import get_dict_values, sum_samples
from .distributions import Distribution
class CustomProb(Distribution):
"""This distribution is constructed by user-defined probability density/mass function.
Note that this distribution cannot perform sampling.
Examples
--------
>>> import torch
>>> # banana shaped distribution
>>> def log_prob(z):
... z1, z2 = torch.chunk(z, chunks=2, dim=1)
... norm = torch.sqrt(z1 ** 2 + z2 ** 2)
... exp1 = torch.exp(-0.5 * ((z1 - 2) / 0.6) ** 2)
... exp2 = torch.exp(-0.5 * ((z1 + 2) / 0.6) ** 2)
... u = 0.5 * ((norm - 2) / 0.4) ** 2 - torch.log(exp1 + exp2)
... return -u
...
>>> p = CustomProb(log_prob, var=["z"])
>>> loss = p.log_prob().eval({"z": torch.randn(10, 2)})
"""
def __init__(self, log_prob_function, var, distribution_name="Custom PDF", **kwargs):
"""
Parameters
----------
log_prob_function : function
User-defined log-probability density/mass function.
var : list
Variables of this distribution.
distribution_name : :obj:`str`, optional
Name of this distribution.
**kwargs :
Arbitrary keyword arguments.
"""
self._log_prob_function = log_prob_function
self._distribution_name = distribution_name
super().__init__(var=var, **kwargs)
@property
def log_prob_function(self):
"""User-defined log-probability density/mass function."""
return self._log_prob_function
@property
def input_var(self):
return self.var
@property
def distribution_name(self):
return self._distribution_name
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
x_dict = get_dict_values(x_dict, self._var, return_dict=True)
log_prob = self.log_prob_function(**x_dict)
if sum_features:
log_prob = sum_samples(log_prob, feature_dims)
return log_prob
def sample(self, x_dict={}, return_all=True, **kwargs):
raise NotImplementedError()
@property
def has_reparam(self):
return False
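# --- Illustrative sketch (not part of the original pixyz source) ---------------------------
# Evaluating a CustomProb on a 2-D grid, e.g. to visualise an unnormalised density. It reuses
# the banana-shaped log_prob from the docstring above; the helper name, grid range and
# resolution are arbitrary choices for demonstration.
def _custom_prob_grid_sketch():
    import torch
    def log_prob(z):
        z1, z2 = torch.chunk(z, chunks=2, dim=1)
        norm = torch.sqrt(z1 ** 2 + z2 ** 2)
        exp1 = torch.exp(-0.5 * ((z1 - 2) / 0.6) ** 2)
        exp2 = torch.exp(-0.5 * ((z1 + 2) / 0.6) ** 2)
        return -(0.5 * ((norm - 2) / 0.4) ** 2 - torch.log(exp1 + exp2))
    p = CustomProb(log_prob, var=["z"])
    xs = torch.linspace(-4, 4, 100)
    grid = torch.stack(torch.meshgrid(xs, xs, indexing="ij"), dim=-1).reshape(-1, 2)
    # log p(z) for every grid point, reshaped back to (100, 100) for plotting
    return p.log_prob().eval({"z": grid}).reshape(100, 100)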
| 2,210 | 29.708333 | 90 | py |
| pixyz | pixyz-main/pixyz/distributions/moe.py |
from __future__ import print_function
import torch
from torch import nn
import numpy as np
from ..utils import tolist, get_dict_values
from ..distributions import Normal
class MixtureOfNormal(Normal):
r"""Mixture of normal distributions.
.. math::
p(z|x,y) = w_1 p(z|x) + w_2 p(z|y)
In this model, :math:`p(z|x)` and :math:`p(z|y)` act as `experts`, mixed with the weights :math:`w_i` given by ``weight_modalities`` (uniform by default).
References
----------
[Shi+ 2019] Variational Mixture-of-Experts Autoencoders for Multi-Modal Deep Generative Models
"""
def __init__(self, p=[], weight_modalities=None, name="p", features_shape=torch.Size()):
"""
Parameters
----------
p : :obj:`list` of :class:`pixyz.distributions.Normal`.
List of experts.
weight_modalities : :obj:`list` of :obj:`float`, optional
Mixing weight of each expert; its length must equal ``len(p)``.
If None, the experts are weighted uniformly.
name : :obj:`str`, defaults to "p"
Name of this distribution.
This name is displayed in prob_text and prob_factorized_text.
features_shape : :obj:`torch.Size` or :obj:`list`, defaults to torch.Size()
Shape of dimensions (features) of this distribution.
"""
p = tolist(p)
if len(p) == 0:
raise ValueError("p must contain at least one distribution.")
if weight_modalities is None:
weight_modalities = torch.ones(len(p)) / float(len(p))
elif len(weight_modalities) != len(p):
raise ValueError("weight_modalities must have the same length as p.")
var = p[0].var
cond_var = []
for _p in p:
if _p.var != var:
raise ValueError("All experts must have the same variable.")
cond_var += _p.cond_var
cond_var = list(set(cond_var))
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape)
self.p = nn.ModuleList(p)
self.weight_modalities = weight_modalities
def _get_expert_params(self, params_dict={}, **kwargs):
"""Get the output parameters of all experts.
Parameters
----------
params_dict : dict
**kwargs
Arbitrary keyword arguments.
Returns
-------
loc : torch.Tensor
Concatenation of mean vectors for specified experts. (n_expert, n_batch, output_dim)
scale : torch.Tensor
Concatenation of the square root of a diagonal covariance matrix for specified experts.
(n_expert, n_batch, output_dim)
"""
loc = []
scale = []
for i, _p in enumerate(self.p):
inputs_dict = get_dict_values(params_dict, _p.cond_var, True)
if len(inputs_dict) != 0:
outputs = _p.get_params(inputs_dict, **kwargs)
loc.append(outputs["loc"])
scale.append(outputs["scale"])
loc = torch.stack(loc)
scale = torch.stack(scale)
return loc, scale
def get_params(self, params_dict={}, **kwargs):
# experts
if len(params_dict) > 0:
loc, scale = self._get_expert_params(params_dict, **kwargs) # (n_expert, n_batch, output_dim)
else:
raise ValueError("get_params requires at least one input variable in params_dict.")
output_loc, output_scale = self._compute_expert_params(loc, scale)
output_dict = {"loc": output_loc, "scale": output_scale}
return output_dict
def _compute_expert_params(self, loc, scale):
"""Compute parameters for the product of experts.
Is is assumed that unspecified experts are excluded from inputs.
Parameters
----------
loc : torch.Tensor
Concatenation of mean vectors for specified experts. (n_expert, n_batch, output_dim)
scale : torch.Tensor
Concatenation of the square root of a diagonal covariance matrix for specified experts.
(n_expert, n_batch, output_dim)
Returns
-------
output_loc : torch.Tensor
Mean vectors for this distribution. (n_batch, output_dim)
output_scale : torch.Tensor
The square root of diagonal covariance matrices for this distribution. (n_batch, output_dim)
"""
num_samples = loc.shape[1]
idx_start = []
idx_end = []
for k in range(0, len(self.weight_modalities)):
if k == 0:
i_start = 0
else:
i_start = int(idx_end[k - 1])
if k == len(self.weight_modalities) - 1:
i_end = num_samples
else:
i_end = i_start + int(np.floor(num_samples * self.weight_modalities[k]))
idx_start.append(i_start)
idx_end.append(i_end)
idx_end[-1] = num_samples
output_loc = torch.cat([loc[k, idx_start[k]:idx_end[k], :] for k in range(len(self.weight_modalities))])
output_scale = torch.cat([scale[k, idx_start[k]:idx_end[k], :] for k in range(len(self.weight_modalities))])
return output_loc, output_scale
def _get_input_dict(self, x, var=None):
if var is None:
var = self.input_var
if type(x) is torch.Tensor:
checked_x = {var[0]: x}
elif type(x) is list:
# TODO: we need to check if all the elements contained in this list are torch.Tensor.
checked_x = dict(zip(var, x))
elif type(x) is dict:
# point of modification
checked_x = x
else:
raise ValueError("The type of input is not valid, got %s." % type(x))
return get_dict_values(checked_x, var, return_dict=True)
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None):
log_prob = torch.stack([w * p.get_log_prob(x_dict, sum_features=sum_features, feature_dims=feature_dims) for p, w in zip(self.p, self.weight_modalities)])
log_prob = torch.logsumexp(log_prob, dim=0)
return log_prob
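# --- Illustrative sketch (not part of the original pixyz source) ---------------------------
# The proportional batch partition used by MixtureOfNormal._compute_expert_params above: with
# two experts weighted [0.25, 0.75] and a batch of 8 samples, the first floor(8 * 0.25) = 2
# output rows are taken from expert 0 and the remaining 6 from expert 1. The helper name and
# the numbers are made up for demonstration.
def _moe_partition_sketch():
    import numpy as np
    weights = [0.25, 0.75]
    num_samples = 8
    idx_start, idx_end = [], []
    for k, w in enumerate(weights):
        i_start = 0 if k == 0 else idx_end[k - 1]
        i_end = num_samples if k == len(weights) - 1 else i_start + int(np.floor(num_samples * w))
        idx_start.append(i_start)
        idx_end.append(i_end)
    return list(zip(idx_start, idx_end))   # [(0, 2), (2, 8)]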
| 5,758 | 32.876471 | 162 | py |
| pixyz | pixyz-main/pixyz/distributions/special_distributions.py |
from __future__ import print_function
from .distributions import Distribution
class Deterministic(Distribution):
"""
Deterministic distribution (or degeneration distribution)
Examples
--------
>>> import torch
>>> class Generator(Deterministic):
... def __init__(self):
... super().__init__(var=["x"], cond_var=["z"])
... self.model = torch.nn.Linear(64, 512)
... def forward(self, z):
... return {"x": self.model(z)}
>>> p = Generator()
>>> print(p)
Distribution:
p(x|z)
Network architecture:
Generator(
name=p, distribution_name=Deterministic,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
(model): Linear(in_features=64, out_features=512, bias=True)
)
>>> sample = p.sample({"z": torch.randn(1, 64)})
>>> p.log_prob().eval(sample) # log_prob is not defined.
Traceback (most recent call last):
...
NotImplementedError: Log probability of deterministic distribution is not defined.
"""
def __init__(self, var, cond_var=[], name='p', **kwargs):
super().__init__(var=var, cond_var=cond_var, name=name, **kwargs)
@property
def distribution_name(self):
return "Deterministic"
def sample(self, x_dict={}, return_all=True, **kwargs):
input_dict = self._get_input_dict(x_dict)
output_dict = self.forward(**input_dict)
if set(output_dict.keys()) != set(self._var):
raise ValueError("Output variables are not the same as `var`.")
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
def sample_mean(self, x_dict):
return self.sample(x_dict, return_all=False)[self._var[0]]
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
raise NotImplementedError("Log probability of deterministic distribution is not defined.")
@property
def has_reparam(self):
return True
class EmpiricalDistribution(Distribution):
"""
Data distribution.
Samples from this distribution equal given inputs.
Examples
--------
>>> import torch
>>> p = EmpiricalDistribution(var=["x"])
>>> print(p)
Distribution:
p_{data}(x)
Network architecture:
EmpiricalDistribution(
name=p_{data}, distribution_name=Data distribution,
var=['x'], cond_var=[], input_var=['x'], features_shape=torch.Size([])
)
>>> sample = p.sample({"x": torch.randn(1, 64)})
"""
def __init__(self, var, name="p_{data}"):
super().__init__(var=var, cond_var=[], name=name)
@property
def distribution_name(self):
return "Data distribution"
def sample(self, x_dict={}, return_all=True, **kwargs):
output_dict = self._get_input_dict(x_dict)
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
def sample_mean(self, x_dict):
return self.sample(x_dict, return_all=False)[self._var[0]]
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
raise NotImplementedError()
@property
def input_var(self):
"""
In EmpiricalDistribution, `input_var` is the same as `var`.
"""
return self.var
@property
def has_reparam(self):
return True
| 3,517 | 27.836066 | 98 | py |
| pixyz | pixyz-main/pixyz/distributions/__init__.py |
from .exponential_distributions import (
Normal,
Bernoulli,
RelaxedBernoulli,
FactorizedBernoulli,
Categorical,
RelaxedCategorical,
Multinomial,
Dirichlet,
Beta,
Laplace,
Gamma,
)
from .custom_distributions import (
CustomProb,
)
from .special_distributions import (
Deterministic,
EmpiricalDistribution
)
from .distributions import (
Distribution,
MultiplyDistribution,
MarginalizeVarDistribution,
ReplaceVarDistribution,
)
from .poe import ProductOfNormal, ElementWiseProductOfNormal
from .moe import MixtureOfNormal
from .mixture_distributions import MixtureModel
from .flow_distribution import TransformedDistribution, InverseTransformedDistribution
__all__ = [
'Distribution',
'CustomProb',
'Deterministic',
'EmpiricalDistribution',
'Normal',
'Bernoulli',
'RelaxedBernoulli',
'FactorizedBernoulli',
'Categorical',
'RelaxedCategorical',
'Multinomial',
'Dirichlet',
'Beta',
'Laplace',
'Gamma',
'MultiplyDistribution',
'ReplaceVarDistribution',
'MarginalizeVarDistribution',
'ProductOfNormal',
'ElementWiseProductOfNormal',
'MixtureOfNormal',
'MixtureModel',
'TransformedDistribution',
'InverseTransformedDistribution',
]
| 1,300 | 19.015385 | 86 | py |
| pixyz | pixyz-main/pixyz/distributions/flow_distribution.py |
import torch
from ..distributions import Distribution
from ..utils import get_dict_values
class TransformedDistribution(Distribution):
r"""
Convert flow transformations to distributions.
.. math::
p(z=f_{flow}(x)),
where :math:`x \sim p_{prior}(x)`.
Once initializing, it can be handled as a distribution module.
"""
def __init__(self, prior, flow, var, name="p"):
if flow.in_features:
features_shape = [flow.in_features]
else:
features_shape = torch.Size()
super().__init__(var=var,
cond_var=prior.cond_var, name=name, features_shape=features_shape)
self.prior = prior
self.flow = flow # FlowList
self._flow_input_var = list(prior.var)
self.stored_x = {}
@property
def distribution_name(self):
return "TransformedDistribution"
@property
def flow_input_var(self):
"""list: Input variables of the flow module."""
return self._flow_input_var
@property
def prob_factorized_text(self):
flow_text = "{}=f_{{flow}}({})".format(self.var[0], self.flow_input_var[0])
prob_text = "{}({})".format(self._name, flow_text)
return prob_text
@property
def logdet_jacobian(self):
"""
Get log-determinant Jacobian.
Before calling this, you should run :attr:`forward` or :attr:`update_jacobian` methods to calculate and
store log-determinant Jacobian.
"""
return self.flow.logdet_jacobian
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
compute_jacobian=True, **kwargs):
# sample from the prior
sample_dict = self.prior.sample(x_dict, batch_n=batch_n, sample_shape=sample_shape, return_all=False, **kwargs)
# flow transformation
_x = get_dict_values(sample_dict, self.flow_input_var)[0]
z = self.forward(_x, compute_jacobian=compute_jacobian)
output_dict = {self.var[0]: z}
output_dict.update(sample_dict)
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
@property
def has_reparam(self):
return self.prior.has_reparam
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, compute_jacobian=False, **kwargs):
"""
It calculates the log-likelihood for a given z.
If a flow module has no inverse method, it only supports the previously sampled z-values.
"""
inf_dict = self._inference(x_dict, compute_jacobian=compute_jacobian)
# prior
log_prob_prior = self.prior.get_log_prob(inf_dict, sum_features=sum_features, feature_dims=feature_dims,
**kwargs)
return log_prob_prior - self.logdet_jacobian
def _inference(self, x_dict, return_all=True, compute_jacobian=False):
# flow transformation
_z = get_dict_values(x_dict, self.var)
_y = get_dict_values(x_dict, self.cond_var, return_dict=True)
try:
x = self.inverse(_z[0])
except NotImplementedError:
hash_z = hash(_z[0])
if hash_z not in self.stored_x:
raise Exception("Cannot calculate x because it is not z used in the previous sample.")
x = self.stored_x[hash_z]
self.stored_x.pop(hash_z)
output_dict = {self._flow_input_var[0]: x,
self.var[0]: _z}
output_dict.update(_y)
# flow
if compute_jacobian:
self(x, compute_jacobian=True)
if return_all:
output_dict.update(x_dict)
return output_dict
def forward(self, x, y=None, compute_jacobian=True):
"""
Forward propagation of flow layers.
Parameters
----------
x : torch.Tensor
Input data.
y : torch.Tensor, defaults to None
Data for conditioning.
compute_jacobian : bool, defaults to True
Whether to calculate and store log-determinant Jacobian.
If true, calculated Jacobian values are stored in :attr:`logdet_jacobian`.
Returns
-------
z : torch.Tensor
"""
# hotfix: Suppress warnings from pytorch about mixed memory operations
z = self.flow.forward(x=x, y=y, compute_jacobian=compute_jacobian).contiguous()
self.stored_x.clear()
self.stored_x[hash(z)] = x
return z
def inverse(self, z, y=None):
"""
Backward (inverse) propagation of flow layers.
In this method, log-determinant Jacobian is not calculated.
Parameters
----------
z : torch.Tensor
Input data.
y : torch.Tensor, defaults to None
Data for conditioning.
Returns
-------
x : torch.Tensor
"""
return self.flow.inverse(z=z, y=y)
class InverseTransformedDistribution(Distribution):
r"""
Convert inverse flow transformations to distributions.
.. math::
p(x=f^{-1}_{flow}(z)),
where :math:`z \sim p_{prior}(z)`.
Once initializing, it can be handled as a distribution module.
Moreover, this distribution can take a conditional variable.
.. math::
p(x=f^{-1}_{flow}(z, y)),
where :math:`z \sim p_{prior}(z)` and :math:`y` is given.
"""
def __init__(self, prior, flow, var, cond_var=[], name="p"):
if flow.in_features:
features_shape = [flow.in_features]
else:
features_shape = torch.Size()
super().__init__(var, cond_var=cond_var, name=name, features_shape=features_shape)
self.prior = prior
self.flow = flow # FlowList
self._flow_output_var = list(prior.var)
@property
def distribution_name(self):
return "InverseTransformedDistribution"
@property
def flow_output_var(self):
return self._flow_output_var
@property
def prob_factorized_text(self):
var_text = ','.join(self.flow_output_var + self.cond_var)
flow_text = "{}=f^{{-1}}_{{flow}}({})".format(self.var[0], var_text)
prob_text = "{}({})".format(self._name, flow_text)
return prob_text
@property
def logdet_jacobian(self):
"""
Get log-determinant Jacobian.
Before calling this, you should run :attr:`forward` or :attr:`update_jacobian` methods to calculate and
store log-determinant Jacobian.
"""
return self.flow.logdet_jacobian
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
return_hidden=True, sample_mean=False, **kwargs):
# sample from the prior
sample_dict = self.prior.sample(x_dict, batch_n=batch_n, sample_shape=sample_shape, return_all=False,
reparam=reparam, sample_mean=sample_mean, **kwargs)
# inverse flow transformation
_z = get_dict_values(sample_dict, self.flow_output_var)
_y = get_dict_values(x_dict, self.cond_var)
if len(_y) == 0:
x = self.inverse(_z[0])
else:
x = self.inverse(_z[0], y=_y[0])
output_dict = {self.var[0]: x}
if return_hidden:
output_dict.update(sample_dict)
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
@property
def has_reparam(self):
return self.prior.has_reparam
def inference(self, x_dict, return_all=True, compute_jacobian=False):
# flow transformation
_x = get_dict_values(x_dict, self.var)
_y = get_dict_values(x_dict, self.cond_var)
if len(_y) == 0:
z = self.forward(_x[0], compute_jacobian=compute_jacobian)
else:
z = self.forward(_x[0], y=_y[0], compute_jacobian=compute_jacobian)
output_dict = {self.flow_output_var[0]: z}
if return_all:
output_dict.update(x_dict)
return output_dict
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
# flow
output_dict = self.inference(x_dict, return_all=True, compute_jacobian=True)
# prior
log_prob_prior = self.prior.get_log_prob(output_dict, sum_features=sum_features, feature_dims=feature_dims,
**kwargs)
return log_prob_prior + self.logdet_jacobian
def forward(self, x, y=None, compute_jacobian=True):
"""
Forward propagation of flow layers.
Parameters
----------
x : torch.Tensor
Input data.
y : torch.Tensor, defaults to None
Data for conditioning.
compute_jacobian : bool, defaults to True
Whether to calculate and store log-determinant Jacobian.
If true, calculated Jacobian values are stored in :attr:`logdet_jacobian`.
Returns
-------
z : torch.Tensor
"""
# hotfix: Suppress warnings from pytorch about mixed memory operations
return self.flow.forward(x=x, y=y, compute_jacobian=compute_jacobian).contiguous()
def inverse(self, z, y=None):
"""
Backward (inverse) propagation of flow layers.
In this method, log-determinant Jacobian is not calculated.
Parameters
----------
z : torch.Tensor
Input data.
y : torch.Tensor, defaults to None
Data for conditioning.
Returns
-------
x : torch.Tensor
"""
return self.flow.inverse(z=z, y=y)
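# --- Illustrative sketch (not part of the original pixyz source) ---------------------------
# The change-of-variables identities behind TransformedDistribution.get_log_prob
# (log p(z) = log p_prior(x) - logdet J) and InverseTransformedDistribution.get_log_prob
# (log p(x) = log p_prior(z) + logdet J), checked with a hand-written affine bijection
# z = a * x + b in plain torch. The helper name and the constants are arbitrary.
def _change_of_variables_sketch():
    import torch
    prior = torch.distributions.Normal(torch.zeros(1), torch.ones(1))
    a, b = torch.tensor(2.0), torch.tensor(0.5)
    x = prior.sample((4,))                                          # x ~ p_prior(x), shape (4, 1)
    z = a * x + b                                                   # forward flow
    log_p_z = prior.log_prob(x).squeeze(-1) - torch.log(a.abs())    # log p(z) via change of variables
    # closed form: z ~ Normal(b, |a|); the two log-densities should agree up to numerical error
    ref = torch.distributions.Normal(b, a.abs()).log_prob(z).squeeze(-1)
    return torch.allclose(log_p_z, ref)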
| 9,870 | 28.912121 | 119 | py |
| pixyz | pixyz-main/pixyz/flows/__init__.py |
from .flows import (
Flow,
FlowList,
)
from .normalizing_flows import (
PlanarFlow
)
from .coupling import (
AffineCoupling,
)
from .conv import (
ChannelConv
)
from .operations import (
Squeeze,
Unsqueeze,
Permutation,
Shuffle,
Reverse,
Flatten,
Preprocess,
)
from .normalizations import (
BatchNorm1d,
BatchNorm2d,
ActNorm2d,
)
__all__ = [
'Flow',
'FlowList',
'PlanarFlow',
'AffineCoupling',
'ChannelConv',
'Squeeze',
'Unsqueeze',
'Permutation',
'Shuffle',
'Reverse',
'Flatten',
'Preprocess',
'BatchNorm1d',
'BatchNorm2d',
'ActNorm2d',
]
| 666 | 12.078431 | 32 | py |