import copy
import math

import torch
from torch import nn
from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm

import attentions
import commons
import modules
import monotonic_align
from commons import get_padding, init_weights

AVAILABLE_FLOW_TYPES = [
    "pre_conv",
    "pre_conv2",
    "fft",
    "mono_layer_inter_residual",
    "mono_layer_post_residual",
]

AVAILABLE_DURATION_DISCRIMINATOR_TYPES = [
    "dur_disc_1",
    "dur_disc_2",
]
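
# Mapping from flow-type names to the layers constructed in
# ResidualCouplingTransformersBlock below (documentation only):
#   "pre_conv"                  -> ResidualCouplingTransformersLayer
#   "pre_conv2"                 -> ResidualCouplingTransformersLayer2
#   "fft"                       -> FFTransformerCouplingLayer
#   "mono_layer_inter_residual" -> ResidualCouplingLayer + MonoTransformerFlowLayer
#   "mono_layer_post_residual"  -> same, with residual_connection=True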


class StochasticDurationPredictor(nn.Module):
    """Flow-based stochastic duration predictor (VITS). Training mode returns a
    negative log-likelihood term; reverse mode samples log-durations."""

    def __init__(
        self,
        in_channels,
        filter_channels,
        kernel_size,
        p_dropout,
        n_flows=4,
        gin_channels=0,
    ):
        super().__init__()
        # NOTE: filter_channels is overridden with in_channels here, mirroring
        # upstream VITS; the argument is kept for checkpoint compatibility.
        filter_channels = in_channels
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.log_flow = modules.Log()
        self.flows = nn.ModuleList()
        self.flows.append(modules.ElementwiseAffine(2))
        for i in range(n_flows):
            self.flows.append(
                modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)
            )
            self.flows.append(modules.Flip())

        self.post_pre = nn.Conv1d(1, filter_channels, 1)
        self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
        self.post_convs = modules.DDSConv(
            filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout
        )
        self.post_flows = nn.ModuleList()
        self.post_flows.append(modules.ElementwiseAffine(2))
        for i in range(4):
            self.post_flows.append(
                modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)
            )
            self.post_flows.append(modules.Flip())

        self.pre = nn.Conv1d(in_channels, filter_channels, 1)
        self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
        self.convs = modules.DDSConv(
            filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout
        )
        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, filter_channels, 1)

    def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
        x = torch.detach(x)
        x = self.pre(x)
        if g is not None:
            g = torch.detach(g)
            x = x + self.cond(g)
        x = self.convs(x, x_mask)
        x = self.proj(x) * x_mask

        if not reverse:
            flows = self.flows
            assert w is not None

            # posterior flow over durations (variational dequantization)
            logdet_tot_q = 0
            h_w = self.post_pre(w)
            h_w = self.post_convs(h_w, x_mask)
            h_w = self.post_proj(h_w) * x_mask
            e_q = (
                torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype)
                * x_mask
            )
            z_q = e_q
            for flow in self.post_flows:
                z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
                logdet_tot_q += logdet_q
            z_u, z1 = torch.split(z_q, [1, 1], 1)
            u = torch.sigmoid(z_u) * x_mask
            z0 = (w - u) * x_mask
            logdet_tot_q += torch.sum(
                (F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]
            )
            logq = (
                torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q**2)) * x_mask, [1, 2])
                - logdet_tot_q
            )

            logdet_tot = 0
            z0, logdet = self.log_flow(z0, x_mask)
            logdet_tot += logdet
            z = torch.cat([z0, z1], 1)
            for flow in flows:
                z, logdet = flow(z, x_mask, g=x, reverse=reverse)
                logdet_tot = logdet_tot + logdet
            nll = (
                torch.sum(0.5 * (math.log(2 * math.pi) + (z**2)) * x_mask, [1, 2])
                - logdet_tot
            )
            return nll + logq  # [b]
        else:
            flows = list(reversed(self.flows))
            flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
            z = (
                torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype)
                * noise_scale
            )
            for flow in flows:
                z = flow(z, x_mask, g=x, reverse=reverse)
            z0, z1 = torch.split(z, [1, 1], 1)
            logw = z0
            return logw
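
# Usage sketch (illustrative only; shapes follow the conventions above, with
# x: [b, in_channels, t] and x_mask: [b, 1, t]):
#
#   dp = StochasticDurationPredictor(192, 192, 3, 0.5, 4)
#   nll = dp(x, x_mask, w=durations)                     # training: NLL term, shape [b]
#   logw = dp(x, x_mask, reverse=True, noise_scale=0.8)  # inference: sampled log-durations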


class DurationPredictor(nn.Module):
    """Deterministic convolutional duration predictor; returns log-durations."""

    def __init__(
        self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0
    ):
        super().__init__()

        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.gin_channels = gin_channels

        self.drop = nn.Dropout(p_dropout)
        self.conv_1 = nn.Conv1d(
            in_channels, filter_channels, kernel_size, padding=kernel_size // 2
        )
        self.norm_1 = modules.LayerNorm(filter_channels)
        self.conv_2 = nn.Conv1d(
            filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
        )
        self.norm_2 = modules.LayerNorm(filter_channels)
        self.proj = nn.Conv1d(filter_channels, 1, 1)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, in_channels, 1)

    def forward(self, x, x_mask, g=None):
        x = torch.detach(x)
        if g is not None:
            g = torch.detach(g)
            x = x + self.cond(g)
        x = self.conv_1(x * x_mask)
        x = torch.relu(x)
        x = self.norm_1(x)
        x = self.drop(x)
        x = self.conv_2(x * x_mask)
        x = torch.relu(x)
        x = self.norm_2(x)
        x = self.drop(x)
        x = self.proj(x * x_mask)
        return x * x_mask


class DurationDiscriminatorV1(nn.Module):  # vits2
    """Duration discriminator ("dur_disc_1"): scores (text encoding, duration)
    pairs. Unlike V2, no ReLU/LayerNorm is applied between the convolutions
    (the LayerNorm and Dropout modules are defined but unused), and speaker
    conditioning (gin_channels) is not used."""

    def __init__(
        self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0
    ):
        super().__init__()

        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.gin_channels = gin_channels

        self.drop = nn.Dropout(p_dropout)
        self.conv_1 = nn.Conv1d(
            in_channels, filter_channels, kernel_size, padding=kernel_size // 2
        )
        self.conv_2 = nn.Conv1d(
            filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
        )
        self.dur_proj = nn.Conv1d(1, filter_channels, 1)

        self.pre_out_conv_1 = nn.Conv1d(
            2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
        )
        self.pre_out_norm_1 = modules.LayerNorm(filter_channels)
        self.pre_out_conv_2 = nn.Conv1d(
            filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
        )
        self.pre_out_norm_2 = modules.LayerNorm(filter_channels)

        self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())

    def forward_probability(self, x, x_mask, dur, g=None):
        dur = self.dur_proj(dur)
        x = torch.cat([x, dur], dim=1)
        x = self.pre_out_conv_1(x * x_mask)
        x = self.pre_out_conv_2(x * x_mask)
        x = x * x_mask
        x = x.transpose(1, 2)
        output_prob = self.output_layer(x)
        return output_prob

    def forward(self, x, x_mask, dur_r, dur_hat, g=None):
        x = torch.detach(x)
        x = self.conv_1(x * x_mask)
        x = self.conv_2(x * x_mask)

        # score the real and the predicted durations with shared features
        output_probs = []
        for dur in [dur_r, dur_hat]:
            output_prob = self.forward_probability(x, x_mask, dur, g)
            output_probs.append(output_prob)

        return output_probs


class DurationDiscriminatorV2(nn.Module):  # vits2
    """Duration discriminator ("dur_disc_2"): like V1 but with ReLU + LayerNorm
    after each convolution. Speaker conditioning (gin_channels) is not used."""

    def __init__(
        self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0
    ):
        super().__init__()

        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.gin_channels = gin_channels

        self.conv_1 = nn.Conv1d(
            in_channels, filter_channels, kernel_size, padding=kernel_size // 2
        )
        self.norm_1 = modules.LayerNorm(filter_channels)
        self.conv_2 = nn.Conv1d(
            filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
        )
        self.norm_2 = modules.LayerNorm(filter_channels)
        self.dur_proj = nn.Conv1d(1, filter_channels, 1)

        self.pre_out_conv_1 = nn.Conv1d(
            2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
        )
        self.pre_out_norm_1 = modules.LayerNorm(filter_channels)
        self.pre_out_conv_2 = nn.Conv1d(
            filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
        )
        self.pre_out_norm_2 = modules.LayerNorm(filter_channels)

        self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())

    def forward_probability(self, x, x_mask, dur, g=None):
        dur = self.dur_proj(dur)
        x = torch.cat([x, dur], dim=1)
        x = self.pre_out_conv_1(x * x_mask)
        x = torch.relu(x)
        x = self.pre_out_norm_1(x)
        x = self.pre_out_conv_2(x * x_mask)
        x = torch.relu(x)
        x = self.pre_out_norm_2(x)
        x = x * x_mask
        x = x.transpose(1, 2)
        output_prob = self.output_layer(x)
        return output_prob

    def forward(self, x, x_mask, dur_r, dur_hat, g=None):
        x = torch.detach(x)
        x = self.conv_1(x * x_mask)
        x = torch.relu(x)
        x = self.norm_1(x)
        x = self.conv_2(x * x_mask)
        x = torch.relu(x)
        x = self.norm_2(x)

        output_probs = []
        for dur in [dur_r, dur_hat]:
            output_prob = self.forward_probability(x, x_mask, dur, g)
            # append the tensor itself (as V1 does) instead of wrapping it in a
            # one-element list, so both discriminator variants return the same
            # structure to the loss code
            output_probs.append(output_prob)

        return output_probs


class TextEncoder(nn.Module):
    """Transformer text encoder; produces hidden states plus the prior
    distribution parameters (m, logs) for each token."""

    def __init__(
        self,
        n_vocab,
        out_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        gin_channels=0,
    ):
        super().__init__()
        self.n_vocab = n_vocab
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.gin_channels = gin_channels
        self.emb = nn.Embedding(n_vocab, hidden_channels)
        nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)

        self.encoder = attentions.Encoder(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            gin_channels=self.gin_channels,
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
            x.dtype
        )

        x = self.encoder(x * x_mask, x_mask, g=g)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return x, m, logs, x_mask
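
# Usage sketch (illustrative): for token ids x: [b, t_text] and lengths x_lengths: [b],
#   h, m_p, logs_p, x_mask = enc_p(x, x_lengths)
# returns h: [b, hidden_channels, t_text] and a per-token prior N(m_p, exp(logs_p)^2).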


class ResidualCouplingTransformersLayer2(nn.Module):  # vits2
    """Affine coupling layer ("pre_conv2"): a single-layer transformer refines
    the conditioning half after the 1x1 pre-convolution, before the WaveNet
    encoder."""

    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        p_dropout=0,
        gin_channels=0,
        mean_only=False,
    ):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.pre_transformer = attentions.Encoder(
            hidden_channels,
            hidden_channels,
            n_heads=2,
            n_layers=1,
            kernel_size=kernel_size,
            p_dropout=p_dropout,
        )
        self.enc = modules.WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            p_dropout=p_dropout,
            gin_channels=gin_channels,
        )

        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0) * x_mask
        h = h + self.pre_transformer(h * x_mask, x_mask)  # vits2 residual connection
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)
        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x
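
# The coupling layers in this file implement the same masked affine transform:
# the first half x0 passes through unchanged and parameterizes (m, logs) for the
# second half,
#   forward: x1' = m + x1 * exp(logs),   reverse: x1 = (x1' - m) * exp(-logs),
# with log-determinant sum(logs) (zero when mean_only=True). The residual
# variant of MonoTransformerFlowLayer is the one exception; see its comments.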


class ResidualCouplingTransformersLayer(nn.Module):  # vits2
    """Affine coupling layer ("pre_conv"): a small transformer refines the
    conditioning half before the 1x1 pre-convolution and WaveNet encoder."""

    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        p_dropout=0,
        gin_channels=0,
        mean_only=False,
    ):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre_transformer = attentions.Encoder(
            self.half_channels,
            self.half_channels,
            n_heads=2,
            n_layers=2,
            kernel_size=3,
            p_dropout=0.1,
            window_size=None,
        )

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = modules.WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            p_dropout=p_dropout,
            gin_channels=gin_channels,
        )

        self.post_transformer = attentions.Encoder(
            self.hidden_channels,
            self.hidden_channels,
            n_heads=2,
            n_layers=2,
            kernel_size=3,
            p_dropout=0.1,
            window_size=None,
        )

        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        x0_ = self.pre_transformer(x0 * x_mask, x_mask)  # vits2
        x0_ = x0_ + x0  # residual connection
        h = self.pre(x0_) * x_mask
        h = self.enc(h, x_mask, g=g)

        # NOTE: self.post_transformer is defined above but intentionally not
        # applied here; a post-WN residual could be enabled with
        #   h = h + self.post_transformer(h, x_mask)

        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)
        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x


class FFTransformerCouplingLayer(nn.Module):  # vits2
    """Affine coupling layer ("fft"): an FFT transformer block replaces the
    WaveNet encoder, with a residual connection around it."""

    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        n_layers,
        n_heads,
        p_dropout=0,
        filter_channels=768,
        mean_only=False,
        gin_channels=0,
    ):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = attentions.FFT(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            isflow=True,
            gin_channels=gin_channels,
        )
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
        h = self.pre(x0) * x_mask
        h_ = self.enc(h, x_mask, g=g)
        h = h_ + h  # residual connection
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)

        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x


class MonoTransformerFlowLayer(nn.Module):  # vits2
    """Transformer-only coupling layer used by the "mono_layer_*" flow types.
    With residual_connection=True the layer adds its output to its input, which
    changes the transform and its log-determinant (see the comments below)."""

    def __init__(
        self,
        channels,
        hidden_channels,
        mean_only=False,
        residual_connection=False,
    ):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.half_channels = channels // 2
        self.mean_only = mean_only
        self.residual_connection = residual_connection

        self.pre_transformer = attentions.Encoder(
            self.half_channels,
            self.half_channels,
            n_heads=2,
            n_layers=2,
            kernel_size=3,
            p_dropout=0.1,
            window_size=None,
        )

        self.post = nn.Conv1d(
            self.half_channels, self.half_channels * (2 - mean_only), 1
        )
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        if self.residual_connection:
            if not reverse:
                x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
                x0_ = self.pre_transformer(x0 * x_mask, x_mask)
                stats = self.post(x0_) * x_mask
                if not self.mean_only:
                    m, logs = torch.split(stats, [self.half_channels] * 2, 1)
                else:
                    m = stats
                    logs = torch.zeros_like(m)
                x1 = m + x1 * torch.exp(logs) * x_mask
                x_ = torch.cat([x0, x1], 1)
                x = x + x_  # residual: y0 = 2 * x0, y1 = m + x1 * (1 + exp(logs))
                logdet = torch.sum(torch.log(torch.exp(logs) + 1), [1, 2])
                logdet = logdet + math.log(2) * (x0.shape[1] * x0.shape[2])
                return x, logdet
            else:
                x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
                x0 = x0 / 2  # undo the residual doubling of the pass-through half
                x0_ = self.pre_transformer(x0 * x_mask, x_mask)
                stats = self.post(x0_) * x_mask
                if not self.mean_only:
                    m, logs = torch.split(stats, [self.half_channels] * 2, 1)
                else:
                    m = stats
                    logs = torch.zeros_like(m)
                # invert y1 = m + x1 * (1 + exp(logs)); the original code divided
                # by (1 + exp(-logs)), which does not match the forward pass
                x1_ = ((x1 - m) / (1 + torch.exp(logs))) * x_mask
                x = torch.cat([x0, x1_], 1)
                return x
        else:
            x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
            x0_ = self.pre_transformer(x0 * x_mask, x_mask)
            h = x0_ + x0  # residual connection
            stats = self.post(h) * x_mask
            if not self.mean_only:
                m, logs = torch.split(stats, [self.half_channels] * 2, 1)
            else:
                m = stats
                logs = torch.zeros_like(m)
            if not reverse:
                x1 = m + x1 * torch.exp(logs) * x_mask
                x = torch.cat([x0, x1], 1)
                logdet = torch.sum(logs, [1, 2])
                return x, logdet
            else:
                x1 = (x1 - m) * torch.exp(-logs) * x_mask
                x = torch.cat([x0, x1], 1)
                return x
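
# Derivation for the residual variant above (reference): with s = exp(logs),
# y1 = x1 + (m + s * x1) = m + (1 + s) * x1, so dy1/dx1 = 1 + s and the
# log-determinant contribution is sum(log(1 + exp(logs))); the pass-through
# half contributes log(2) per element because y0 = x0 + x0 = 2 * x0.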


class ResidualCouplingTransformersBlock(nn.Module):  # vits2
    """Stack of n_flows coupling flows interleaved with channel flips; the
    layer type is selected by transformer_flow_type (see AVAILABLE_FLOW_TYPES)."""

    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        n_flows=4,
        gin_channels=0,
        use_transformer_flows=False,
        transformer_flow_type="pre_conv",
    ):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        if use_transformer_flows:
            if transformer_flow_type == "pre_conv":
                for i in range(n_flows):
                    self.flows.append(
                        ResidualCouplingTransformersLayer(
                            channels,
                            hidden_channels,
                            kernel_size,
                            dilation_rate,
                            n_layers,
                            gin_channels=gin_channels,
                            mean_only=True,
                        )
                    )
                    self.flows.append(modules.Flip())
            elif transformer_flow_type == "pre_conv2":
                for i in range(n_flows):
                    self.flows.append(
                        ResidualCouplingTransformersLayer2(
                            channels,
                            hidden_channels,
                            kernel_size,
                            dilation_rate,
                            n_layers,
                            gin_channels=gin_channels,
                            mean_only=True,
                        )
                    )
                    self.flows.append(modules.Flip())
            elif transformer_flow_type == "fft":
                for i in range(n_flows):
                    # NOTE: FFTransformerCouplingLayer's positional parameters are
                    # (channels, hidden_channels, kernel_size, n_layers, n_heads,
                    # ...), so dilation_rate and n_layers below land in the
                    # n_layers and n_heads slots; kept as-is for compatibility
                    # with existing checkpoints.
                    self.flows.append(
                        FFTransformerCouplingLayer(
                            channels,
                            hidden_channels,
                            kernel_size,
                            dilation_rate,
                            n_layers,
                            gin_channels=gin_channels,
                            mean_only=True,
                        )
                    )
                    self.flows.append(modules.Flip())
            elif transformer_flow_type == "mono_layer_inter_residual":
                for i in range(n_flows):
                    self.flows.append(
                        modules.ResidualCouplingLayer(
                            channels,
                            hidden_channels,
                            kernel_size,
                            dilation_rate,
                            n_layers,
                            gin_channels=gin_channels,
                            mean_only=True,
                        )
                    )
                    self.flows.append(modules.Flip())
                    self.flows.append(
                        MonoTransformerFlowLayer(
                            channels, hidden_channels, mean_only=True
                        )
                    )
            elif transformer_flow_type == "mono_layer_post_residual":
                for i in range(n_flows):
                    self.flows.append(
                        modules.ResidualCouplingLayer(
                            channels,
                            hidden_channels,
                            kernel_size,
                            dilation_rate,
                            n_layers,
                            gin_channels=gin_channels,
                            mean_only=True,
                        )
                    )
                    self.flows.append(modules.Flip())
                    self.flows.append(
                        MonoTransformerFlowLayer(
                            channels,
                            hidden_channels,
                            mean_only=True,
                            residual_connection=True,
                        )
                    )
        else:
            for i in range(n_flows):
                self.flows.append(
                    modules.ResidualCouplingLayer(
                        channels,
                        hidden_channels,
                        kernel_size,
                        dilation_rate,
                        n_layers,
                        gin_channels=gin_channels,
                        mean_only=True,
                    )
                )
                self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x
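
# Usage sketch (illustrative): SynthesizerTrn uses this block to map the
# posterior latent into the prior space and back,
#   z_p = flow(z, y_mask, g=g)                 # training direction
#   z = flow(z_p, y_mask, g=g, reverse=True)   # inference direction
# The forward direction drops the per-layer log-determinants; for the
# mean-only, non-residual layers these are zero anyway.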


class ResidualCouplingBlock(nn.Module):
    """Plain VITS flow: n_flows mean-only coupling layers with channel flips."""

    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        n_flows=4,
        gin_channels=0,
    ):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for i in range(n_flows):
            self.flows.append(
                modules.ResidualCouplingLayer(
                    channels,
                    hidden_channels,
                    kernel_size,
                    dilation_rate,
                    n_layers,
                    gin_channels=gin_channels,
                    mean_only=True,
                )
            )
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x


class PosteriorEncoder(nn.Module):
    """WaveNet posterior encoder: maps linear spectrograms to q(z | y)."""

    def __init__(
        self,
        in_channels,
        out_channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        gin_channels=0,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = modules.WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            gin_channels=gin_channels,
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask
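
# The returned z is a reparameterized sample from the posterior N(m, exp(logs)^2),
# i.e. z = m + eps * exp(logs) with eps ~ N(0, I), masked to the valid frames.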


class Generator(torch.nn.Module):
    """HiFi-GAN style decoder: upsamples the latent z to a raw waveform."""

    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels=0,
    ):
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            # average the parallel multi-receptive-field residual blocks
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        print("Removing weight norm...")
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
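
# Note: the total upsampling factor is prod(upsample_rates); e.g. the common
# VITS setting [8, 8, 2, 2] turns one latent frame into 256 waveform samples
# (the STFT hop length).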


class DiscriminatorP(torch.nn.Module):
    """HiFi-GAN period discriminator: folds the waveform into a 2d map of shape
    [b, c, t // period, period] and applies strided 2d convolutions."""

    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList(
            [
                norm_f(
                    Conv2d(
                        1,
                        32,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        32,
                        128,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        128,
                        512,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        512,
                        1024,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        1024,
                        1024,
                        (kernel_size, 1),
                        1,
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
            ]
        )
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d: pad to a multiple of the period, then fold
        b, c, t = x.shape
        if t % self.period != 0:
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap
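
# Example of the 1d-to-2d fold above (illustrative): with t = 100 and period = 3,
# the input is reflect-padded to t = 102 and viewed as [b, c, 34, 3]; each row of
# the 2d map holds `period` consecutive samples, so the (kernel_size, 1)
# convolutions compare samples that lie one period apart.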


class DiscriminatorS(torch.nn.Module):
    """HiFi-GAN scale discriminator: grouped 1d convolutions on the waveform."""

    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList(
            [
                norm_f(Conv1d(1, 16, 15, 1, padding=7)),
                norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
                norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
                norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
                norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
                norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
            ]
        )
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class MultiPeriodDiscriminator(torch.nn.Module):
    """Ensemble of one scale discriminator and five period discriminators."""

    def __init__(self, use_spectral_norm=False):
        super(MultiPeriodDiscriminator, self).__init__()
        periods = [2, 3, 5, 7, 11]

        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [
            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
        ]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class SynthesizerTrn(nn.Module):
    """
    Synthesizer for Training
    """

    def __init__(
        self,
        n_vocab,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        n_speakers=0,
        gin_channels=0,
        use_sdp=True,
        **kwargs,
    ):
        super().__init__()
        self.n_vocab = n_vocab
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.n_speakers = n_speakers
        self.gin_channels = gin_channels
        self.use_spk_conditioned_encoder = kwargs.get(
            "use_spk_conditioned_encoder", False
        )
        self.use_transformer_flows = kwargs.get("use_transformer_flows", False)
        self.transformer_flow_type = kwargs.get(
            "transformer_flow_type", "mono_layer_post_residual"
        )
        if self.use_transformer_flows:
            assert (
                self.transformer_flow_type in AVAILABLE_FLOW_TYPES
            ), f"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}"
        self.use_sdp = use_sdp

        self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False)
        self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01)
        self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6)

        self.current_mas_noise_scale = self.mas_noise_scale_initial
        if self.use_spk_conditioned_encoder and gin_channels > 0:
            self.enc_gin_channels = gin_channels
        else:
            self.enc_gin_channels = 0
        self.enc_p = TextEncoder(
            n_vocab,
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            gin_channels=self.enc_gin_channels,
        )

        self.dec = Generator(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )

        self.flow = ResidualCouplingTransformersBlock(
            inter_channels,
            hidden_channels,
            5,
            1,
            4,
            gin_channels=gin_channels,
            use_transformer_flows=self.use_transformer_flows,
            transformer_flow_type=self.transformer_flow_type,
        )

        if use_sdp:
            self.dp = StochasticDurationPredictor(
                hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels
            )
        else:
            self.dp = DurationPredictor(
                hidden_channels, 256, 3, 0.5, gin_channels=gin_channels
            )

        # NOTE: created for n_speakers > 0 so the guard matches the
        # `self.n_speakers > 0` checks in forward/infer below (the previous
        # `n_speakers > 1` guard crashed for n_speakers == 1)
        if n_speakers > 0:
            self.emb_g = nn.Embedding(n_speakers, gin_channels)

    def forward(self, x, x_lengths, y, y_lengths, sid=None):
        if self.n_speakers > 0:
            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
        else:
            g = None

        x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, g=g)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)

        with torch.no_grad():
            # negative cross-entropy between z_p and the expanded text prior
            s_p_sq_r = torch.exp(-2 * logs_p)  # [b, d, t_s]
            neg_cent1 = torch.sum(
                -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True
            )  # [b, 1, t_s]
            neg_cent2 = torch.matmul(
                -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r
            )  # [b, t_t, d] x [b, d, t_s] -> [b, t_t, t_s]
            neg_cent3 = torch.matmul(
                z_p.transpose(1, 2), (m_p * s_p_sq_r)
            )  # [b, t_t, d] x [b, d, t_s] -> [b, t_t, t_s]
            neg_cent4 = torch.sum(
                -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True
            )  # [b, 1, t_s]
            neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4

            if self.use_noise_scaled_mas:
                epsilon = (
                    torch.std(neg_cent)
                    * torch.randn_like(neg_cent)
                    * self.current_mas_noise_scale
                )
                neg_cent = neg_cent + epsilon

            attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
            attn = (
                monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))
                .unsqueeze(1)
                .detach()
            )

        w = attn.sum(2)
        if self.use_sdp:
            l_length = self.dp(x, x_mask, w, g=g)
            l_length = l_length / torch.sum(x_mask)
            logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=1.0)
            logw_ = torch.log(w + 1e-6) * x_mask
        else:
            logw_ = torch.log(w + 1e-6) * x_mask
            logw = self.dp(x, x_mask, g=g)
            l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(
                x_mask
            )  # for averaging

        # expand the prior to frame level with the alignment
        m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
        logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)

        z_slice, ids_slice = commons.rand_slice_segments(
            z, y_lengths, self.segment_size
        )
        o = self.dec(z_slice, g=g)
        return (
            o,
            l_length,
            attn,
            ids_slice,
            x_mask,
            y_mask,
            (z, z_p, m_p, logs_p, m_q, logs_q),
            (x, logw, logw_),
        )

    def infer(
        self,
        x,
        x_lengths,
        sid=None,
        noise_scale=1,
        length_scale=1,
        noise_scale_w=1.0,
        max_len=None,
    ):
        if self.n_speakers > 0:
            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
        else:
            g = None
        x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, g=g)
        if self.use_sdp:
            logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
        else:
            logw = self.dp(x, x_mask, g=g)
        w = torch.exp(logw) * x_mask * length_scale
        w_ceil = torch.ceil(w)
        y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
        y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(
            x_mask.dtype
        )
        attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
        attn = commons.generate_path(w_ceil, attn_mask)

        # expand the prior to frame level with the predicted alignment
        m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
        logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)

        z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
        z = self.flow(z_p, y_mask, g=g, reverse=True)
        o = self.dec((z * y_mask)[:, :, :max_len], g=g)
        return o, attn, y_mask, (z, z_p, m_p, logs_p)

    # NOTE: with transformer flows, the attention sub-layers inside the coupling
    # layers take no speaker embedding, so voice conversion through the flow may
    # be less reliable than with the plain coupling layers.
    def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
        assert self.n_speakers > 0, "n_speakers must be larger than 0."
        g_src = self.emb_g(sid_src).unsqueeze(-1)
        g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
        z_p = self.flow(z, y_mask, g=g_src)
        z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
        o_hat = self.dec(z_hat * y_mask, g=g_tgt)
        return o_hat, y_mask, (z, z_p, z_hat)
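

if __name__ == "__main__":
    # Minimal smoke test: a sketch, assuming this repo's attentions/commons/
    # modules packages are importable and the monotonic_align extension is
    # built; the hyperparameters below are illustrative, not a tuned config.
    net = SynthesizerTrn(
        n_vocab=100,
        spec_channels=513,
        segment_size=32,
        inter_channels=192,
        hidden_channels=192,
        filter_channels=768,
        n_heads=2,
        n_layers=6,
        kernel_size=3,
        p_dropout=0.1,
        resblock="1",
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        upsample_rates=[8, 8, 2, 2],
        upsample_initial_channel=512,
        upsample_kernel_sizes=[16, 16, 4, 4],
    )
    x = torch.randint(0, 100, (1, 50))
    x_lengths = torch.LongTensor([50])
    with torch.no_grad():
        o, attn, y_mask, _ = net.infer(x, x_lengths, noise_scale=0.667)
    print(o.shape)  # expected [1, 1, 256 * total_duration] for the rates above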