# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from argparse import Namespace
import logging
from typing import Union, Tuple, Optional

import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq.data import encoders
from fairseq.data.audio.audio_utils import (
    get_waveform as get_wav,
    convert_waveform as convert_wav,
    get_fbank,
)
import fairseq.data.audio.feature_transforms.utterance_cmvn as utt_cmvn
from fairseq.data.audio.speech_to_text_dataset import SpeechToTextDataset

logger = logging.getLogger(__name__)


class S2THubInterface(nn.Module):
    def __init__(self, cfg, task, model):
        super().__init__()
        self.cfg = cfg
        self.task = task
        self.model = model
        self.model.eval()
        self.generator = self.task.build_generator([self.model], self.cfg)

    @classmethod
    def get_model_input(cls, task, audio: Union[str, torch.Tensor]):
        input_type = task.data_cfg.hub.get("input_type", "fbank80")
        if input_type == "fbank80_w_utt_cmvn":
            if isinstance(audio, str):
                feat = utt_cmvn.UtteranceCMVN()(get_fbank(audio))
                feat = feat.unsqueeze(0)  # T x D -> 1 x T x D
            else:
                import torchaudio.compliance.kaldi as kaldi

                feat = kaldi.fbank(audio, num_mel_bins=80).numpy()  # 1 x T x D
        elif input_type in {"waveform", "standardized_waveform"}:
            if isinstance(audio, str):
                feat, sr = get_wav(audio)  # C x T
                feat, _ = convert_wav(
                    feat, sr, to_sample_rate=16_000, to_mono=True
                )  # C x T -> 1 x T
            else:
                feat = audio.numpy()
        else:
            raise ValueError(f"Unknown value: input_type = {input_type}")

        src_lengths = torch.Tensor([feat.shape[1]]).long()
        src_tokens = torch.from_numpy(feat)  # 1 x T (x D)
        if input_type == "standardized_waveform":
            with torch.no_grad():
                src_tokens = F.layer_norm(src_tokens, src_tokens.shape)

        return {
            "net_input": {
                "src_tokens": src_tokens,
                "src_lengths": src_lengths,
                "prev_output_tokens": None,
            },
            "target_lengths": None,
            "speaker": None,
        }

    @classmethod
    def detokenize(cls, task, tokens):
        text = task.tgt_dict.string(tokens)
        tkn_cfg = task.data_cfg.bpe_tokenizer
        tokenizer = encoders.build_bpe(Namespace(**tkn_cfg))
        return text if tokenizer is None else tokenizer.decode(text)

    @classmethod
    def get_prefix_token(cls, task, lang):
        prefix_size = int(task.data_cfg.prepend_tgt_lang_tag)
        prefix_tokens = None
        if prefix_size > 0:
            assert lang is not None
            lang_tag = SpeechToTextDataset.get_lang_tag_idx(lang, task.tgt_dict)
            prefix_tokens = torch.Tensor([lang_tag]).long().unsqueeze(0)
        return prefix_tokens

    @classmethod
    def get_prediction(
        cls, task, model, generator, sample, tgt_lang=None, synthesize_speech=False
    ) -> Union[str, Tuple[str, Tuple[torch.Tensor, int]]]:
        _tgt_lang = tgt_lang or task.data_cfg.hub.get("tgt_lang", None)
        prefix = cls.get_prefix_token(task, _tgt_lang)
        pred_tokens = generator.generate([model], sample, prefix_tokens=prefix)
        pred = cls.detokenize(task, pred_tokens[0][0]["tokens"])
        if synthesize_speech:
            pfx = f"{_tgt_lang}_" if task.data_cfg.prepend_tgt_lang_tag else ""
            tts_model_id = task.data_cfg.hub.get(f"{pfx}tts_model_id", None)
            if tts_model_id is None:
                logger.warning("TTS model configuration not found")
            else:
                _repo, _id = tts_model_id.split(":")
                tts_model = torch.hub.load(_repo, _id, verbose=False)
                pred = (pred, tts_model.predict(pred))
        return pred

    def predict(
        self,
        audio: Union[str, torch.Tensor],
        tgt_lang: Optional[str] = None,
        synthesize_speech: bool = False,
    ) -> Union[str, Tuple[str, Tuple[torch.Tensor, int]]]:
        # `audio` is either a file path or a 1 x T Tensor
        # return either text or (text, synthetic speech)
        sample = self.get_model_input(self.task, audio)
        return self.get_prediction(
            self.task,
            self.model,
            self.generator,
            sample,
            tgt_lang=tgt_lang,
            synthesize_speech=synthesize_speech,
        )
repo_name: KosmosX-API-main
file_path: kosmosX/fairseq/fairseq/models/speech_to_text/hub_interface.py
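For context, a minimal usage sketch of S2THubInterface follows. It is an assumption-laden sketch, not the official torch.hub entry point: the checkpoint and audio paths are placeholders, and it assumes the config returned by load_model_ensemble_and_task carries the generation settings that task.build_generator expects.

# Hedged usage sketch for S2THubInterface. Paths are placeholders; the loaded
# cfg is assumed to be acceptable to task.build_generator().
import torch
from fairseq import checkpoint_utils
from fairseq.models.speech_to_text.hub_interface import S2THubInterface

ckpt_path = "/path/to/s2t_checkpoint.pt"  # hypothetical local checkpoint
models, cfg, task = checkpoint_utils.load_model_ensemble_and_task([ckpt_path])
hub = S2THubInterface(cfg, task, models[0])

with torch.no_grad():
    # `audio` may be a path to a 16 kHz mono wav file or a 1 x T waveform tensor,
    # depending on task.data_cfg.hub["input_type"].
    transcript = hub.predict("/path/to/utterance.wav")
print(transcript)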
#!/usr/bin/env python3 # Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import re from functools import partial from typing import List, Optional, Tuple import torch import torch.nn as nn from torch import Tensor from torch import device as Device from fairseq.models import FairseqEncoder from fairseq.models.speech_to_text.utils import ( NoOp, attention_suppression, layer_norm_backward_hook, lengths_to_padding_mask, segments_to_sequence, ) try: import torch.ao.quantization as quantization from torch.ao.quantization.qconfig import ( default_dynamic_qconfig, per_channel_dynamic_qconfig, ) except ImportError: import torch.quantization as quantization from torch.quantization.qconfig import ( default_dynamic_qconfig, per_channel_dynamic_qconfig, ) class RelativePositionEmbedding(nn.Module): """ Implementation according to https://arxiv.org/abs/1803.02155 """ def __init__(self, head_dim, max_position, norm_init=True): super().__init__() self.head_dim = head_dim self.max_position = max_position self.embeddings = nn.Parameter(torch.Tensor(max_position * 2 + 1, head_dim)) if norm_init: nn.init.xavier_normal_(self.embeddings) else: nn.init.xavier_uniform_(self.embeddings) def forward(self, input: Tensor): output = nn.functional.embedding(input.long(), self.embeddings) return output class Fp32LayerNorm(nn.Module): def __init__( self, input_dim, clamp_grad=True, max_grad_value=256, eps=1e-5, elementwise_affine=True, ): super().__init__() self.torch_module = torch.nn.LayerNorm( input_dim, eps=eps, elementwise_affine=elementwise_affine ) if clamp_grad: hook = partial(layer_norm_backward_hook, clamp_value=max_grad_value) self.torch_module.register_backward_hook(hook) def forward(self, input): output = torch.nn.functional.layer_norm( input.float(), self.torch_module.normalized_shape, self.torch_module.weight.float() if self.torch_module.weight is not None else None, self.torch_module.bias.float() if self.torch_module.bias is not None else None, self.torch_module.eps, ).type_as(input) return output # ------------------------------------------------------------------------------ # PositionwiseFF # ------------------------------------------------------------------------------ class PositionwiseFF(nn.Module): """ FFN layer in transformer. Args: input_dim: input embedding dimension ffn_dim: FFN layer inner dimension dropout_on_fc1: dropout for first linear layer dropout_on_fc2: dropout fr second linear layer activation_fn: activation function used after first linear layer. \ Only relu or gelu is supported. 
""" def __init__( self, input_dim, ffn_dim, dropout_on_fc1, dropout_on_fc2, activation_fn ): super(PositionwiseFF, self).__init__() self.input_dim = input_dim self.ffn_dim = ffn_dim if activation_fn == "relu": ac = nn.ReLU() elif activation_fn == "gelu": ac = nn.GELU() else: raise ValueError("Unsupported activation_fn = ({})".format(activation_fn)) # fc1 -> ac -> dropout -> fc2 -> dropout self.module = nn.Sequential( nn.Linear(input_dim, ffn_dim), ac, nn.Dropout(dropout_on_fc1), nn.Linear(ffn_dim, input_dim), nn.Dropout(dropout_on_fc2), ) self.layer_norm = Fp32LayerNorm(input_dim) def forward(self, input): module_out = self.module(self.layer_norm(input)) output = module_out + input return output def quantize_(self, params=None): if params and "per_channel" in params and params["per_channel"]: qconfig = per_channel_dynamic_qconfig else: qconfig = default_dynamic_qconfig quantization.quantize_dynamic( self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True ) return self # ------------------------------------------------------------------------------ # SummarizationLayer # ------------------------------------------------------------------------------ class SummarizationLayer(nn.Module): def __init__(self, method, segment_size, embedding_dim): super(SummarizationLayer, self).__init__() self.segment_size = segment_size self.embedding_dim = embedding_dim nonlin_match = re.match(r"nonlinear\((?P<act>[a-z]+),(?P<dim>[0-9]+)\)", method) self.method = method if method == "mean": self.module = nn.AvgPool1d( kernel_size=segment_size, stride=segment_size, ceil_mode=True, ) elif method == "max": self.module = nn.MaxPool1d( kernel_size=segment_size, stride=segment_size, ceil_mode=True, ) elif method == "linear": self.module = nn.Linear(segment_size, 1) elif nonlin_match: nonlin_args = nonlin_match.groupdict() act_type = nonlin_args["act"] hid_dim = int(nonlin_args["dim"]) if act_type == "relu": act = nn.ReLU() elif act_type == "gelu": act = nn.GELU() else: raise ValueError("Unsupported activation_fn = ({})".format(act_type)) self.module = nn.Sequential( nn.Linear(segment_size, hid_dim), act, nn.Linear(hid_dim, 1), ) else: raise ValueError("Unsupported summarization method = ({})".format(method)) def forward(self, input): # T, B, D -> B, D, T input = input.permute(1, 2, 0) if self.method == "mean" or self.method == "max": output = self.module(input) output = output.permute(2, 0, 1) return output full_seg_length = input.size(2) // self.segment_size * self.segment_size if full_seg_length > 0: # at least one seg is full B = input.size(0) D = input.size(1) input_todo = ( input[:, :, :full_seg_length] .contiguous() .view(B, -1, self.segment_size) ) output = self.module(input_todo) output = output.view(B, D, -1) else: output = input.new_zeros(input.size(0), input.size(1), 0) left = input.size(2) - full_seg_length if left > 0: # when last seg is not full, use zeros as last memory placeholder zeros = input.new_zeros(input.size(0), input.size(1), 1) output = torch.cat([output, zeros], dim=2) output = output.permute(2, 0, 1) return output # ------------------------------------------------------------------------------ # NoSegAugmentedMemoryMultiheadAttentionBmm # ------------------------------------------------------------------------------ class NoSegAugmentedMemoryMultiheadAttentionBmm(nn.Module): """ Whole utterance augmented memory multihead attention using BMM. Different with previous augmented memory multihead attention where the utterance is chunked into segments. 
Here we use attention mask achieve so. The input embedding [right_context, utterance, summary] is a concatenation of right context, utterance and summary. Right context block is the concatenation of all the right context for each segments. [right_context_0, right_context_1, ..., right_context_n] For example, if we have utterance = [v0, v1, v2, ...., v20]. segment size 8, right_context size 4. Then the right context blocks = [v8, v9, v10, v11, v16, v17, v18, v19, 0, 0, 0, 0], where v8, v9, v10, and v11 are the right context for first segment. v16, v17, v18 and v19 are the right context for second segment. 0, 0, 0 and 0 are right context for the last segment. utterance is corresponding to input embedding sequence summary is concatenation of average of each segments. [summary_0, summary_1, ..., ]. In augmented memory multihead attention, the query is [right_context, utterance, summary], key is [memory, right_context, utterance]. Different with AugmentedMemoryMultiheadAttentionBmm, memory here is passed from previous attention layer. For the first attention layer, memory is average of each segment. Memory is a concatenation of memory from each segments in previous attention layer. For example, current layer is i, then memory is [m_0, m_1, ..., m_n]. Each m_k is the output from seg_k in layer i-1. args: input_dim: input embedding dimension num_heads: number of heads in multihead self-attention dropout: attention dropout std_scale: if std_scale is not None. The weak attention suppression is turned on. For std_scale = 0.5, all the attention smaller than mean + 0.5 * std will be suppressed. scaled_init: whether to use scaled init for linear weight tanh_on_mem: whether to use tanh on memory output use_mem: whether to use memory or not. When max_memory_size is 0, then we don't have memory anymore. layer_index: current self-attention layer index that is used in depth initialization max_relative_position: max relative position used in relative position embedding rpe_old_option: To be compatible with previous model. The previous model was trained with attention += attention + rpe. The correct equation should be attention = attention + rpe """ def __init__( self, input_dim, num_heads, dropout=0.0, std_scale=None, scaled_init=False, tanh_on_mem=False, use_mem=True, mini_batches=False, negative_inf="-inf", layer_index=-1, max_relative_position=0, rpe_old_option=True, ): if input_dim % num_heads: raise ValueError( "input_dim ({}) must be divisible by num_heads ({})".format( input_dim, num_heads ) ) super().__init__() embed_dim = input_dim self.e2h_kv = torch.nn.Linear(input_dim, 2 * input_dim, bias=True) self.e2h_q = torch.nn.Linear(input_dim, input_dim, bias=True) self.rpe_old_option = rpe_old_option if max_relative_position > 0: self.use_rpe = True self.rpe_k = RelativePositionEmbedding( head_dim=input_dim // num_heads, max_position=max_relative_position, ) self.rpe_v = RelativePositionEmbedding( head_dim=input_dim // num_heads, max_position=max_relative_position, ) else: self.use_rpe = False self.rpe_k = None self.rpe_v = None if scaled_init: if layer_index == -1: gain = 1.0 / math.sqrt(2) else: # https://arxiv.org/abs/2005.09684 depthwise initialization # stablize the training greatly. Use depthwise initialization to # replace incremental loss. 
gain = 1.0 / math.sqrt(layer_index + 1) torch.nn.init.xavier_uniform_(self.e2h_kv.weight, gain=gain) torch.nn.init.xavier_uniform_(self.e2h_q.weight, gain=gain) self.out_proj = torch.nn.Linear(embed_dim, embed_dim, bias=True) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.scaling = self.head_dim ** -0.5 self.std_scale = std_scale self.use_mem = use_mem self.mini_batches = mini_batches self.negative_inf = negative_inf if tanh_on_mem: self.squash_mem = torch.tanh self.nonlinear_squash_mem = True else: self.squash_mem = NoOp() self.nonlinear_squash_mem = False def prepare_qkv( self, input: Tensor, mems: Tensor, lengths: Tensor, summary_length: int, lc_length: int, ): # T: right_context length + utterance_length + summary_length T, B, D = input.shape mem_length = mems.size(0) utterance_length = torch.max(lengths) right_context_blocks_length = T - utterance_length - summary_length rc_block = input[:right_context_blocks_length, :, :] utterance_block = input[right_context_blocks_length : T - summary_length, :, :] if B == 1: padding_mask = None else: klengths = lengths + mem_length + right_context_blocks_length + lc_length padding_mask = lengths_to_padding_mask(lengths=klengths) mem_rc_input = torch.cat([mems, rc_block, utterance_block], dim=0) # In training lc_length = 0 key_length = mem_rc_input.size(0) + lc_length rc_input_sum = input q = self.e2h_q(rc_input_sum) kv = self.e2h_kv(mem_rc_input) k, v = kv.chunk(chunks=2, dim=2) result_qkv = (q, k, v) input_shape = (T, B, D) result_lengths_info = ( mem_length, utterance_length, right_context_blocks_length, key_length, ) if padding_mask is not None: assert padding_mask.size(0) == B assert padding_mask.size(1) == key_length return result_qkv, input_shape, result_lengths_info, padding_mask def prepare_attention_weights( self, q: Tensor, new_k: Tensor, new_v: Tensor, input_shape: Tuple[int, int, int], rpe: Optional[Tensor], ) -> Tuple[Tensor, Tensor, Tensor]: T, B, D = input_shape q = ( q.contiguous().view(-1, B * self.num_heads, self.head_dim).transpose(0, 1) * self.scaling ) k = ( new_k.contiguous() .view(-1, B * self.num_heads, self.head_dim) .transpose(0, 1) ) v = ( new_v.contiguous() .view(-1, B * self.num_heads, self.head_dim) .transpose(0, 1) ) attention_weights = torch.bmm(q, k.transpose(1, 2)) if self.use_rpe and rpe is not None and self.rpe_v is not None: r_k = self.rpe_k(rpe) # [q, B*h, d] * [q, k, d] -> [B*h, q, k] attention_weights_rpe = torch.matmul( q.transpose(0, 1), r_k.transpose(1, 2) ).transpose(0, 1) attention_weights = attention_weights + attention_weights_rpe attention_weights_float = attention_weights.float() return attention_weights, attention_weights_float, v def prepare_attention_output( self, attention_weights: Tensor, attention_weights_float: Tensor, v: Tensor, input_shape: Tuple[int, int, int], key_length: int, padding_mask: Optional[Tensor], rpe: Optional[Tensor], ) -> Tensor: T, B, D = input_shape if padding_mask is not None: attention_weights_float = attention_weights_float.view( B, self.num_heads, T, key_length ) attention_weights_float = attention_weights_float.masked_fill( padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf") ) attention_weights_float = attention_weights_float.view( B * self.num_heads, T, key_length ) if self.std_scale is not None: attention_weights_float = attention_suppression( attention_weights_float, self.std_scale ) attention_weights_float = torch.nn.functional.softmax( attention_weights_float, dim=-1 ) 
attention_weights = attention_weights_float.type_as(attention_weights) attention_probs = torch.nn.functional.dropout( attention_weights, p=self.dropout, training=self.training ) # [T, key_length, B, n_head]+ [key_length, B, n_head, d_head] # -> [T, B, n_head, d_head] attention = torch.bmm(attention_probs, v) if self.use_rpe and rpe is not None and self.rpe_v is not None: r_v = self.rpe_v(rpe) attention_rpe = torch.matmul( attention_probs.transpose(0, 1), r_v ).transpose(0, 1) if self.rpe_old_option: attention += attention + attention_rpe else: attention = attention + attention_rpe assert list(attention.shape) == [B * self.num_heads, T, self.head_dim] attention = attention.transpose(0, 1).contiguous().view(T, B, self.embed_dim) rc_output_memory = self.out_proj(attention) return rc_output_memory @torch.jit.unused def forward( self, input: Tensor, lengths: Tensor, mems: Tensor, attention_mask: Tensor, pre_mems: Optional[Tensor] = None, left_context_key: Optional[Tensor] = None, left_context_val: Optional[Tensor] = None, rpe: Optional[Tensor] = None, ) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """ forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in training. args: input: formed in the following way [right_context_0, right_contex_1, ..., seg_0, seg_1, ..., summary_0, summary_1,..] lengths: the length of query which is [seg_0, seg_1, ....] mems: [mem_0, mem_1, ...]. attention_mask: attention mask for query = [right_context, query, summary] key = [mem, right_context, query]. This is only used for traing. """ if self.use_mem: mem_length = mems.size(0) summary_length = mem_length + 1 if pre_mems is not None: mems = torch.cat([pre_mems, mems], dim=0) else: mem_length = 0 summary_length = 0 # In training, lc_length = 0 if left_context_key is not None: lc_length = left_context_key.size(0) else: lc_length = 0 results = self.prepare_qkv( input=input, mems=mems, lengths=lengths, summary_length=summary_length, lc_length=lc_length, ) result_qkv, input_shape, result_lengths_info, padding_mask = results q, k, v = result_qkv ( mem_length, utterance_length, right_context_blocks_length, key_length, ) = result_lengths_info if left_context_key is not None: # add the cache key and value new_k = torch.cat( [ k[: mem_length + right_context_blocks_length, :, :], left_context_key, k[-utterance_length:, :, :], ], dim=0, ) new_v = torch.cat( [ v[: mem_length + right_context_blocks_length, :, :], left_context_val, v[-utterance_length:, :, :], ], dim=0, ) next_k = new_k[mem_length + right_context_blocks_length :, :, :] next_v = new_v[mem_length + right_context_blocks_length :, :, :] else: new_k = k new_v = v next_k = None next_v = None attention_weights, attention_weights_float, v = self.prepare_attention_weights( q=q, new_k=new_k, new_v=new_v, input_shape=input_shape, rpe=rpe, ) # mask attention attention_mask = attention_mask.unsqueeze(0) attention_weights_float = attention_weights_float.masked_fill( attention_mask, float(self.negative_inf) ) rc_output_memory = self.prepare_attention_output( attention_weights=attention_weights, attention_weights_float=attention_weights_float, v=v, input_shape=input_shape, key_length=key_length, padding_mask=padding_mask, rpe=rpe, ) if self.use_mem: # next_m length equals to summary length - 1 # last memory is ignored if self.mini_batches: next_m = rc_output_memory[-summary_length:] else: next_m = rc_output_memory[-summary_length:-1] next_m = self.squash_mem(next_m) # rc and output rc_output = rc_output_memory[:-summary_length] if not self.nonlinear_squash_mem: next_m = 
torch.clamp(next_m, min=-10, max=10) else: next_m = mems rc_output = rc_output_memory return rc_output, next_m, next_k, next_v @torch.jit.export def forward_jit( self, input: Tensor, lengths: Tensor, mems: Tensor, left_context_key: Tensor, left_context_val: Tensor, rpe: Optional[Tensor], ) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """ forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in decoding. args: input: formed in the following way [right_context_0, right_contex_1, ..., seg_0, seg_1, ..., summary_0, summary_1,..] lengths: the length of query which is [seg_0, seg_1, ....] mems: [mem_0, mem_1, ...]. left_context_key: left_context for key part. This is only used for online decoding. In training, this is empty tensor left_context_val: left_context for value part. This is only used for online decoding. In training, this is empty tensor """ lc_length = left_context_key.size(0) # In decoding, summary_length = 1 or 0 if self.use_mem: summary_length = 1 else: summary_length = 0 results = self.prepare_qkv( input=input, mems=mems, lengths=lengths, summary_length=summary_length, lc_length=lc_length, ) result_qkv, input_shape, result_lengths_info, padding_mask = results q, k, v = result_qkv ( mem_length, utterance_length, right_context_blocks_length, key_length, ) = result_lengths_info # add the cache key and value new_k = torch.cat( [ k[: mem_length + right_context_blocks_length, :, :], left_context_key, k[-utterance_length:, :, :], ], dim=0, ) new_v = torch.cat( [ v[: mem_length + right_context_blocks_length, :, :], left_context_val, v[-utterance_length:, :, :], ], dim=0, ) next_k = new_k[mem_length + right_context_blocks_length :, :, :] next_v = new_v[mem_length + right_context_blocks_length :, :, :] attention_weights, attention_weights_float, v = self.prepare_attention_weights( q=q, new_k=new_k, new_v=new_v, input_shape=input_shape, rpe=rpe, ) # In online decoding, we don't have attention mask. But we still need # to disable the attention from summary query to memory attention_weights_float[:, -1, :mem_length] = float(self.negative_inf) rc_output_memory = self.prepare_attention_output( attention_weights=attention_weights, attention_weights_float=attention_weights_float, v=v, input_shape=input_shape, key_length=key_length, padding_mask=padding_mask, rpe=rpe, ) # In decoding, summary length is 1 if self.use_mem: next_m = rc_output_memory[-1:] next_m = self.squash_mem(next_m) # rc and output rc_output = rc_output_memory[:-1] if not self.nonlinear_squash_mem: next_m = torch.clamp(next_m, min=-10, max=10) else: rc_output = rc_output_memory # empty tensor as input mems next_m = mems return rc_output, next_m, next_k, next_v def quantize_(self, params=None): if params and "per_channel" in params and params["per_channel"]: qconfig = per_channel_dynamic_qconfig else: qconfig = default_dynamic_qconfig quantization.quantize_dynamic( self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True ) return self class NoSegAugmentedMemoryTransformer(nn.Module): """ Whole utterance augmented memory transformer. This is not pyspeech nn layer. It is used as a module in a master layer where multiple transformers is used. 
""" def __init__( self, input_dim, num_heads, ffn_dim, dropout_in_attn=0.0, dropout_on_attn=None, dropout_on_fc1=None, dropout_on_fc2=None, activation_fn="relu", tanh_on_mem=False, std_scale=None, scaled_init=False, segment_size=128, use_mem=True, mini_batches=False, negative_inf="-inf", layer_index=-1, summarization_method="mean", max_relative_position=0, rpe_old_option=True, ): super(NoSegAugmentedMemoryTransformer, self).__init__() self.attention = NoSegAugmentedMemoryMultiheadAttentionBmm( input_dim=input_dim, num_heads=num_heads, dropout=dropout_in_attn, scaled_init=scaled_init, tanh_on_mem=tanh_on_mem, std_scale=std_scale, use_mem=use_mem, mini_batches=mini_batches, negative_inf=negative_inf, layer_index=layer_index, max_relative_position=max_relative_position, ) self.dropout = nn.Dropout(dropout_on_attn) self.pos_ff = PositionwiseFF( input_dim=input_dim, ffn_dim=ffn_dim, dropout_on_fc1=dropout_on_fc1, dropout_on_fc2=dropout_on_fc2, activation_fn=activation_fn, ) self.layer_norm_pre = Fp32LayerNorm(input_dim) self.layer_norm = Fp32LayerNorm(input_dim) self.segment_size = segment_size self.use_mem = use_mem self.memory_op = SummarizationLayer( summarization_method, segment_size, input_dim ) def set_mini_batches(self, mini_batches): self.attention.mini_batches = mini_batches def gen_summary_queries(self, input): sum_input = self.memory_op(input) return sum_input def pre_attention_ops(self, input, right_context_blocks): rc_length = right_context_blocks.size(0) input_length = input.size(0) rc_and_input = torch.cat([right_context_blocks, input], dim=0) residual_input = rc_and_input rc_and_input = self.layer_norm_pre(rc_and_input) query_input = rc_and_input[-input_length:, :, :] return rc_length, input_length, residual_input, query_input, rc_and_input def after_attention_ops(self, attention_output, residual_input): output = self.dropout(attention_output) output = output + residual_input output = self.pos_ff(output) output = self.layer_norm(output) return output @torch.jit.export def forward_jit( self, input: Tensor, lengths: Tensor, mems: Tensor, left_context_key: Tensor, left_context_val: Tensor, right_context_blocks: Tensor, rpe: Optional[Tensor], ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: results = self.pre_attention_ops(input, right_context_blocks) rc_length, input_length, residual_input, query_input, rc_and_input = results # In online decoding, the summary query size is always 1 or 0 if self.use_mem: summary_query = self.gen_summary_queries(query_input) summary_query = summary_query[0:1, :, :] rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0) else: rc_qu_su = rc_and_input rc_output, next_m, next_k, next_v = self.attention.forward_jit( input=rc_qu_su, lengths=lengths, mems=mems, left_context_key=left_context_key, left_context_val=left_context_val, rpe=rpe, ) rc_output = self.after_attention_ops(rc_output, residual_input) results = ( rc_output[-input_length:, :, :], next_m, rc_output[0:rc_length, :, :], next_k, next_v, ) return results @torch.jit.unused def forward( self, input, lengths, mems, right_context_blocks, attention_mask, pre_mems, left_context_key, left_context_val, rpe, ): results = self.pre_attention_ops(input, right_context_blocks) rc_length, input_length, residual_input, query_input, rc_and_input = results if self.use_mem: summary_query = self.gen_summary_queries(query_input) rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0) else: rc_qu_su = rc_and_input rc_output, next_m, next_k, next_v = self.attention( input=rc_qu_su, lengths=lengths, 
mems=mems, attention_mask=attention_mask, pre_mems=pre_mems, left_context_key=left_context_key, left_context_val=left_context_val, rpe=rpe, ) # [TODO] Note memory did not go through pos_ff. What happen if we pass # memory through the pos_ff as well? rc_output = self.after_attention_ops(rc_output, residual_input) results = ( rc_output[-input_length:, :, :], next_m, rc_output[0:rc_length, :, :], next_k, next_v, ) return results class NoSegAugmentedMemoryTransformerEncoderLayer(FairseqEncoder): """ Whole utterance augmented memory transformer encoder layer. This is a master layer where we can define multiple augmented memory transformers. There are two reasons to setup the master layer. 1. We only need to define once about the attention mask. All the layers in the master layer share the same mask. 2. pyspeech nn layer has special input and output format. Defining one master layer is easier to passing memory between different layes inside the master layer args: input_dim: input embedding dimension num_heads: number of heads in multihead self-attention ffn_dim: ffn dimension in FFN layer num_layers: number of augmented memory transformer layers dropout_in_attn: dropout used in multi-head self-attention dropout_on_attn: dropout used for output from te multihead self-attention dropout_on_fc1: dropout used in FFN layer for the first linear layer dropout_on_fc2: dropout used in FFN layer for the second linear layer segment_size: segment size for each segment context_config: (left_context_size, right_context_size) defines the surround context size for each segment max_memory_size: maximum memory size used for each segment scaled_init: whether use scaled init for weight initialization in attention layer std_scale: if std_scale is not None. The weak attention suppression is turned on. For std_scale = 0.5, all the attention smaller than mean + 0.5 * std will be suppressed. activation_fn: activation function used in FFN layer. [ReLU, GELU] supported tanh_on_mem: whether use tanh on memory mini_batches: use mini-btach training negative_inf: the negative infinity value used in attention masking. default is "-inf". For some situation, e.g. LM. it is better to use "-1e8" to avoid nan issue. summarization_method: method to generate segment summrization embedding max_relative_position: max relatie position for relative position embedding rpe_old_option: To be compatible with previous model. The previous model was trained with attention += attention + rpe. The correct equation should be attention = attention + rpe [TODO]: remove the rpe_old_option by the end of 2021 Q1. """ def __init__( self, input_dim, num_heads, ffn_dim, num_layers=1, dropout_in_attn=0.0, dropout_on_attn=0.0, dropout_on_fc1=0.0, dropout_on_fc2=0.0, segment_size=128, context_config=(0, 0), max_memory_size=0, scaled_init=True, std_scale=None, activation_fn="relu", tanh_on_mem=False, mini_batches=False, negative_inf="-inf", deep_init=True, summarization_method="mean", max_relative_position=0, rpe_old_option=True, ): super().__init__(None) if input_dim % num_heads: raise ValueError( "input_dim ({}) must be divisible by num_heads ({})".format( input_dim, num_heads ) ) # we used to support growing memory size. However, it will cause # cross stream batching failure. Now we need to have exact max memory size if max_memory_size < 0: raise ValueError("max_memory_size must be >= 0") # Only assign right_context. In decoding, left context will be cached. 
# No need to let the online decoder to re-assign the left context self.left_context, self.right_context = context_config self.segment_size = segment_size self.memory_dim = input_dim self.max_memory_size = max_memory_size self.mini_batches = mini_batches if self.max_memory_size != 0: self.use_mem = True else: self.use_mem = False self.memory_op = SummarizationLayer( summarization_method, segment_size, input_dim ) self.layers = torch.nn.ModuleList() self.num_layers = num_layers self.max_relative_position = max_relative_position if self.max_relative_position > 0: self.use_rpe = True else: self.use_rpe = False for i in range(self.num_layers): if deep_init: layer_index = i else: layer_index = -1 self.layers.append( NoSegAugmentedMemoryTransformer( num_heads=num_heads, input_dim=input_dim, ffn_dim=ffn_dim, dropout_in_attn=dropout_in_attn, dropout_on_attn=dropout_on_attn, dropout_on_fc1=dropout_on_fc1, dropout_on_fc2=dropout_on_fc2, segment_size=segment_size, std_scale=std_scale, activation_fn=activation_fn, tanh_on_mem=tanh_on_mem, scaled_init=scaled_init, use_mem=self.use_mem, mini_batches=mini_batches, negative_inf=negative_inf, layer_index=layer_index, summarization_method=summarization_method, max_relative_position=max_relative_position, rpe_old_option=rpe_old_option, ) ) def set_mini_batches(self, mini_batches): # handy function only used for unit test self.mini_batches = mini_batches for layer in self.layers: layer.set_mini_batches(mini_batches) def _get_relative_position( self, input: Tensor, max_relative_position: int, left_context_length: int, past_length: int, is_decoding: bool, ): # For training, we copy the right context to the start of the utterance # First dimension in distance is corresponding to query. # [right context, utterance, summary vector] # Second dimension in distance is corresponding to key. # [Memory bank, right context, utterance] # For summary vector in query part, the distance with # all other position is 2*max_position. For memory bank in key, # the distance with all other positions is 0. T, B, D = input.shape num_segs = math.ceil((T - self.right_context) / self.segment_size) # utterance u_st = past_length * self.segment_size u_ed = u_st + T utterance_ranges = torch.arange(u_st, u_ed - self.right_context) # left context. Only in minibatch or decoding left_context_ranges = torch.arange(u_st - left_context_length, u_st) # Right context block # right context + utterance right_context_blocks = [] for i in range(0, num_segs - 1): st = (i + 1) * self.segment_size + u_st ed = st + self.right_context assert ed < u_ed temp = torch.arange(st, ed) right_context_blocks.append(temp) right_context_blocks.append(torch.arange(u_ed - self.right_context, u_ed)) right_context_ranges = torch.cat(right_context_blocks) if self.use_mem: # Memory bank # The position for memory -n, .., -1 if is_decoding: memory_size = min(past_length, self.max_memory_size) else: memory_size = num_segs + past_length - 1 memory_bank_ranges = torch.arange( -max_relative_position - 1, -max_relative_position - 1 - memory_size, -1 ) # summary vector # The position for summary vector as the T+max_relative_position+1. 
# After the clamping, the relative position is max_relative_position summary_pos_st = u_ed + max_relative_position + 1 summary_vector_ranges = torch.arange( summary_pos_st, summary_pos_st + num_segs ) key_ranges = torch.cat( [ memory_bank_ranges, right_context_ranges, left_context_ranges, utterance_ranges, ] ) query_ranges = torch.cat( [right_context_ranges, utterance_ranges, summary_vector_ranges] ) else: key_ranges = torch.cat( [right_context_ranges, left_context_ranges, utterance_ranges] ) query_ranges = torch.cat([right_context_ranges, utterance_ranges]) distance = key_ranges[None, :] - query_ranges[:, None] distance_clamp = ( torch.clamp(distance, -max_relative_position, max_relative_position) + max_relative_position ) distance_clamp = distance_clamp.to(input.device).long().detach() return distance_clamp def _get_attention_mask(self, input, past_length=0, left_context_cache=0): # attention mask for each query contains three parts: # 1. memory part # 2. left_context + segment # 3. right_context_block # so for each segment and its correspoinding right context block, # the attention matrix is formed by 9 parts: # [0, m, 0, 0, right_context, 0, 0, seg, 0] # [before memory, memory, after memory, before right context, right_context, # after right context, before seg, seg, after seg] # # Query is formed in the way as [right_context_blocks, utterance, summary] # # Note: put m and right_context before segment is convenient # for padding_mask operation. # Key lengths = m_length + right_context_block_length + lengths utterance_length, batch_size, _ = input.shape summary_length = math.ceil(utterance_length / self.segment_size) num_segs = summary_length rc_length = self.right_context * num_segs rc = self.right_context lc = self.left_context # using mini-batches, there is left context cache available for current # sequence. 
lcc = left_context_cache # max_memory_size is 0 then we don't have memory and summary # past_length is the memory carry from previous sequence if self.use_mem: mem_length = num_segs - 1 + past_length else: mem_length = 0 rc_mask = [] query_mask = [] summary_mask = [] for j in range(0, num_segs): ssize = min(self.segment_size, utterance_length - j * self.segment_size) rc_size = rc rc_mat = [] q_mat = [] s_mat = [] m_start = max(j + past_length - self.max_memory_size, 0) # max_memory_size is 0, then we don't use memory if self.use_mem: # part 0: before memory rc_mat.append(input.new_zeros(rc_size, m_start)) q_mat.append(input.new_zeros(ssize, m_start)) s_mat.append(input.new_zeros(1, m_start)) # part 1: memory col_1 = j + past_length - m_start rc_mat.append(torch.ones(rc_size, col_1, device=input.device)) q_mat.append(torch.ones(ssize, col_1, device=input.device)) # based on D22875746, disable summary query attention # on memeory is better for long form utterance s_mat.append(input.new_zeros(1, col_1)) # part 2: after memory col_2 = mem_length - (j + past_length) rc_mat.append(input.new_zeros(rc_size, col_2)) q_mat.append(input.new_zeros(ssize, col_2)) s_mat.append(input.new_zeros(1, col_2)) # part 3: before right context rc_start = j * rc rc_mat.append(input.new_zeros(rc_size, rc_start)) q_mat.append(input.new_zeros(ssize, rc_start)) s_mat.append(input.new_zeros(1, rc_start)) # part 4: right context rc_end = rc_start + rc col_4 = rc rc_mat.append(torch.ones(rc_size, col_4, device=input.device)) q_mat.append(torch.ones(ssize, col_4, device=input.device)) s_mat.append(torch.ones(1, col_4, device=input.device)) # part 5: after right context col_5 = rc_length - rc_end rc_mat.append(input.new_zeros(rc_size, col_5)) q_mat.append(input.new_zeros(ssize, col_5)) s_mat.append(input.new_zeros(1, col_5)) # part 6: before query segment seg_start = max(j * self.segment_size + lcc - lc, 0) rc_mat.append(input.new_zeros(rc_size, seg_start)) q_mat.append(input.new_zeros(ssize, seg_start)) s_mat.append(input.new_zeros(1, seg_start)) # part 7: query segment # note: right context is put in right context block # here we only need to consider about left context seg_end = min((j + 1) * self.segment_size + lcc, utterance_length + lcc) col_7 = seg_end - seg_start rc_mat.append(torch.ones(rc_size, col_7, device=input.device)) q_mat.append(torch.ones(ssize, col_7, device=input.device)) s_mat.append(torch.ones(1, col_7, device=input.device)) # part 8: after query segment col_8 = utterance_length + lcc - seg_end rc_mat.append(input.new_zeros(rc_size, col_8)) q_mat.append(input.new_zeros(ssize, col_8)) s_mat.append(input.new_zeros(1, col_8)) rc_mask.append(torch.cat(rc_mat, dim=1)) query_mask.append(torch.cat(q_mat, dim=1)) summary_mask.append(torch.cat(s_mat, dim=1)) # no memory, then we don't need summary either if self.use_mem: attention_mask = ( 1 - torch.cat( [ torch.cat(rc_mask, dim=0), torch.cat(query_mask, dim=0), torch.cat(summary_mask, dim=0), ], dim=0, ) ).to(torch.bool) else: attention_mask = ( 1 - torch.cat( [torch.cat(rc_mask, dim=0), torch.cat(query_mask, dim=0)], dim=0 ) ).to(torch.bool) return attention_mask @torch.jit.export def init_state( self, batch_size: int, device: Optional[Device] = None ) -> List[Tensor]: empty_memory = torch.zeros( self.num_layers, self.max_memory_size, batch_size, self.memory_dim, device=device, ) left_context_key = torch.zeros( self.num_layers, self.left_context, batch_size, self.memory_dim, device=device, ) left_context_val = torch.zeros( self.num_layers, self.left_context, 
batch_size, self.memory_dim, device=device, ) past_length = torch.zeros(1, batch_size, dtype=torch.int32, device=device) return [empty_memory, left_context_key, left_context_val, past_length] @torch.jit.export def batch_state(self, states: List[List[Tensor]]) -> List[Tensor]: if len(states) == 0: return [] batched_m = [] batched_lc_key = [] batched_lc_val = [] batched_past_length = [] for state in states: if len(state) == 0: continue m, lc_key, lc_val, past_length = state batched_m.append(m) batched_lc_key.append(lc_key) batched_lc_val.append(lc_val) batched_past_length.append(past_length) if ( (len(batched_m) == 0) or (len(batched_lc_key) == 0) or (len(batched_lc_val) == 0) or (len(batched_past_length) == 0) ): return [ torch.tensor([]), torch.tensor([]), torch.tensor([]), torch.tensor([]), ] batched_m = torch.cat(batched_m, dim=2) batched_lc_key = torch.cat(batched_lc_key, dim=2) batched_lc_val = torch.cat(batched_lc_val, dim=2) batched_past_length = torch.cat(batched_past_length, dim=1) return [batched_m, batched_lc_key, batched_lc_val, batched_past_length] @torch.jit.export def reorder_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]: if len(state) == 0: return [] m, lc_key, lc_val, past_length = state indices = indices.to(device=m.device) reord_m = torch.index_select(m, 2, indices) reord_lc_key = torch.index_select(lc_key, 2, indices) reord_lc_val = torch.index_select(lc_val, 2, indices) reord_past_length = torch.index_select(past_length, 1, indices) return [reord_m, reord_lc_key, reord_lc_val, reord_past_length] @torch.jit.export def reset_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]: m, lc_key, lc_val, past_length = state m = m.index_fill(dim=2, index=indices, value=0.0) lc_key = lc_key.index_fill(dim=2, index=indices, value=0.0) lc_val = lc_val.index_fill(dim=2, index=indices, value=0.0) past_length = past_length.index_fill(dim=1, index=indices, value=0) return [m, lc_key, lc_val, past_length] @torch.jit.export def state_size(self) -> int: return 4 @torch.jit.export def batch_size_in_state( self, state: Optional[List[Tensor]], sloppy: bool = True ) -> Optional[int]: if state is None: return None return state[0].size(2) def gen_summary_queries(self, input): sum_input = self.memory_op(input) return sum_input def _gen_right_context_padded_input(self, input): # This function deals with input that is already # padded with right context (e.g. minibatch training) right_context_blocks = [] T, B, D = input.shape num_segs = math.ceil((T - self.right_context) / self.segment_size) for i in range(0, num_segs - 1): st = (i + 1) * self.segment_size ed = st + self.right_context assert ed < T temp = input[st:ed, :, :] right_context_blocks.append(temp) # last segment right context is already available right_context_blocks.append(input[T - self.right_context :, :, :]) return torch.cat(right_context_blocks, dim=0) def _gen_segs_right_context(self, input, lengths): segments = [] T, B, D = input.size() nT = T - self.right_context # assume input is right context padded num_segs = math.ceil(nT / self.segment_size) # pad zeros to the utterance to make sure each # segment has the same right context. 
For the for i in range(0, num_segs - 1): st = i * self.segment_size ed = min(T, st + self.segment_size + self.right_context) temp = input[st:ed, :, :] rest_lengths = torch.clamp( lengths - self.segment_size, min=0, max=nT - (i + 1) * self.segment_size ) segments.append((temp, lengths - rest_lengths + self.right_context)) lengths = rest_lengths last_seg = input[st + self.segment_size :, :, :] segments.append((last_seg, rest_lengths + self.right_context)) return segments @torch.jit.unused def forward( self, input: Tensor, padding_masks: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]: # Xutai: originally the second argument is lengths. lengths = (~padding_masks).sum(dim=1).long() # mini batch training. if self.mini_batches: return self.forward_mini_batches(input, lengths, state) # regular full sequence training. Note, assume the right context in provided # in the input. T, B, D = input.size() right_context_blocks = self._gen_right_context_padded_input(input) # generate the relative positional embedding if self.use_rpe: rpe = self._get_relative_position( input=input, max_relative_position=self.max_relative_position, left_context_length=0, past_length=0, is_decoding=False, ) else: rpe = None input = input[: T - self.right_context, :, :] attention_mask = self._get_attention_mask(input) # firt layer use each segment mean as memory # ignore the last one seg average if self.use_mem: mems = self.gen_summary_queries(input)[:-1, :, :] else: mems = torch.zeros(0, input.size(1), input.size(2), device=input.device) mems = mems.type_as(input) output = input all_outputs = [] for layer in self.layers: output, mems, right_context_blocks, _, _ = layer( input=output, lengths=lengths, attention_mask=attention_mask, mems=mems, right_context_blocks=right_context_blocks, pre_mems=None, left_context_key=None, left_context_val=None, rpe=rpe, ) all_outputs.append(output) return output, padding_masks, [], all_outputs def forward_jit_mini_batch_init( self, seg: Tensor, state: Optional[List[Tensor]] = None, is_decoding: bool = False, ): # Prepare state. In whole sequence training, state is ignored. # For minibatch training, we need to prepare state if state is None: state = self.init_state(batch_size=seg.size(1), device=seg.device) if seg.dtype == torch.half: state = [state[0].half(), state[1].half(), state[2].half(), state[3]] if self.use_mem: # note input average only on seg, not on right context # first layer use each segmetn mean as memory. 
the last # one segment average is used in state full_mems = self.gen_summary_queries(seg) if is_decoding: mems = full_mems[0:1, :, :] state_mems = torch.cat([state[0][0], mems], dim=0) else: mems = full_mems[:-1, :, :] state_mems = torch.cat([state[0][0], full_mems], dim=0) else: mems = state[0][0] state_mems = mems # track processed segment number or memory number # the same batch as the same bumber of past length past_length = state[3][0][0].item() past_left_context = min(past_length * self.segment_size, self.left_context) past_length = min(self.max_memory_size, past_length) return state, mems, state_mems, past_length, past_left_context def state_update_before( self, layer: int, state: List[Tensor], past_length: int, past_left_context: int ): pre_mems = state[0][layer][self.max_memory_size - past_length :, :, :] lc_key = state[1][layer][self.left_context - past_left_context :, :, :] lc_val = state[2][layer][self.left_context - past_left_context :, :, :] return pre_mems, lc_key, lc_val def state_update_after( self, layer: int, state: List[Tensor], mems: Tensor, next_key: Tensor, next_val: Tensor, mems_list: List[Tensor], lc_key_list: List[Tensor], lc_val_list: List[Tensor], ): # mems is used for next layer if layer < self.num_layers - 1: state_mems = torch.cat([state[0][layer + 1], mems], dim=0) mems_list.append(state_mems[-self.max_memory_size :, :, :]) # when mems pass to next sequence, we need the last memory. when mems # use for the next layer, we can ignore the last memory mems = mems[:-1, :, :] # note state[1][i] and state[2][i] original length equals to self.left_context new_k = torch.cat([state[1][layer], next_key], dim=0) new_v = torch.cat([state[2][layer], next_val], dim=0) lc_key_list.append(new_k[-self.left_context :, :, :]) lc_val_list.append(new_v[-self.left_context :, :, :]) return mems_list, lc_key_list, lc_val_list, mems def state_update_after_loop( self, state: List[Tensor], mems_list: List[Tensor], lc_key_list: List[Tensor], lc_val_list: List[Tensor], update_length: int, ): state[0] = torch.stack(mems_list, dim=0) state[1] = torch.stack(lc_key_list, dim=0) state[2] = torch.stack(lc_val_list, dim=0) state[3] = state[3] + update_length return state @torch.jit.unused def forward_mini_batches( self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]: T, B, D = input.size() # input without right context seg = input[: T - self.right_context, :, :] # get right context blocks right_context_blocks = self._gen_right_context_padded_input(input) mems_list = [] lc_key_list = [] lc_val_list = [] results = self.forward_jit_mini_batch_init(seg, state, False) state, mems, state_mems, past_length, past_left_context = results # relative position embedding if self.use_rpe: rpe = self._get_relative_position( input=input, max_relative_position=self.max_relative_position, left_context_length=past_left_context, past_length=past_length, is_decoding=False, ) else: rpe = None # get attention mask based on seg (not include right context) and available # left context attention_mask = self._get_attention_mask(seg, past_length, past_left_context) mems_list.append(state_mems[-self.max_memory_size :, :, :]) output = seg i = 0 all_outputs = [] for layer in self.layers: # In order to make cross stream batching work, mem, left context key # and left context value in the state should always be the same shape. # We use the past length to track the processed segment number. 
In this # way, we take out the essential memory, left context key and left # context val from the state. After finish the forward for current segment # we add the new memory, left context key and left context value into the # staate and trim out the oldest part to keep the shape consistent. pre_mems, lc_key, lc_val = self.state_update_before( i, state, past_length, past_left_context ) output, mems, right_context_blocks, next_key, next_val = layer.forward( input=output, lengths=lengths, attention_mask=attention_mask, mems=mems, right_context_blocks=right_context_blocks, pre_mems=pre_mems, left_context_key=lc_key, left_context_val=lc_val, rpe=rpe, ) all_outputs.append(output) mems_list, lc_key_list, lc_val_list, mems = self.state_update_after( layer=i, state=state, mems=mems, next_key=next_key, next_val=next_val, mems_list=mems_list, lc_key_list=lc_key_list, lc_val_list=lc_val_list, ) i += 1 # update state update_length = math.ceil((T - self.right_context) / self.segment_size) state = self.state_update_after_loop( state=state, mems_list=mems_list, lc_key_list=lc_key_list, lc_val_list=lc_val_list, update_length=update_length, ) return output, lengths, state, all_outputs def forward_jit_test( self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor]]: """ This one simulate sequence encoder forward jit. This is for unit test purpose. It is not used in training or decoding. Note, extra_right_context is set in the model. In unit test, input = [utterance, right_context], lengths = [utterance_length]. args: input: input utterance lengths: utterance input length state: None here. input is whole utterance """ # [TODO] sequence_to_segment has bug in lengths. seg_src_tokens_lengths = self._gen_segs_right_context(input, lengths) seg_enc_tokens_lengths: List[Tuple[Tensor, Tensor]] = [] state: Optional[List[Tensor]] = None for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths: seg_enc_tokens, seg_enc_lengths, state = self.forward_jit( input=seg_src_tokens, lengths=seg_src_lengths, state=state ) seg_enc_tokens_lengths.append((seg_enc_tokens, seg_enc_lengths)) enc_tokens, enc_lengths = segments_to_sequence( segments=seg_enc_tokens_lengths, time_axis=0 ) state = [] # returns trivial state return enc_tokens, enc_lengths, state @torch.jit.export def forward_jit( self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor]]: """ Forward helper for online decoding. args: input: [seg, right_context]. We assume in online we always padding the right context to the preset right context size. For the last segment, we may have short segment size, but right context size is the same as other segments lengths: utterance input length is the utterance segment length and right context size state: [memory, left_context_key, left_context_val]. 
To improve throughput, in addition to memory, we also cache key and value for left_context in multihead self-attention """ # In online decoding, input = [segment, right_context] # Lengths = [segment_length, right_context_length] # so we need strip right context in output T, B, D = input.size() rc_str = T - self.right_context rc_end = T right_context_blocks = input[rc_str:rc_end, :, :] seg = input[:rc_str, :, :] lengths = torch.clamp(lengths - self.right_context, min=0) mems_list = [] lc_key_list = [] lc_val_list = [] results = self.forward_jit_mini_batch_init(seg, state, True) state, mems, state_mems, past_length, past_left_context = results # relative position embedding if self.use_rpe: rpe = self._get_relative_position( input=input, max_relative_position=self.max_relative_position, left_context_length=past_left_context, past_length=past_length, is_decoding=True, ) else: rpe = None # memory for first layer. mems_list.append(state_mems[-self.max_memory_size :, :, :]) output = seg i = 0 for layer in self.layers: # In order to make cross stream batching work, mem, left context key # and left context value in the state should always be the same shape. # We use the past length to track the processed segment number. In this # way, we take out the essential memory, left context key and left # context val from the state. After finish the forward for current segment # we add the new memory, left context key and left context value into the # staate and trim out the oldest part to keep the shape consistent. true_mems, lc_key, lc_val = self.state_update_before( layer=i, state=state, past_length=past_length, past_left_context=past_left_context, ) output, mems, right_context_blocks, next_key, next_val = layer.forward_jit( input=output, lengths=lengths, mems=true_mems, right_context_blocks=right_context_blocks, left_context_key=lc_key, left_context_val=lc_val, rpe=rpe, ) # mems is used for next layer mems_list, lc_key_list, lc_val_list, _ = self.state_update_after( layer=i, state=state, mems_list=mems_list, mems=mems, next_key=next_key, next_val=next_val, lc_key_list=lc_key_list, lc_val_list=lc_val_list, ) i += 1 # update state state = self.state_update_after_loop( state=state, mems_list=mems_list, lc_key_list=lc_key_list, lc_val_list=lc_val_list, update_length=1, ) return output, lengths, state def quantize_(self, params=None): if params and "per_channel" in params and params["per_channel"]: qconfig = per_channel_dynamic_qconfig else: qconfig = default_dynamic_qconfig quantization.quantize_dynamic( self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True ) return self # ------------------------------------------------------------------------------ # Emformer encoder for seq2seq model # This is a wrapper over the original emformer # ------------------------------------------------------------------------------ def emformer_encoder(klass): class SpeechEncoder(klass): def __init__(self, args): super().__init__(args) stride = SpeechEncoder.conv_layer_stride(args) trf_left_context = args.segment_left_context // stride trf_right_context = args.segment_right_context // stride context_config = [trf_left_context, trf_right_context] self.transformer_layers = nn.ModuleList( [ NoSegAugmentedMemoryTransformerEncoderLayer( input_dim=args.encoder_embed_dim, num_heads=args.encoder_attention_heads, ffn_dim=args.encoder_ffn_embed_dim, num_layers=args.encoder_layers, dropout_in_attn=args.dropout, dropout_on_attn=args.dropout, dropout_on_fc1=args.dropout, dropout_on_fc2=args.dropout, 
activation_fn=args.activation_fn, context_config=context_config, segment_size=args.segment_length, max_memory_size=args.max_memory_size, scaled_init=True, # TODO: use constant for now. tanh_on_mem=args.amtrf_tanh_on_mem, ) ] ) def forward(self, src_tokens, src_lengths): encoder_out = super().forward(src_tokens, src_lengths) output = encoder_out["encoder_out"][0] encoder_padding_masks = encoder_out["encoder_padding_mask"][0] # This is because that in the original implementation # the output didn't consider the last segment as right context. encoder_padding_masks = encoder_padding_masks[:, : output.size(0)] return { "encoder_out": [output], "encoder_padding_mask": [encoder_padding_masks], "encoder_embedding": [], "encoder_states": [], "src_tokens": [], "src_lengths": [], } @staticmethod def conv_layer_stride(args): # TODO: make it configurable from the args return 4 SpeechEncoder.__name__ = klass.__name__ return SpeechEncoder
repo_name: KosmosX-API-main
file_path: kosmosX/fairseq/fairseq/models/speech_to_text/modules/emformer.py
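To make the right-context layout described in the NoSegAugmentedMemoryMultiheadAttentionBmm docstring concrete, here is a small standalone sketch (the function name and toy tensors are illustrative, not part of fairseq). It rebuilds the right-context block the same way _gen_right_context_padded_input does above, and reproduces the docstring example: utterance v0..v20, segment size 8, right context 4 give blocks [v8..v11, v16..v19, 0, 0, 0, 0].

# Standalone sketch of the right-context block construction. The input is
# assumed to already be padded with `right_context` frames at the end,
# mirroring _gen_right_context_padded_input in the encoder layer above.
import math
import torch


def gen_right_context_blocks(
    x: torch.Tensor, segment_size: int, right_context: int
) -> torch.Tensor:
    """x: (T, B, D) utterance, right-context padded at the end."""
    T = x.size(0)
    num_segs = math.ceil((T - right_context) / segment_size)
    blocks = []
    for i in range(num_segs - 1):
        # for segment i, copy the `right_context` frames that follow it
        st = (i + 1) * segment_size
        blocks.append(x[st : st + right_context])
    # the last segment's right context is the padding at the end of the input
    blocks.append(x[T - right_context :])
    return torch.cat(blocks, dim=0)  # (num_segs * right_context, B, D)


# Docstring example: 21-frame utterance v0..v20, segment_size=8, right_context=4
utt = torch.arange(21, dtype=torch.float32).view(21, 1, 1)
padded = torch.cat([utt, utt.new_zeros(4, 1, 1)], dim=0)
print(gen_right_context_blocks(padded, segment_size=8, right_context=4).flatten())
# -> tensor([ 8.,  9., 10., 11., 16., 17., 18., 19.,  0.,  0.,  0.,  0.])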
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Tuple, List import torch import torch.nn.functional as F from fairseq.models import FairseqEncoder from fairseq.models.speech_to_text import ( ConvTransformerEncoder, ) from fairseq.models.speech_to_text.utils import attention_suppression from fairseq.models.speech_to_text.utils import ( lengths_to_encoder_padding_mask, segments_to_sequence, sequence_to_segments, ) from fairseq.modules import MultiheadAttention, TransformerEncoderLayer from torch import nn, Tensor # ------------------------------------------------------------------------------ # AugmentedMemoryConvTransformerEncoder # ------------------------------------------------------------------------------ class AugmentedMemoryConvTransformerEncoder(ConvTransformerEncoder): def __init__(self, args): super().__init__(args) args.encoder_stride = self.stride() self.left_context = args.left_context // args.encoder_stride self.right_context = args.right_context // args.encoder_stride self.left_context_after_stride = args.left_context // args.encoder_stride self.right_context_after_stride = args.right_context // args.encoder_stride self.transformer_layers = nn.ModuleList([]) self.transformer_layers.extend( [ AugmentedMemoryTransformerEncoderLayer(args) for i in range(args.encoder_layers) ] ) def stride(self): # Hard coded here. Should infer from convs in future stride = 4 return stride def forward(self, src_tokens, src_lengths, states=None): """Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]: """ bsz, max_seq_len, _ = src_tokens.size() x = ( src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim) .transpose(1, 2) .contiguous() ) x = self.conv(x) bsz, _, output_seq_len, _ = x.size() x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1) x = self.out(x) x = self.embed_scale * x subsampling_factor = 1.0 * max_seq_len / output_seq_len input_lengths = torch.max( (src_lengths.float() / subsampling_factor).ceil().long(), x.size(0) * src_lengths.new_ones([src_lengths.size(0)]).long(), ) encoder_padding_mask, _ = lengths_to_encoder_padding_mask( input_lengths, batch_first=True ) # TODO: fix positional embedding positions = self.embed_positions(encoder_padding_mask).transpose(0, 1) x += positions x = F.dropout(x, p=self.dropout, training=self.training) # State to store memory banks etc. 
if states is None: states = [ {"memory_banks": None, "encoder_states": None} for i in range(len(self.transformer_layers)) ] for i, layer in enumerate(self.transformer_layers): # x size: # (self.left_size + self.segment_size + self.right_size) # / self.stride, num_heads, dim # TODO: Consider mask here x = layer(x, states[i]) states[i]["encoder_states"] = x[ self.left_context_after_stride : -self.right_context_after_stride ] lengths = ( ( ~encoder_padding_mask[ :, self.left_context_after_stride : -self.right_context_after_stride ] ) .sum(dim=1, keepdim=True) .long() ) return states[-1]["encoder_states"], lengths, states # ------------------------------------------------------------------------------ # AugmentedMemoryTransformerEncoderLayer # ------------------------------------------------------------------------------ class AugmentedMemoryTransformerEncoderLayer(TransformerEncoderLayer): def __init__(self, args): super().__init__(args) self.left_context = args.left_context // args.encoder_stride self.right_context = args.right_context // args.encoder_stride def forward(self, x, state): length, batch_size, x_dim = x.size() residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) # init_state if state.get("memory_banks", None) is None: state["memory_banks"] = [] # TODO reseach new sum_query method seg_start = self.left_context seg_end = length - self.right_context if seg_start < seg_end: summarization_query = torch.mean(x[seg_start:seg_end], keepdim=True, dim=0) else: summarization_query = x.new_zeros(1, batch_size, x_dim) x = torch.cat([x, summarization_query], dim=0) x = self.self_attn(input_and_summary=x, state=state) x = self.dropout_module(x) x = residual + x if not self.normalize_before: x = self.self_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x if not self.normalize_before: x = self.final_layer_norm(x) return x def build_self_attention(self, embed_dim, args): return AugmentedMemoryMultiheadAttention( embed_dim=embed_dim, num_heads=args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, tanh_on_mem=True, max_memory_size=args.max_memory_size, ) # ------------------------------------------------------------------------------ # AugmentedMemoryMultiheadAttention # ------------------------------------------------------------------------------ class AugmentedMemoryMultiheadAttention(MultiheadAttention): """ Augmented Memory Attention from Streaming Transformer-based Acoustic Models Using Self-attention with Augmented Memory https://arxiv.org/abs/2005.08042 """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, tanh_on_mem=False, memory_dim=None, std_scale=0.5, # 0.5 based on https://arxiv.org/abs/2005.09137 max_memory_size=-1, disable_mem_on_mem_attn=True, ): super().__init__( embed_dim, num_heads, kdim, vdim, dropout, bias, add_bias_kv, add_zero_attn, self_attention, encoder_decoder_attention, q_noise, qn_block_size, ) self.memory_dim = memory_dim if memory_dim is not None else embed_dim self.std_scale = std_scale self.disable_mem_on_mem_attn = disable_mem_on_mem_attn # This Operator was used for factorization in PySpeech self.v2e = lambda x: x if 
tanh_on_mem: self.squash_mem = torch.tanh self.nonlinear_squash_mem = True else: self.squash_mem = lambda x: x self.nonlinear_squash_mem = False self.max_memory_size = max_memory_size def forward(self, input_and_summary, state): """ input: Encoder states of current segment with left or right context, plus one summarization query """ length, batch_size, _ = input_and_summary.shape length = length - 1 # not include sum_query, last index memory = state["memory_banks"] # TODO: positional embedding on memory if self.max_memory_size > -1 and len(memory) > self.max_memory_size: # TODO: need to fix here if self.max_memory_size == 0: memory = memory.new_zeros(1, memory.size(1), self.memory_dim) else: memory = memory[-self.max_memory_size :] memory_and_input = torch.cat(memory + [input_and_summary[:-1]], dim=0) input_and_sum_query = input_and_summary q = self.q_proj(self.v2e(input_and_sum_query)) k = self.k_proj(self.v2e(memory_and_input)) v = self.v_proj(self.v2e(memory_and_input)) q = ( q.contiguous() .view(-1, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) * self.scaling ) k = ( k.contiguous() .view(-1, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) ) v = ( v.contiguous() .view(-1, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) ) attention_weights = torch.bmm(q, k.transpose(1, 2)) if self.disable_mem_on_mem_attn: attention_weights = self.suppress_mem_on_mem_attention( batch_size, self.num_heads, len(memory), attention_weights ) if self.std_scale is not None: attention_weights = attention_suppression(attention_weights, self.std_scale) assert list(attention_weights.shape) == [ batch_size * self.num_heads, length + 1, length + len(memory), ] attention_weights = torch.nn.functional.softmax( attention_weights.float(), dim=-1 ).type_as(attention_weights) attention_probs = self.dropout_module(attention_weights) # [T, T, B, n_head] + [T, B, n_head, d_head] -> [T, B, n_head, d_head] attention = torch.bmm(attention_probs, v) assert list(attention.shape) == [ batch_size * self.num_heads, length + 1, self.head_dim, ] attention = ( attention.transpose(0, 1) .contiguous() .view(length + 1, batch_size, self.embed_dim) ) output_and_memory = self.out_proj(attention) next_m = output_and_memory[-1:] next_m = self.squash_mem(next_m) output = output_and_memory[:-1] state["memory_banks"].append(next_m) return output def suppress_mem_on_mem_attention( self, B: int, num_heads: int, mem_size: int, attention_weight: Tensor ): """ Arguments: - B: batch size - num_heads: number of attention heads - mem_size: size of memory bank - attention_weight: a [B*num_heads, T + 1, T + mem_size] vector Return: modified attention_weight with [B*num_heads, -1, :mem_size] = -inf """ attention_weight[:, -1, :mem_size] = float("-inf") return attention_weight # ------------------------------------------------------------------------------ # SequenceEncoder # ------------------------------------------------------------------------------ class SequenceEncoder(FairseqEncoder): """ SequenceEncoder encodes sequences. More specifically, `src_tokens` and `src_lengths` in `forward()` should describe a batch of "complete" sequences rather than segments. Segment-by-segment inference can be triggered by `segment_size`: 1) `segment_size` is None: SequenceEncoder treats the input sequence as one single segment. 2) `segment_size` is not None (some int instead): SequenceEncoder does the following: 1. breaks the input sequence into several segments 2. inference on each segment and collect the outputs 3. 
concatanete segment outputs into the output sequence. Note that `segment_size` here shouldn't include additional left/right contexts needed, for example if we wish to infer with LC-BLSTM where the middle chunk size is 100 and right context is 20, `segment_size` should be 100. """ def __init__(self, args, module): super().__init__(None) self.module = module self.input_time_axis = 1 self.output_time_axis = 0 self.segment_size = args.segment_size self.left_context = args.left_context self.right_context = args.right_context def forward( self, src_tokens: Tensor, src_lengths: Tensor, states=None, ): seg_src_tokens_lengths = sequence_to_segments( sequence=src_tokens, time_axis=self.input_time_axis, lengths=src_lengths, segment_size=self.segment_size, extra_left_context=self.left_context, extra_right_context=self.right_context, ) seg_encoder_states_lengths: List[Tuple[Tensor, Tensor]] = [] for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths: (seg_encoder_states, seg_enc_lengths, states) = self.module( seg_src_tokens, seg_src_lengths, states=states, ) seg_encoder_states_lengths.append((seg_encoder_states, seg_enc_lengths)) encoder_out, enc_lengths = segments_to_sequence( segments=seg_encoder_states_lengths, time_axis=self.output_time_axis ) encoder_padding_mask, _ = lengths_to_encoder_padding_mask( enc_lengths, batch_first=True ) if not encoder_padding_mask.any(): encoder_padding_mask = None return { "encoder_out": [encoder_out], "encoder_padding_mask": [encoder_padding_mask], "encoder_embedding": [], "encoder_states": [states], "src_tokens": [], "src_lengths": [], } def incremental_encode( self, seg_src_tokens: Tensor, seg_src_lengths: Tensor, states=None, ): """ Different from forward function, this function takes segmented speech as input, and append encoder states to previous states """ (seg_encoder_states, seg_enc_lengths, states) = self.module( seg_src_tokens, seg_src_lengths, states=states, ) return seg_encoder_states, seg_enc_lengths, states # ------------------------------------------------------------------------------ # Augmented memory model decorator # ------------------------------------------------------------------------------ def augmented_memory(klass): class StreamSeq2SeqModel(klass): @staticmethod def add_args(parser): super(StreamSeq2SeqModel, StreamSeq2SeqModel).add_args(parser) parser.add_argument( "--segment-size", type=int, required=True, help="Length of the segment." ) parser.add_argument( "--left-context", type=int, default=0, help="Left context for the segment.", ) parser.add_argument( "--right-context", type=int, default=0, help="Right context for the segment.", ) parser.add_argument( "--max-memory-size", type=int, default=-1, help="Right context for the segment.", ) StreamSeq2SeqModel.__name__ = klass.__name__ return StreamSeq2SeqModel
KosmosX-API-main
kosmosX/fairseq/fairseq/models/speech_to_text/modules/augmented_memory_attention.py
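# A toy, framework-free sketch of the augmented-memory recipe implemented
# above: each segment appends a mean-pooled summarization query, attention
# runs over [memory banks; current segment; summary], and the output at the
# summary position becomes the next memory bank. Single-head attention and
# the shapes here are deliberate simplifications, not the fairseq module.
import torch
import torch.nn.functional as F

def augmented_memory_step(segment, memory, max_memory_size=4):
    # segment: T x B x D, memory: list of 1 x B x D memory banks
    summary = segment.mean(dim=0, keepdim=True)           # 1 x B x D summarization query
    queries = torch.cat([segment, summary], dim=0)        # (T+1) x B x D
    keys = torch.cat(memory + [segment], dim=0)           # (M+T) x B x D

    # Single-head dot-product attention, batch-first for torch.bmm.
    q = queries.transpose(0, 1)                           # B x (T+1) x D
    k = keys.transpose(0, 1)                              # B x (M+T) x D
    scores = torch.bmm(q, k.transpose(1, 2)) / (segment.size(-1) ** 0.5)

    # "Disable mem-on-mem attention": the summary query must not look at memory.
    scores[:, -1, : len(memory)] = float("-inf")

    attn = F.softmax(scores, dim=-1)
    out = torch.bmm(attn, k).transpose(0, 1)              # (T+1) x B x D

    next_memory = torch.tanh(out[-1:])                    # squash the new memory bank
    memory = (memory + [next_memory])[-max_memory_size:]  # cap the bank size
    return out[:-1], memory

if __name__ == "__main__":
    mem = []
    for _ in range(3):
        seg = torch.randn(6, 2, 16)                       # T x B x D
        out, mem = augmented_memory_step(seg, mem)
    print(out.shape, len(mem))                            # torch.Size([6, 2, 16]) 3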
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .hubert import *  # noqa
from .hubert_asr import *  # noqa
KosmosX-API-main
kosmosX/fairseq/fairseq/models/hubert/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib from argparse import Namespace from typing import Any import torch import torch.nn as nn from dataclasses import dataclass, field from fairseq import checkpoint_utils, tasks, utils from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.models import BaseFairseqModel, FairseqEncoder, register_model from fairseq.models.hubert.hubert import MASKING_DISTRIBUTION_CHOICES from fairseq.tasks import FairseqTask from omegaconf import II, MISSING @dataclass class HubertAsrConfig(FairseqDataclass): w2v_path: str = field(default=MISSING, metadata={"help": "path to hubert model"}) no_pretrained_weights: bool = field( default=False, metadata={"help": "if true, does not load pretrained weights"}, ) dropout_input: float = field( default=0.0, metadata={"help": "dropout to apply to the input (after feat extr)"}, ) final_dropout: float = field( default=0.0, metadata={"help": "dropout after transformer and before final projection"}, ) dropout: float = field( default=0.0, metadata={"help": "dropout probability inside hubert model"}, ) attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights " "inside hubert model" }, ) activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN " "inside hubert model" }, ) # masking apply_mask: bool = field( default=False, metadata={"help": "apply masking during fine-tuning"} ) mask_length: int = field( default=10, metadata={"help": "repeat the mask indices multiple times"} ) mask_prob: float = field( default=0.5, metadata={ "help": "probability of replacing a token with mask " "(normalized by length)" }, ) mask_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose masks"} ) mask_other: float = field( default=0, metadata={ "help": "secondary mask argument " "(used for more complex distributions), " "see help in compute_mask_indices" }, ) no_mask_overlap: bool = field( default=False, metadata={"help": "whether to allow masks to overlap"} ) # channel masking mask_channel_length: int = field( default=10, metadata={"help": "length of the mask for features (channels)"}, ) mask_channel_prob: float = field( default=0.0, metadata={"help": "probability of replacing a feature with 0"}, ) mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length for channel masking"}, ) mask_channel_other: float = field( default=0, metadata={ "help": "secondary mask argument " "(used for more complex distributions), " "see help in compute_mask_indices" }, ) no_mask_channel_overlap: bool = field( default=False, metadata={"help": "whether to allow channel masks to overlap"}, ) freeze_finetune_updates: int = field( default=0, metadata={"help": "dont finetune hubert for this many updates"}, ) feature_grad_mult: float = field( default=0.0, metadata={"help": "reset feature grad mult in hubert to this"}, ) layerdrop: float = field( default=0.0, metadata={"help": "probability of dropping a layer in hubert"}, ) normalize: bool = II("task.normalize") data: str = II("task.data") # this holds the loaded hubert args w2v_args: Any = None @dataclass class HubertCtcConfig(HubertAsrConfig): pass @register_model("hubert_ctc", 
dataclass=HubertCtcConfig) class HubertCtc(BaseFairseqModel): def __init__(self, cfg: HubertCtcConfig, w2v_encoder: BaseFairseqModel): super().__init__() self.cfg = cfg self.w2v_encoder = w2v_encoder def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) return state_dict @classmethod def build_model(cls, cfg: HubertCtcConfig, task: FairseqTask): """Build a new model instance.""" w2v_encoder = HubertEncoder(cfg, task.target_dictionary) return cls(cfg, w2v_encoder) def get_normalized_probs(self, net_output, log_probs): """Get normalized probabilities (or log probs) from a net's output.""" logits = net_output["encoder_out"] if log_probs: return utils.log_softmax(logits.float(), dim=-1) else: return utils.softmax(logits.float(), dim=-1) def get_logits(self, net_output): logits = net_output["encoder_out"] padding = net_output["encoder_padding_mask"] if padding is not None and padding.any(): padding = padding.T logits[padding][..., 0] = 0 logits[padding][..., 1:] = float("-inf") return logits def forward(self, **kwargs): x = self.w2v_encoder(**kwargs) return x @dataclass class HubertSeq2SeqConfig(HubertAsrConfig): decoder_embed_dim: int = field( default=768, metadata={"help": "decoder embedding dimension"} ) decoder_ffn_embed_dim: int = field( default=3072, metadata={"help": "decoder embedding dimension for FFN"} ) decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"}) decoder_layerdrop: float = field( default=0.0, metadata={"help": "decoder layerdrop chance"} ) decoder_attention_heads: int = field( default=4, metadata={"help": "num decoder attention heads"} ) decoder_learned_pos: bool = field( default=False, metadata={"help": "use learned positional embeddings in the decoder"}, ) decoder_normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each decoder block"}, ) no_token_positional_embeddings: bool = field( default=False, metadata={ "help": "if set, disables positional embeddings " "(outside self attention)" }, ) decoder_dropout: float = field( default=0.0, metadata={"help": "dropout probability in the decoder"} ) decoder_attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights " "inside the decoder" }, ) decoder_activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN " "inside the decoder" }, ) max_target_positions: int = field( default=2048, metadata={"help": "max target positions"} ) share_decoder_input_output_embed: bool = field( default=False, metadata={"help": "share decoder input and output embeddings"}, ) class HubertEncoder(FairseqEncoder): def __init__(self, cfg: HubertAsrConfig, tgt_dict=None): self.apply_mask = cfg.apply_mask arg_overrides = { "dropout": cfg.dropout, "activation_dropout": cfg.activation_dropout, "dropout_input": cfg.dropout_input, "attention_dropout": cfg.attention_dropout, "mask_length": cfg.mask_length, "mask_prob": cfg.mask_prob, "mask_selection": cfg.mask_selection, "mask_other": cfg.mask_other, "no_mask_overlap": cfg.no_mask_overlap, "mask_channel_length": cfg.mask_channel_length, "mask_channel_prob": cfg.mask_channel_prob, "mask_channel_selection": cfg.mask_channel_selection, "mask_channel_other": cfg.mask_channel_other, "no_mask_channel_overlap": cfg.no_mask_channel_overlap, "encoder_layerdrop": cfg.layerdrop, "feature_grad_mult": cfg.feature_grad_mult, } if cfg.w2v_args is None: state = 
checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides) w2v_args = state.get("cfg", None) if w2v_args is None: w2v_args = convert_namespace_to_omegaconf(state["args"]) cfg.w2v_args = w2v_args else: state = None w2v_args = cfg.w2v_args if isinstance(w2v_args, Namespace): cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args) assert cfg.normalize == w2v_args.task.normalize, ( "Fine-tuning works best when data normalization is the same. " "Please check that --normalize is set or unset for " "both pre-training and here" ) w2v_args.task.data = cfg.data task = tasks.setup_task(w2v_args.task) if state is not None and "task_state" in state: # This will load the stored "dictionaries" object task.load_state_dict(state["task_state"]) model = task.build_model(w2v_args.model, from_checkpoint=True) if state is not None and not cfg.no_pretrained_weights: # set strict=False because we omit some modules model.load_state_dict(state["model"], strict=False) model.remove_pretraining_modules() super().__init__(task.source_dictionary) d = w2v_args.model.encoder_embed_dim self.w2v_model = model self.final_dropout = nn.Dropout(cfg.final_dropout) self.freeze_finetune_updates = cfg.freeze_finetune_updates self.num_updates = 0 if tgt_dict is not None: self.proj = Linear(d, len(tgt_dict)) elif getattr(cfg, "decoder_embed_dim", d) != d: self.proj = Linear(d, cfg.decoder_embed_dim) else: self.proj = None def set_num_updates(self, num_updates): """Set the number of parameters updates.""" super().set_num_updates(num_updates) self.num_updates = num_updates def forward(self, source, padding_mask, tbc=True, **kwargs): w2v_args = { "source": source, "padding_mask": padding_mask, "mask": self.apply_mask and self.training, } ft = self.freeze_finetune_updates <= self.num_updates with torch.no_grad() if not ft else contextlib.ExitStack(): x, padding_mask = self.w2v_model.extract_features(**w2v_args) if tbc: # B x T x C -> T x B x C x = x.transpose(0, 1) x = self.final_dropout(x) if self.proj: x = self.proj(x) return { "encoder_out": x, # T x B x C "encoder_padding_mask": padding_mask, # B x T "padding_mask": padding_mask, } def reorder_encoder_out(self, encoder_out, new_order): if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return None def upgrade_state_dict_named(self, state_dict, name): return state_dict def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m
KosmosX-API-main
kosmosX/fairseq/fairseq/models/hubert/hubert_asr.py
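# A minimal sketch of the "freeze the pretrained encoder for the first N
# updates" pattern used by HubertEncoder.forward above: before
# `freeze_finetune_updates` steps the feature extractor runs under
# torch.no_grad(); afterwards contextlib.ExitStack() acts as a no-op context
# manager. The tiny model below is a stand-in, not the HuBERT API.
import contextlib
import torch
import torch.nn as nn

class FrozenThenFinetuned(nn.Module):
    def __init__(self, freeze_finetune_updates: int = 2):
        super().__init__()
        self.backbone = nn.Linear(8, 8)   # stands in for the pretrained encoder
        self.head = nn.Linear(8, 4)       # stands in for the task projection
        self.freeze_finetune_updates = freeze_finetune_updates
        self.num_updates = 0

    def set_num_updates(self, num_updates: int):
        self.num_updates = num_updates

    def forward(self, x):
        ft = self.freeze_finetune_updates <= self.num_updates
        with torch.no_grad() if not ft else contextlib.ExitStack():
            feats = self.backbone(x)
        return self.head(feats)

if __name__ == "__main__":
    m = FrozenThenFinetuned(freeze_finetune_updates=2)
    x = torch.randn(3, 8)
    m.set_num_updates(0)
    m(x).sum().backward()
    print(m.backbone.weight.grad is None)      # True: backbone frozen early on
    m.zero_grad()
    m.set_num_updates(5)
    m(x).sum().backward()
    print(m.backbone.weight.grad is not None)  # True: backbone now fine-tuned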
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from dataclasses import dataclass, field from typing import Dict, List, Optional, Tuple import numpy as np import torch import torch.nn as nn from omegaconf import II from fairseq import utils from fairseq.data.data_utils import compute_mask_indices from fairseq.data.dictionary import Dictionary from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import BaseFairseqModel, register_model from fairseq.models.wav2vec.wav2vec2 import ( ConvFeatureExtractionModel, TransformerEncoder, ) from fairseq.modules import GradMultiply, LayerNorm from fairseq.tasks.hubert_pretraining import ( HubertPretrainingConfig, HubertPretrainingTask, ) logger = logging.getLogger(__name__) EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"]) MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"]) @dataclass class HubertConfig(FairseqDataclass): label_rate: int = II("task.label_rate") extractor_mode: EXTRACTOR_MODE_CHOICES = field( default="default", metadata={ "help": "mode for feature extractor. default has a single group " "norm with d groups in the first conv block, whereas layer_norm " "has layer norms in every block (meant to use with normalize=True)" }, ) encoder_layers: int = field( default=12, metadata={"help": "num encoder layers in the transformer"} ) encoder_embed_dim: int = field( default=768, metadata={"help": "encoder embedding dimension"} ) encoder_ffn_embed_dim: int = field( default=3072, metadata={"help": "encoder embedding dimension for FFN"} ) encoder_attention_heads: int = field( default=12, metadata={"help": "num encoder attention heads"} ) activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( default="gelu", metadata={"help": "activation function to use"} ) # dropouts dropout: float = field( default=0.1, metadata={"help": "dropout probability for the transformer"}, ) attention_dropout: float = field( default=0.1, metadata={"help": "dropout probability for attention weights"}, ) activation_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN"}, ) encoder_layerdrop: float = field( default=0.0, metadata={"help": "probability of dropping a tarnsformer layer"}, ) dropout_input: float = field( default=0.0, metadata={"help": "dropout to apply to the input (after feat extr)"}, ) dropout_features: float = field( default=0.0, metadata={"help": "dropout to apply to the features (after feat extr)"}, ) final_dim: int = field( default=0, metadata={ "help": "project final representations and targets to this many " "dimensions. 
set to encoder_embed_dim is <= 0" }, ) untie_final_proj: bool = field( default=False, metadata={"help": "use separate projection for each target"}, ) layer_norm_first: bool = field( default=False, metadata={"help": "apply layernorm first in the transformer"}, ) conv_feature_layers: str = field( default="[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2", metadata={ "help": "string describing convolutional feature extraction " "layers in form of a python list that contains " "[(dim, kernel_size, stride), ...]" }, ) conv_bias: bool = field( default=False, metadata={"help": "include bias in conv encoder"} ) logit_temp: float = field( default=0.1, metadata={"help": "temperature to divide logits by"} ) target_glu: bool = field( default=False, metadata={"help": "adds projection + glu to targets"} ) feature_grad_mult: float = field( default=1.0, metadata={"help": "multiply feature extractor var grads by this"}, ) # masking mask_length: int = field(default=10, metadata={"help": "mask length"}) mask_prob: float = field( default=0.65, metadata={"help": "probability of replacing a token with mask"}, ) mask_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length"} ) mask_other: float = field( default=0, metadata={ "help": "secondary mask argument " "(used for more complex distributions), " "see help in compute_mask_indicesh" }, ) no_mask_overlap: bool = field( default=False, metadata={"help": "whether to allow masks to overlap"} ) mask_min_space: int = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) # channel masking mask_channel_length: int = field( default=10, metadata={"help": "length of the mask for features (channels)"}, ) mask_channel_prob: float = field( default=0.0, metadata={"help": "probability of replacing a feature with 0"}, ) mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length for channel masking"}, ) mask_channel_other: float = field( default=0, metadata={ "help": "secondary mask argument " "(used for more complex distributions), " "see help in compute_mask_indicesh" }, ) no_mask_channel_overlap: bool = field( default=False, metadata={"help": "whether to allow channel masks to overlap"}, ) mask_channel_min_space: int = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) # positional embeddings conv_pos: int = field( default=128, metadata={"help": "number of filters for convolutional positional embeddings"}, ) conv_pos_groups: int = field( default=16, metadata={"help": "number of groups for convolutional positional embedding"}, ) latent_temp: Tuple[float, float, float] = field( default=(2, 0.5, 0.999995), metadata={"help": "legacy (to be removed)"}, ) # loss computation skip_masked: bool = field( default=False, metadata={"help": "skip computing losses over masked frames"}, ) skip_nomask: bool = field( default=False, metadata={"help": "skip computing losses over unmasked frames"}, ) checkpoint_activations: bool = field( default=False, metadata={"help": "recompute activations and save memory for extra compute"}, ) @register_model("hubert", dataclass=HubertConfig) class HubertModel(BaseFairseqModel): def __init__( self, cfg: HubertConfig, task_cfg: HubertPretrainingConfig, dictionaries: List[Dictionary], ) -> None: super().__init__() logger.info(f"HubertModel Config: {cfg}") feature_enc_layers = eval(cfg.conv_feature_layers) # noqa self.embed = feature_enc_layers[-1][0] 
self.feature_extractor = ConvFeatureExtractionModel( conv_layers=feature_enc_layers, dropout=0.0, mode=cfg.extractor_mode, conv_bias=cfg.conv_bias, ) feature_ds_rate = np.prod([s for _, _, s in feature_enc_layers]) self.feat2tar_ratio = cfg.label_rate * feature_ds_rate / task_cfg.sample_rate self.post_extract_proj = ( nn.Linear(self.embed, cfg.encoder_embed_dim) if self.embed != cfg.encoder_embed_dim else None ) self.mask_prob = cfg.mask_prob self.mask_selection = cfg.mask_selection self.mask_other = cfg.mask_other self.mask_length = cfg.mask_length self.no_mask_overlap = cfg.no_mask_overlap self.mask_min_space = cfg.mask_min_space self.mask_channel_prob = cfg.mask_channel_prob self.mask_channel_selection = cfg.mask_channel_selection self.mask_channel_other = cfg.mask_channel_other self.mask_channel_length = cfg.mask_channel_length self.no_mask_channel_overlap = cfg.no_mask_channel_overlap self.mask_channel_min_space = cfg.mask_channel_min_space self.dropout_input = nn.Dropout(cfg.dropout_input) self.dropout_features = nn.Dropout(cfg.dropout_features) self.feature_grad_mult = cfg.feature_grad_mult self.logit_temp = cfg.logit_temp self.skip_masked = cfg.skip_masked self.skip_nomask = cfg.skip_nomask final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim self.mask_emb = nn.Parameter( torch.FloatTensor(cfg.encoder_embed_dim).uniform_() ) self.encoder = TransformerEncoder(cfg) self.layer_norm = LayerNorm(self.embed) self.target_glu = None if cfg.target_glu: self.target_glu = nn.Sequential( nn.Linear(final_dim, final_dim * 2), nn.GLU() ) self.untie_final_proj = cfg.untie_final_proj if self.untie_final_proj: self.final_proj = nn.Linear( cfg.encoder_embed_dim, final_dim * len(dictionaries) ) else: self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim) # modules below are not needed during fine-tuning if any([d is None for d in dictionaries]): logger.info("cannot find dictionary. 
assume will be used for fine-tuning") else: self.num_classes = [len(d) for d in dictionaries] self.label_embs_concat = nn.Parameter( torch.FloatTensor(sum(self.num_classes), final_dim) ) nn.init.uniform_(self.label_embs_concat) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" super().upgrade_state_dict_named(state_dict, name) return state_dict @classmethod def build_model(cls, cfg: HubertConfig, task: HubertPretrainingTask): """Build a new model instance.""" model = HubertModel(cfg, task.cfg, task.dictionaries) return model def apply_mask(self, x, padding_mask, target_list): B, T, C = x.shape if self.mask_prob > 0: mask_indices = compute_mask_indices( (B, T), padding_mask, self.mask_prob, self.mask_length, self.mask_selection, self.mask_other, min_masks=2, no_overlap=self.no_mask_overlap, min_space=self.mask_min_space, ) mask_indices = torch.from_numpy(mask_indices).to(x.device) x[mask_indices] = self.mask_emb else: mask_indices = None if self.mask_channel_prob > 0: mask_channel_indices = compute_mask_indices( (B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space, ) mask_channel_indices = ( torch.from_numpy(mask_channel_indices) .to(x.device) .unsqueeze(1) .expand(-1, T, -1) ) x[mask_channel_indices] = 0 return x, mask_indices def compute_nce(self, x, pos, negs): neg_is_pos = (pos == negs).all(-1) pos = pos.unsqueeze(0) targets = torch.cat([pos, negs], dim=0) logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x) logits /= self.logit_temp if neg_is_pos.any(): logits[1:][neg_is_pos] = float("-inf") logits = logits.transpose(0, 1) # (num_x, num_cls+1) return logits def forward_features(self, source: torch.Tensor) -> torch.Tensor: if self.feature_grad_mult > 0: features = self.feature_extractor(source) if self.feature_grad_mult != 1.0: features = GradMultiply.apply(features, self.feature_grad_mult) else: with torch.no_grad(): features = self.feature_extractor(source) return features def forward_targets( self, features: torch.Tensor, target_list: List[torch.Tensor], ) -> Tuple[torch.Tensor, torch.Tensor]: # Trim features to ensure labels exist and then get aligned labels feat_tsz = features.size(2) targ_tsz = min([t.size(1) for t in target_list]) if self.feat2tar_ratio * feat_tsz > targ_tsz: feat_tsz = int(targ_tsz / self.feat2tar_ratio) features = features[..., :feat_tsz] target_inds = torch.arange(feat_tsz).float() * self.feat2tar_ratio target_list = [t[:, target_inds.long()] for t in target_list] return features, target_list def forward_padding_mask( self, features: torch.Tensor, padding_mask: torch.Tensor, ) -> torch.Tensor: extra = padding_mask.size(1) % features.size(1) if extra > 0: padding_mask = padding_mask[:, :-extra] padding_mask = padding_mask.view(padding_mask.size(0), features.size(1), -1) padding_mask = padding_mask.all(-1) return padding_mask def forward( self, source: torch.Tensor, target_list: Optional[List[torch.Tensor]] = None, padding_mask: Optional[torch.Tensor] = None, mask: bool = True, features_only: bool = False, output_layer: Optional[int] = None, ) -> Dict[str, torch.Tensor]: """output layer is 1-based""" features = self.forward_features(source) if target_list is not None: features, target_list = self.forward_targets(features, target_list) features_pen = features.float().pow(2).mean() features = features.transpose(1, 2) 
features = self.layer_norm(features) unmasked_features = features.clone() if padding_mask is not None: padding_mask = self.forward_padding_mask(features, padding_mask) if self.post_extract_proj is not None: features = self.post_extract_proj(features) features = self.dropout_input(features) unmasked_features = self.dropout_features(unmasked_features) if mask: x, mask_indices = self.apply_mask(features, padding_mask, target_list) else: x = features mask_indices = None # feature: (B, T, D), float # target: (B, T), long # x: (B, T, D), float # padding_mask: (B, T), bool # mask_indices: (B, T), bool x, _ = self.encoder( x, padding_mask=padding_mask, layer=None if output_layer is None else output_layer - 1, ) if features_only: return {"x": x, "padding_mask": padding_mask, "features": features} def compute_pred(proj_x, target, label_embs): # compute logits for the i-th label set y = torch.index_select(label_embs, 0, target.long()) negs = label_embs.unsqueeze(1).expand(-1, proj_x.size(0), -1) if self.target_glu: y = self.target_glu(y) negs = self.target_glu(negs) # proj_x: (S, D) # y: (S, D) # negs: (Neg, S, D) return self.compute_nce(proj_x, y, negs) label_embs_list = self.label_embs_concat.split(self.num_classes, 0) if not self.skip_masked: masked_indices = torch.logical_and(~padding_mask, mask_indices) proj_x_m = self.final_proj(x[masked_indices]) if self.untie_final_proj: proj_x_m_list = proj_x_m.chunk(len(target_list), dim=-1) else: proj_x_m_list = [proj_x_m for _ in range(len(target_list))] logit_m_list = [ compute_pred(proj_x_m, t[masked_indices], label_embs_list[i]) for i, (proj_x_m, t) in enumerate(zip(proj_x_m_list, target_list)) ] else: logit_m_list = [None for _ in target_list] if not self.skip_nomask: nomask_indices = torch.logical_and(~padding_mask, ~mask_indices) proj_x_u = self.final_proj(x[nomask_indices]) if self.untie_final_proj: proj_x_u_list = proj_x_u.chunk(len(target_list), dim=-1) else: proj_x_u_list = [proj_x_u for _ in range(len(target_list))] logit_u_list = [ compute_pred(proj_x_u, t[nomask_indices], label_embs_list[i]) for i, (proj_x_u, t) in enumerate(zip(proj_x_u_list, target_list)) ] else: logit_u_list = [None for _ in target_list] result = { "logit_m_list": logit_m_list, "logit_u_list": logit_u_list, "padding_mask": padding_mask, "features_pen": features_pen, } return result def extract_features( self, source: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, mask: bool = False, ret_conv: bool = False, output_layer: Optional[int] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: res = self.forward( source, padding_mask=padding_mask, mask=mask, features_only=True, output_layer=output_layer, ) feature = res["features"] if ret_conv else res["x"] return feature, res["padding_mask"] def get_logits(self, net_output, is_masked=True): if is_masked: logits_list = net_output["logit_m_list"] else: logits_list = net_output["logit_u_list"] logits_list = [x.float() for x in logits_list if x is not None] return logits_list def get_targets(self, net_output, is_masked=True): logits_list = self.get_logits(net_output, is_masked) targets_list = [x.new_zeros(x.size(0), dtype=torch.long) for x in logits_list] return targets_list def get_extra_losses(self, net_output): extra_losses = [] names = [] if "features_pen" in net_output: extra_losses.append(net_output["features_pen"]) names.append("features_pen") return extra_losses, names def remove_pretraining_modules(self): self.target_glu = None self.final_proj = None
KosmosX-API-main
kosmosX/fairseq/fairseq/models/hubert/hubert.py
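# A self-contained sketch of the contrastive logit computation HubertModel
# uses in compute_nce above: cosine similarity between each projected frame
# and [positive; negatives] embeddings, divided by a temperature, with any
# negative identical to the positive pushed to -inf. Shapes follow the
# comments in the code above; this is an illustration, not the exact module.
import torch

def compute_nce(x, pos, negs, logit_temp=0.1):
    # x: (S, D) projected frames, pos: (S, D), negs: (Neg, S, D)
    neg_is_pos = (pos == negs).all(-1)                     # (Neg, S)
    targets = torch.cat([pos.unsqueeze(0), negs], dim=0)   # (Neg+1, S, D)
    logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1)
    logits /= logit_temp
    if neg_is_pos.any():
        logits[1:][neg_is_pos] = float("-inf")
    return logits.transpose(0, 1)                          # (S, Neg+1); index 0 is the positive

if __name__ == "__main__":
    S, D, Neg = 5, 16, 7
    x = torch.randn(S, D)
    pos = torch.randn(S, D)
    negs = torch.randn(Neg, S, D)
    logits = compute_nce(x, pos, negs)
    targets = torch.zeros(S, dtype=torch.long)             # the positive is always class 0
    loss = torch.nn.functional.cross_entropy(logits, targets)
    print(logits.shape, float(loss))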
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import re from dataclasses import dataclass, field, fields from typing import List, Optional from omegaconf import II from fairseq import utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.utils import safe_getattr, safe_hasattr DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8) _NAME_PARSER = r"(decoder|encoder|quant_noise)_(.*)" @dataclass class EncDecBaseConfig(FairseqDataclass): embed_path: Optional[str] = field( default=None, metadata={"help": "path to pre-trained embedding"} ) embed_dim: Optional[int] = field( default=512, metadata={"help": "embedding dimension"} ) ffn_embed_dim: int = field( default=2048, metadata={"help": "embedding dimension for FFN"} ) layers: int = field(default=6, metadata={"help": "number of layers"}) attention_heads: int = field( default=8, metadata={"help": "number of attention heads"} ) normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each block"} ) learned_pos: bool = field( default=False, metadata={"help": "use learned positional embeddings"} ) # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) layerdrop: float = field(default=0, metadata={"help": "LayerDrop probability"}) layers_to_keep: Optional[List[int]] = field( default=None, metadata={"help": "which layers to *keep* when pruning"} ) @dataclass class DecoderConfig(EncDecBaseConfig): input_dim: int = II("model.decoder.embed_dim") output_dim: int = field( default=II("model.decoder.embed_dim"), metadata={ "help": "decoder output dimension (extra linear layer if different from decoder embed dim)" }, ) def __post_init__(self): # II doesn't work if we are just creating the object outside of hydra so fix that if self.input_dim == II("model.decoder.embed_dim"): self.input_dim = self.embed_dim if self.output_dim == II("model.decoder.embed_dim"): self.output_dim = self.embed_dim @dataclass class QuantNoiseConfig(FairseqDataclass): pq: float = field( default=0.0, metadata={"help": "iterative PQ quantization noise at training time"}, ) pq_block_size: int = field( default=8, metadata={"help": "block size of quantization noise at training time"}, ) scalar: float = field( default=0.0, metadata={ "help": "scalar quantization noise and scalar quantization at training time" }, ) @dataclass class TransformerConfig(FairseqDataclass): activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( default="relu", metadata={"help": "activation function to use"}, ) dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) attention_dropout: float = field( default=0.0, metadata={"help": "dropout probability for attention weights"} ) activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN.", "alias": "--relu-dropout", }, ) adaptive_input: bool = False encoder: EncDecBaseConfig = EncDecBaseConfig() # TODO should really be in the encoder config max_source_positions: int = field( default=DEFAULT_MAX_SOURCE_POSITIONS, metadata={"help": "Maximum input length supported by the encoder"}, ) decoder: DecoderConfig = DecoderConfig() # TODO should really be in the decoder config max_target_positions: int = field( default=DEFAULT_MAX_TARGET_POSITIONS, metadata={"help": "Maximum output length supported by the 
decoder"}, ) share_decoder_input_output_embed: bool = field( default=False, metadata={"help": "share decoder input and output embeddings"} ) share_all_embeddings: bool = field( default=False, metadata={ "help": "share encoder, decoder and output embeddings (requires shared dictionary and embed dim)" }, ) no_token_positional_embeddings: bool = field( default=False, metadata={ "help": "if True, disables positional embeddings (outside self attention)" }, ) adaptive_softmax_cutoff: Optional[List[int]] = field( default=None, metadata={ "help": "list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion" }, ) adaptive_softmax_dropout: float = field( default=0.0, metadata={"help": "sets adaptive softmax dropout for the tail projections"}, ) adaptive_softmax_factor: float = field( default=4, metadata={"help": "adaptive input factor"} ) layernorm_embedding: bool = field( default=False, metadata={"help": "add layernorm to embedding"} ) tie_adaptive_weights: bool = field( default=False, metadata={ "help": "if set, ties the weights of adaptive softmax and adaptive input" }, ) tie_adaptive_proj: bool = field( default=False, metadata={ "help": "if set, ties the projection weights of adaptive softmax and adaptive input" }, ) no_scale_embedding: bool = field( default=False, metadata={"help": "if True, dont scale embeddings"} ) checkpoint_activations: bool = field( default=False, metadata={ "help": "checkpoint activations at each layer, which saves GPU memory usage at the cost of some additional compute" }, ) offload_activations: bool = field( default=False, metadata={ "help": "checkpoint activations at each layer, then save to gpu. Sets --checkpoint-activations." }, ) # args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019) no_cross_attention: bool = field( default=False, metadata={"help": "do not perform cross-attention"} ) cross_self_attention: bool = field( default=False, metadata={"help": "perform cross+self-attention"} ) # args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) quant_noise: QuantNoiseConfig = field(default=QuantNoiseConfig()) min_params_to_wrap: int = field( default=DEFAULT_MIN_PARAMS_TO_WRAP, metadata={ "help": "minimum number of params for a layer to be wrapped with FSDP() when " "training with --ddp-backend=fully_sharded. Smaller values will " "improve memory efficiency, but may make torch.distributed " "communication less efficient due to smaller input sizes. This option " "is set to 0 (i.e., always wrap) when --checkpoint-activations or " "--offload-activations are passed." 
}, ) # DEPRECATED field, but some old checkpoints might have it char_inputs: bool = field( default=False, metadata={"help": "if set, model takes character ids as input"} ) relu_dropout: float = 0.0 # config for "BASE Layers: Simplifying Training of Large, Sparse Models" base_layers: Optional[int] = field( default=0, metadata={"help": "number of BASE layers in total"} ) base_sublayers: Optional[int] = field( default=1, metadata={"help": "number of sublayers in each BASE layer"} ) base_shuffle: Optional[int] = field( default=1, metadata={"help": "shuffle tokens between workers before computing assignment"}, ) export: bool = field( default=False, metadata={"help": "make the layernorm exportable with torchscript."}, ) # copied from transformer_lm but expected in transformer_decoder: no_decoder_final_norm: bool = field( default=False, metadata={"help": "don't add an extra layernorm after the last decoder block"}, ) deepnet: bool = field( default=False, metadata={ "help": "enable deepnet in decoder" }, ) last_ln_scale: bool = field( default=False, metadata={ "help": "enable last_ln_scale in decoder" }, ) # We need to make this hierarchical dataclass like the flat namespace # __getattr__ and __setattr__ here allow backward compatibility # for subclasses of Transformer(Legacy) that depend on read/write on # the flat namespace. def __getattr__(self, name): match = re.match(_NAME_PARSER, name) if match: sub = safe_getattr(self, match[1]) return safe_getattr(sub, match[2]) raise AttributeError(f"invalid argument {name}.") def __setattr__(self, name, value): match = re.match(_NAME_PARSER, name) if match: sub = safe_getattr(self, match[1]) setattr(sub, match[2], value) else: super().__setattr__(name, value) @staticmethod def _copy_keys(args, cls, prefix, seen): """ copy the prefixed keys (decoder_embed_dim) to the DC fields: decoder.embed_dim """ cfg = cls() for fld in fields(cls): # for all the fields in the DC, find the fields (e.g. embed_dim) # in the namespace with the prefix (e.g. decoder) # and set it on the dc. args_key = f"{prefix}_{fld.name}" if safe_hasattr(args, args_key): seen.add(args_key) setattr(cfg, fld.name, safe_getattr(args, args_key)) if safe_hasattr(args, fld.name): seen.add(fld.name) setattr(cfg, fld.name, safe_getattr(args, fld.name)) return cfg @classmethod def from_namespace(cls, args): if args is None: return None if not isinstance(args, cls): seen = set() config = cls() # currently, we can go generically from DC fields to args hierarchically # but we can't easily deconstruct a flat namespace to a hierarchical # DC. Mostly because we could have a sub-dc called `decoder-foo` that should not # go to the sub struct called `decoder`. There are ways to go around this, but let's keep it simple # for now. 
for fld in fields(cls): # concretelly, the transformer_config know what sub-dc it has, so we go through all the dc fields # and if it's one that has a sub-dc, we build that sub-dc with `copy_keys()` if fld.name == "decoder": if safe_hasattr(args, "decoder"): # in some cases, the args we receive is already structured (as DictConfigs), so let's just build the correct DC seen.add("decoder") config.decoder = DecoderConfig(**args.decoder) else: config.decoder = cls._copy_keys( args, DecoderConfig, "decoder", seen ) elif fld.name == "encoder": # same but for encoder if safe_hasattr(args, "encoder"): seen.add("encoder") config.encoder = EncDecBaseConfig(**args.encoder) else: config.encoder = cls._copy_keys( args, EncDecBaseConfig, "encoder", seen ) elif fld.name == "quant_noise": # same but for quant_noise if safe_hasattr(args, "quant_noise"): seen.add("quant_noise") config.quant_noise = QuantNoiseConfig(**args.quant_noise) else: config.quant_noise = cls._copy_keys( args, QuantNoiseConfig, "quant_noise", seen ) elif safe_hasattr(args, fld.name): # if it's not a structure field, it's just a normal field, copy it over seen.add(fld.name) setattr(config, fld.name, safe_getattr(args, fld.name)) # we got all the fields defined in the dataclass, but # the argparse namespace might have extra args for two reasons: # - we are in a legacy class so all the args are not declared in the dataclass. Ideally once everyone has defined a dataclass for their model, we won't need this # - some places expect args to be there but never define them args_dict = ( args._asdict() if safe_hasattr(args, "_asdict") else vars(args) if safe_hasattr(args, "__dict__") else {} ) # namedtupled doesn't have __dict__ :-/ for key, value in args_dict.items(): if key not in seen: setattr(config, key, value) return config else: return args
KosmosX-API-main
kosmosX/fairseq/fairseq/models/transformer/transformer_config.py
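# A stripped-down sketch of the flat-to-hierarchical attribute trick used by
# TransformerConfig above: a regex splits names like "decoder_embed_dim" into
# a sub-config ("decoder") and a field ("embed_dim"), so legacy flat-namespace
# reads and writes keep working against the nested dataclasses. The field
# names in this mini config are illustrative only.
import re
from dataclasses import dataclass, field

_NAME_PARSER = r"(decoder|encoder)_(.*)"

@dataclass
class SubConfig:
    embed_dim: int = 512
    layers: int = 6

@dataclass
class MiniTransformerConfig:
    dropout: float = 0.1
    encoder: SubConfig = field(default_factory=SubConfig)
    decoder: SubConfig = field(default_factory=SubConfig)

    def __getattr__(self, name):
        # Only called when normal lookup fails, i.e. for flat legacy names.
        match = re.match(_NAME_PARSER, name)
        if match:
            return getattr(getattr(self, match[1]), match[2])
        raise AttributeError(f"invalid argument {name}.")

    def __setattr__(self, name, value):
        match = re.match(_NAME_PARSER, name)
        if match:
            setattr(getattr(self, match[1]), match[2], value)
        else:
            super().__setattr__(name, value)

if __name__ == "__main__":
    cfg = MiniTransformerConfig()
    print(cfg.decoder_embed_dim)   # 512, resolved via cfg.decoder.embed_dim
    cfg.encoder_layers = 12        # writes through to cfg.encoder.layers
    print(cfg.encoder.layers)      # 12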
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer.transformer_config import ( TransformerConfig, DEFAULT_MAX_SOURCE_POSITIONS, DEFAULT_MAX_TARGET_POSITIONS, DEFAULT_MIN_PARAMS_TO_WRAP, ) from fairseq.models.transformer.transformer_base import ( TransformerModelBase, ) @register_model("transformer") class TransformerModel(TransformerModelBase): """ This is the legacy implementation of the transformer model that uses argparse for configuration. """ @classmethod def hub_models(cls): # fmt: off def moses_subword(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'subword_nmt', } def moses_fastbpe(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'fastbpe', } def spm(path): return { 'path': path, 'bpe': 'sentencepiece', 'tokenizer': 'space', } return { 'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'), 'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2', 'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'), 'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'), 'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'), 'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'), 'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'), 'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'), 'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'), 'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'), 'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'), 'transformer.wmt20.en-ta': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz'), 'transformer.wmt20.en-iu.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz'), 'transformer.wmt20.en-iu.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz'), 'transformer.wmt20.ta-en': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz'), 'transformer.wmt20.iu-en.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz'), 'transformer.wmt20.iu-en.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz'), 'transformer.flores101.mm100.615M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_615M.tar.gz'), 'transformer.flores101.mm100.175M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_175M.tar.gz'), } # fmt: on def __init__(self, args, encoder, decoder): cfg = TransformerConfig.from_namespace(args) 
super().__init__(cfg, encoder, decoder) self.args = args @classmethod def add_args(cls, parser): """Add model-specific arguments to the parser.""" # we want to build the args recursively in this case. # do not set defaults so that settings defaults from various architectures still works gen_parser_from_dataclass( parser, TransformerConfig(), delete_default=True, with_prefix="" ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if args.encoder_layers_to_keep: args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if getattr(args, "max_source_positions", None) is None: args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if getattr(args, "max_target_positions", None) is None: args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError("--share-all-embeddings requires a joined dictionary") if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise ValueError( "--share-all-embeddings not compatible with --decoder-embed-path" ) args.share_decoder_input_output_embed = True if getattr(args, "offload_activations", False): args.checkpoint_activations = True # offloading implies checkpointing if not args.share_all_embeddings: args.min_params_to_wrap = getattr( args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP ) cfg = TransformerConfig.from_namespace(args) return super().build_model(cfg, task) @classmethod def build_embedding(cls, args, dictionary, embed_dim, path=None): return super().build_embedding( TransformerConfig.from_namespace(args), dictionary, embed_dim, path ) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return super().build_encoder( TransformerConfig.from_namespace(args), src_dict, embed_tokens ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return super().build_decoder( TransformerConfig.from_namespace(args), tgt_dict, embed_tokens ) # architectures @register_model_architecture("transformer", "transformer_tiny") def tiny_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 64) args.encoder_layers = getattr(args, "encoder_layers", 2) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2) args.decoder_layers = getattr(args, "decoder_layers", 2) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2) return base_architecture(args) @register_model_architecture("transformer", "transformer") def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) 
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.checkpoint_activations = getattr(args, "checkpoint_activations", False) args.offload_activations = getattr(args, "offload_activations", False) if args.offload_activations: args.checkpoint_activations = True args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None) args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0) @register_model_architecture("transformer", "transformer_iwslt_de_en") def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.decoder_layers = getattr(args, "decoder_layers", 6) base_architecture(args) @register_model_architecture("transformer", "transformer_wmt_en_de") def transformer_wmt_en_de(args): base_architecture(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture("transformer", "transformer_vaswani_wmt_en_de_big") def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) 
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) base_architecture(args) @register_model_architecture("transformer", "transformer_vaswani_wmt_en_fr_big") def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, "dropout", 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture("transformer", "transformer_wmt_en_de_big") def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, "attention_dropout", 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture("transformer", "transformer_wmt_en_de_big_t2t") def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_dropout = getattr(args, "activation_dropout", 0.1) transformer_vaswani_wmt_en_de_big(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/transformer/transformer_legacy.py
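The named architectures above (base_architecture, transformer_iwslt_de_en, the vaswani_*_big variants) only fill in missing attributes with getattr(args, name, default), so command-line values always win and architectures can layer on top of one another. A minimal standalone sketch of that defaulting idiom, using a plain argparse.Namespace instead of a real fairseq run; the attribute names are borrowed from the code above, and the sketch_* functions are hypothetical:

from argparse import Namespace

def sketch_base_architecture(args):
    # mirror of the getattr-defaulting idiom used in base_architecture()
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_layers = getattr(args, "encoder_layers", 6)
    args.dropout = getattr(args, "dropout", 0.1)

def sketch_big_architecture(args):
    # a derived architecture pins only what it cares about, then defers
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
    args.dropout = getattr(args, "dropout", 0.3)
    sketch_base_architecture(args)

args = Namespace(dropout=0.05)  # pretend the user passed --dropout 0.05
sketch_big_architecture(args)
print(args.encoder_embed_dim, args.encoder_layers, args.dropout)  # 1024 6 0.05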
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, List, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqEncoder from fairseq.modules import ( FairseqDropout, LayerDropModuleList, LayerNorm, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from fairseq.modules import transformer_layer from fairseq.modules.checkpoint_activations import checkpoint_wrapper from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from torch import Tensor from fairseq.models.transformer import ( TransformerConfig, ) # rewrite name for backward compatibility in `make_generation_fast_` def module_name_fordropout(module_name: str) -> str: if module_name == "TransformerEncoderBase": return "TransformerEncoder" else: return module_name class TransformerEncoderBase(FairseqEncoder): """ Transformer encoder consisting of *cfg.encoder.layers* layers. Each layer is a :class:`TransformerEncoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, cfg, dictionary, embed_tokens): self.cfg = cfg super().__init__(dictionary) self.register_buffer("version", torch.Tensor([3])) self.dropout_module = FairseqDropout( cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__) ) self.encoder_layerdrop = cfg.encoder.layerdrop embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = cfg.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim) self.embed_positions = ( PositionalEmbedding( cfg.max_source_positions, embed_dim, self.padding_idx, learned=cfg.encoder.learned_pos, ) if not cfg.no_token_positional_embeddings else None ) if cfg.layernorm_embedding: self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export) else: self.layernorm_embedding = None if not cfg.adaptive_input and cfg.quant_noise.pq > 0: self.quant_noise = apply_quant_noise_( nn.Linear(embed_dim, embed_dim, bias=False), cfg.quant_noise.pq, cfg.quant_noise.pq_block_size, ) else: self.quant_noise = None if self.encoder_layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.encoder_layerdrop) else: self.layers = nn.ModuleList([]) self.layers.extend( [self.build_encoder_layer(cfg) for i in range(cfg.encoder.layers)] ) self.num_layers = len(self.layers) if cfg.encoder.normalize_before: self.layer_norm = LayerNorm(embed_dim, export=cfg.export) else: self.layer_norm = None def build_encoder_layer(self, cfg): layer = transformer_layer.TransformerEncoderLayerBase(cfg) checkpoint = cfg.checkpoint_activations if checkpoint: offload_to_cpu = cfg.offload_activations layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu) # if we are checkpointing, enforce that FSDP always wraps the # checkpointed layer, regardless of layer size min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0 layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap) return layer def forward_embedding( self, src_tokens, token_embedding: Optional[torch.Tensor] = None ): # embed tokens and positions if token_embedding is None: token_embedding = self.embed_tokens(src_tokens) x = embed = self.embed_scale * 
token_embedding if self.embed_positions is not None: x = embed + self.embed_positions(src_tokens) if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) if self.quant_noise is not None: x = self.quant_noise(x) return x, embed def forward( self, src_tokens, src_lengths: Optional[torch.Tensor] = None, return_all_hiddens: bool = False, token_embeddings: Optional[torch.Tensor] = None, ): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). token_embeddings (torch.Tensor, optional): precomputed embeddings default `None` will recompute embeddings Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` - **encoder_embedding** (Tensor): the (scaled) embedding lookup of shape `(batch, src_len, embed_dim)` - **encoder_states** (List[Tensor]): all intermediate hidden states of shape `(src_len, batch, embed_dim)`. Only populated if *return_all_hiddens* is True. """ return self.forward_scriptable( src_tokens, src_lengths, return_all_hiddens, token_embeddings ) # TorchScript doesn't support super() method so that the scriptable Subclass # can't access the base class model in Torchscript. # Current workaround is to add a helper function with different name and # call the helper function from scriptable Subclass. def forward_scriptable( self, src_tokens, src_lengths: Optional[torch.Tensor] = None, return_all_hiddens: bool = False, token_embeddings: Optional[torch.Tensor] = None, ): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). token_embeddings (torch.Tensor, optional): precomputed embeddings default `None` will recompute embeddings Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` - **encoder_embedding** (Tensor): the (scaled) embedding lookup of shape `(batch, src_len, embed_dim)` - **encoder_states** (List[Tensor]): all intermediate hidden states of shape `(src_len, batch, embed_dim)`. Only populated if *return_all_hiddens* is True. """ # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) has_pads = src_tokens.device.type == "xla" or encoder_padding_mask.any() x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings) # account for padding while computing the representation if has_pads: x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x)) # B x T x C -> T x B x C x = x.transpose(0, 1) encoder_states = [] if return_all_hiddens: encoder_states.append(x) # encoder layers for layer in self.layers: x = layer( x, encoder_padding_mask=encoder_padding_mask if has_pads else None ) if return_all_hiddens: assert encoder_states is not None encoder_states.append(x) if self.layer_norm is not None: x = self.layer_norm(x) # The Pytorch Mobile lite interpreter does not supports returning NamedTuple in # `forward` so we use a dictionary instead. 
# TorchScript does not support mixed values so the values are all lists. # The empty list is equivalent to None. src_lengths = ( src_tokens.ne(self.padding_idx) .sum(dim=1, dtype=torch.int32) .reshape(-1, 1) .contiguous() ) return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [encoder_padding_mask], # B x T "encoder_embedding": [encoder_embedding], # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": [], "src_lengths": [src_lengths], } @torch.jit.export def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order): """ Reorder encoder output according to *new_order*. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if len(encoder_out["encoder_out"]) == 0: new_encoder_out = [] else: new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)] if len(encoder_out["encoder_padding_mask"]) == 0: new_encoder_padding_mask = [] else: new_encoder_padding_mask = [ encoder_out["encoder_padding_mask"][0].index_select(0, new_order) ] if len(encoder_out["encoder_embedding"]) == 0: new_encoder_embedding = [] else: new_encoder_embedding = [ encoder_out["encoder_embedding"][0].index_select(0, new_order) ] if len(encoder_out["src_tokens"]) == 0: src_tokens = [] else: src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)] if len(encoder_out["src_lengths"]) == 0: src_lengths = [] else: src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)] encoder_states = encoder_out["encoder_states"] if len(encoder_states) > 0: for idx, state in enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return { "encoder_out": new_encoder_out, # T x B x C "encoder_padding_mask": new_encoder_padding_mask, # B x T "encoder_embedding": new_encoder_embedding, # B x T x C "encoder_states": encoder_states, # List[T x B x C] "src_tokens": src_tokens, # B x T "src_lengths": src_lengths, # B x 1 } def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = "{}.embed_positions.weights".format(name) if weights_key in state_dict: print("deleting {0}".format(weights_key)) del state_dict[weights_key] state_dict[ "{}.embed_positions._float_tensor".format(name) ] = torch.FloatTensor(1) for i in range(self.num_layers): # update layer norms self.layers[i].upgrade_state_dict_named( state_dict, "{}.layers.{}".format(name, i) ) version_key = "{}.version".format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerEncoder(TransformerEncoderBase): def __init__(self, args, dictionary, embed_tokens): self.args = args super().__init__( TransformerConfig.from_namespace(args), dictionary, embed_tokens, ) def build_encoder_layer(self, args): return super().build_encoder_layer( TransformerConfig.from_namespace(args), )
KosmosX-API-main
kosmosX/fairseq/fairseq/models/transformer/transformer_encoder.py
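TransformerEncoderBase.forward_scriptable returns a TorchScript-friendly dict of lists: "encoder_out" is T x B x C, "encoder_padding_mask" is B x T, and an empty list stands in for None. reorder_encoder_out then permutes the batch dimension for beam search, which is dim 1 for the output but dim 0 for the mask. A torch-only sketch of that reordering on hypothetical toy tensors (no fairseq checkpoint involved):

import torch

T, B, C = 5, 2, 4
encoder_out = {
    "encoder_out": [torch.randn(T, B, C)],                           # T x B x C
    "encoder_padding_mask": [torch.zeros(B, T, dtype=torch.bool)],   # B x T
}

# beam search duplicates/permutes batch rows via new_order
new_order = torch.tensor([0, 0, 1])  # e.g. two beams for sentence 0, one for sentence 1

reordered = {
    "encoder_out": [encoder_out["encoder_out"][0].index_select(1, new_order)],
    "encoder_padding_mask": [
        encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
    ],
}
print(reordered["encoder_out"][0].shape)           # torch.Size([5, 3, 4])
print(reordered["encoder_padding_mask"][0].shape)  # torch.Size([3, 5])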
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""

from .transformer_config import (
    TransformerConfig,
    DEFAULT_MAX_SOURCE_POSITIONS,
    DEFAULT_MAX_TARGET_POSITIONS,
    DEFAULT_MIN_PARAMS_TO_WRAP,
)
from .transformer_decoder import TransformerDecoder, TransformerDecoderBase, Linear
from .transformer_encoder import TransformerEncoder, TransformerEncoderBase
from .transformer_legacy import (
    TransformerModel,
    base_architecture,
    tiny_architecture,
    transformer_iwslt_de_en,
    transformer_wmt_en_de,
    transformer_vaswani_wmt_en_de_big,
    transformer_vaswani_wmt_en_fr_big,
    transformer_wmt_en_de_big,
    transformer_wmt_en_de_big_t2t,
)
from .transformer_base import TransformerModelBase, Embedding


__all__ = [
    "TransformerModelBase",
    "TransformerConfig",
    "TransformerDecoder",
    "TransformerDecoderBase",
    "TransformerEncoder",
    "TransformerEncoderBase",
    "TransformerModel",
    "Embedding",
    "Linear",
    "base_architecture",
    "tiny_architecture",
    "transformer_iwslt_de_en",
    "transformer_wmt_en_de",
    "transformer_vaswani_wmt_en_de_big",
    "transformer_vaswani_wmt_en_fr_big",
    "transformer_wmt_en_de_big",
    "transformer_wmt_en_de_big_t2t",
    "DEFAULT_MAX_SOURCE_POSITIONS",
    "DEFAULT_MAX_TARGET_POSITIONS",
    "DEFAULT_MIN_PARAMS_TO_WRAP",
]
KosmosX-API-main
kosmosX/fairseq/fairseq/models/transformer/__init__.py
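The package __init__ above is the single import surface for both the cfg-driven base classes and the legacy argparse-driven wrappers. A small usage sketch, assuming this fairseq fork is installed and importable:

# the names below are exactly the ones re-exported in __all__ above
from fairseq.models.transformer import (
    TransformerConfig,
    TransformerModelBase,        # cfg-based
    TransformerModel,            # legacy args-based
    DEFAULT_MAX_SOURCE_POSITIONS,
)

cfg = TransformerConfig()        # dataclass with nested encoder/decoder sections
print(DEFAULT_MAX_SOURCE_POSITIONS, cfg.encoder.embed_dim)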
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Any, Dict, List, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqIncrementalDecoder from fairseq.models.transformer import TransformerConfig from fairseq.modules import ( AdaptiveSoftmax, BaseLayer, FairseqDropout, LayerDropModuleList, LayerNorm, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from fairseq.modules import transformer_layer from fairseq.modules.checkpoint_activations import checkpoint_wrapper from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from torch import Tensor # rewrite name for backward compatibility in `make_generation_fast_` def module_name_fordropout(module_name: str) -> str: if module_name == "TransformerDecoderBase": return "TransformerDecoder" else: return module_name class TransformerDecoderBase(FairseqIncrementalDecoder): """ Transformer decoder consisting of *cfg.decoder.layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__( self, cfg, dictionary, embed_tokens, no_encoder_attn=False, output_projection=None, ): self.cfg = cfg super().__init__(dictionary) self.register_buffer("version", torch.Tensor([3])) self._future_mask = torch.empty(0) self.dropout_module = FairseqDropout( cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__) ) self.decoder_layerdrop = cfg.decoder.layerdrop self.share_input_output_embed = cfg.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = cfg.decoder.embed_dim self.embed_dim = embed_dim self.output_embed_dim = cfg.decoder.output_dim self.padding_idx = embed_tokens.padding_idx self.max_target_positions = cfg.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim) if not cfg.adaptive_input and cfg.quant_noise.pq > 0: self.quant_noise = apply_quant_noise_( nn.Linear(embed_dim, embed_dim, bias=False), cfg.quant_noise.pq, cfg.quant_noise.pq_block_size, ) else: self.quant_noise = None self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( self.max_target_positions, embed_dim, self.padding_idx, learned=cfg.decoder.learned_pos, ) if not cfg.no_token_positional_embeddings else None ) if cfg.layernorm_embedding: self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export) else: self.layernorm_embedding = None self.cross_self_attention = cfg.cross_self_attention if self.decoder_layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.decoder_layerdrop) else: self.layers = nn.ModuleList([]) self.layers.extend( [ self.build_decoder_layer(cfg, no_encoder_attn) for _ in range(cfg.decoder.layers) ] ) self.num_layers = len(self.layers) if cfg.decoder.normalize_before and not cfg.no_decoder_final_norm: self.layer_norm = LayerNorm(embed_dim, export=cfg.export) else: self.layer_norm = None self.project_out_dim = ( Linear(embed_dim, self.output_embed_dim, bias=False) if embed_dim != 
self.output_embed_dim and not cfg.tie_adaptive_weights else None ) self.adaptive_softmax = None self.output_projection = output_projection if self.output_projection is None: self.build_output_projection(cfg, dictionary, embed_tokens) if utils.safe_getattr(cfg, 'deepnet', False): self.rescale_decoder_only_parameters(cfg) def rescale_decoder_only_parameters(self, cfg): def rescale(param, layer_id): param.mul_(math.sqrt(math.log(len(self.layers) * 2))) # param.div_(math.sqrt(2.0 * layer_id)) for layer_id in range(len(self.layers)): layer = self.layers[layer_id] rescale(layer.self_attn.out_proj.weight.data, layer_id + 1) rescale(layer.self_attn.v_proj.weight.data, layer_id + 1) rescale(layer.fc1.weight.data, layer_id + 1) rescale(layer.fc2.weight.data, layer_id + 1) return def build_output_projection(self, cfg, dictionary, embed_tokens): if cfg.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, utils.eval_str_list(cfg.adaptive_softmax_cutoff, type=int), dropout=cfg.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if cfg.tie_adaptive_weights else None, factor=cfg.adaptive_softmax_factor, tie_proj=cfg.tie_adaptive_proj, ) elif self.share_input_output_embed: self.output_projection = nn.Linear( self.embed_tokens.weight.shape[1], self.embed_tokens.weight.shape[0], bias=False, ) self.output_projection.weight = self.embed_tokens.weight else: self.output_projection = nn.Linear( self.output_embed_dim, len(dictionary), bias=False ) nn.init.normal_( self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5 ) num_base_layers = cfg.base_layers for i in range(num_base_layers): self.layers.insert( ((i + 1) * cfg.decoder.layers) // (num_base_layers + 1), BaseLayer(cfg), ) def build_decoder_layer(self, cfg, no_encoder_attn=False): layer = transformer_layer.TransformerDecoderLayerBase(cfg, no_encoder_attn) checkpoint = cfg.checkpoint_activations if checkpoint: offload_to_cpu = cfg.offload_activations layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu) # if we are checkpointing, enforce that FSDP always wraps the # checkpointed layer, regardless of layer size min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0 layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap) return layer def forward( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, features_only: bool = False, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, src_lengths: Optional[Any] = None, return_all_hiddens: bool = False, ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (optional): output from the encoder, used for encoder-side attention, should be of size T x B x C incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` features_only (bool, optional): only return features without applying output layer (default: False). full_context_alignment (bool, optional): don't apply auto-regressive mask to self-attention (default: False). 
Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, full_context_alignment=full_context_alignment, alignment_layer=alignment_layer, alignment_heads=alignment_heads, ) if not features_only: x = self.output_layer(x) return x, extra def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): return self.extract_features_scriptable( prev_output_tokens, encoder_out, incremental_state, full_context_alignment, alignment_layer, alignment_heads, ) """ A scriptable subclass of this class has an extract_features method and calls super().extract_features, but super() is not supported in torchscript. A copy of this function is made to be used in the subclass instead. """ def extract_features_scriptable( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): """ Similar to *forward* but only return features. Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: full_context_alignment (bool, optional): don't apply auto-regressive mask to self-attention (default: False). alignment_layer (int, optional): return mean alignment over heads at this layer (default: last layer). alignment_heads (int, optional): only average alignment over this many heads (default: all heads). 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ bs, slen = prev_output_tokens.size() if alignment_layer is None: alignment_layer = self.num_layers - 1 enc: Optional[Tensor] = None padding_mask: Optional[Tensor] = None if encoder_out is not None and len(encoder_out["encoder_out"]) > 0: enc = encoder_out["encoder_out"][0] assert ( enc.size()[1] == bs ), f"Expected enc.shape == (t, {bs}, c) got {enc.shape}" if encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0: padding_mask = encoder_out["encoder_padding_mask"][0] # embed positions positions = None if self.embed_positions is not None: positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.quant_noise is not None: x = self.quant_noise(x) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) self_attn_padding_mask: Optional[Tensor] = None if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any(): self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx) # decoder layers attn: Optional[Tensor] = None inner_states: List[Optional[Tensor]] = [x] for idx, layer in enumerate(self.layers): if incremental_state is None and not full_context_alignment: self_attn_mask = self.buffered_future_mask(x) else: self_attn_mask = None x, layer_attn, _ = layer( x, enc, padding_mask, incremental_state, self_attn_mask=self_attn_mask, self_attn_padding_mask=self_attn_padding_mask, need_attn=bool((idx == alignment_layer)), need_head_weights=bool((idx == alignment_layer)), ) inner_states.append(x) if layer_attn is not None and idx == alignment_layer: attn = layer_attn.float().to(x) if attn is not None: if alignment_heads is not None: attn = attn[:alignment_heads] # average probabilities over heads attn = attn.mean(dim=0) if self.layer_norm is not None: x = self.layer_norm(x) if self.alpha is not None: x = torch.mul(self.alpha, x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": [attn], "inner_states": inner_states} def output_layer(self, features): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary return self.output_projection(features) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround. 
if ( self._future_mask.size(0) == 0 or (not self._future_mask.device == tensor.device) or self._future_mask.size(0) < dim ): self._future_mask = torch.triu( utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1 ) self._future_mask = self._future_mask.to(tensor) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = "{}.embed_positions.weights".format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict[ "{}.embed_positions._float_tensor".format(name) ] = torch.FloatTensor(1) if f"{name}.output_projection.weight" not in state_dict: if self.share_input_output_embed: embed_out_key = f"{name}.embed_tokens.weight" else: embed_out_key = f"{name}.embed_out" if embed_out_key in state_dict: state_dict[f"{name}.output_projection.weight"] = state_dict[ embed_out_key ] if not self.share_input_output_embed: del state_dict[embed_out_key] for i in range(self.num_layers): # update layer norms layer_norm_map = { "0": "self_attn_layer_norm", "1": "encoder_attn_layer_norm", "2": "final_layer_norm", } for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m) if k in state_dict: state_dict[ "{}.layers.{}.{}.{}".format(name, i, new, m) ] = state_dict[k] del state_dict[k] version_key = "{}.version".format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m class TransformerDecoder(TransformerDecoderBase): def __init__( self, args, dictionary, embed_tokens, no_encoder_attn=False, output_projection=None, ): self.args = args super().__init__( TransformerConfig.from_namespace(args), dictionary, embed_tokens, no_encoder_attn=no_encoder_attn, output_projection=output_projection, ) def build_output_projection(self, args, dictionary, embed_tokens): super().build_output_projection( TransformerConfig.from_namespace(args), dictionary, embed_tokens ) def build_decoder_layer(self, args, no_encoder_attn=False): return super().build_decoder_layer( TransformerConfig.from_namespace(args), no_encoder_attn=no_encoder_attn )
KosmosX-API-main
kosmosX/fairseq/fairseq/models/transformer/transformer_decoder.py
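buffered_future_mask above lazily builds and caches an upper-triangular mask of -inf so that, during training, position t can only attend to positions <= t; incremental decoding skips the mask entirely. A standalone torch-only sketch of the same construction (plain torch in place of utils.fill_with_neg_inf):

import torch

def future_mask(dim: int) -> torch.Tensor:
    # strictly-upper triangle is -inf, diagonal and below are 0,
    # matching the additive mask applied to the self-attention logits
    mask = torch.zeros(dim, dim)
    upper = torch.triu(torch.ones(dim, dim, dtype=torch.bool), diagonal=1)
    return mask.masked_fill(upper, float("-inf"))

print(future_mask(4))
# row i has -inf in columns i+1..3, so step i never attends to future steps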
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn from fairseq import utils from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqEncoderDecoderModel from fairseq.models.transformer import ( TransformerEncoderBase, TransformerDecoderBase, TransformerConfig, ) from torch import Tensor class TransformerModelBase(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, cfg, encoder, decoder): super().__init__(encoder, decoder) self.cfg = cfg self.supports_align_args = True @classmethod def add_args(cls, parser): """Add model-specific arguments to the parser.""" # we want to build the args recursively in this case. gen_parser_from_dataclass( parser, TransformerConfig(), delete_default=False, with_prefix="" ) @classmethod def build_model(cls, cfg, task): """Build a new model instance.""" # -- TODO T96535332 # bug caused by interaction between OmegaConf II and argparsing cfg.decoder.input_dim = int(cfg.decoder.input_dim) cfg.decoder.output_dim = int(cfg.decoder.output_dim) # -- if cfg.encoder.layers_to_keep: cfg.encoder.layers = len(cfg.encoder.layers_to_keep.split(",")) if cfg.decoder.layers_to_keep: cfg.decoder.layers = len(cfg.decoder.layers_to_keep.split(",")) src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if cfg.share_all_embeddings: if src_dict != tgt_dict: raise ValueError("--share-all-embeddings requires a joined dictionary") if cfg.encoder.embed_dim != cfg.decoder.embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if cfg.decoder.embed_path and ( cfg.decoder.embed_path != cfg.encoder.embed_path ): raise ValueError( "--share-all-embeddings not compatible with --decoder-embed-path" ) encoder_embed_tokens = cls.build_embedding( cfg, src_dict, cfg.encoder.embed_dim, cfg.encoder.embed_path ) decoder_embed_tokens = encoder_embed_tokens cfg.share_decoder_input_output_embed = True else: encoder_embed_tokens = cls.build_embedding( cfg, src_dict, cfg.encoder.embed_dim, cfg.encoder.embed_path ) decoder_embed_tokens = cls.build_embedding( cfg, tgt_dict, cfg.decoder.embed_dim, cfg.decoder.embed_path ) if cfg.offload_activations: cfg.checkpoint_activations = True # offloading implies checkpointing encoder = cls.build_encoder(cfg, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens) if not cfg.share_all_embeddings: # fsdp_wrap is a no-op when --ddp-backend != fully_sharded encoder = fsdp_wrap(encoder, min_num_params=cfg.min_params_to_wrap) decoder = fsdp_wrap(decoder, min_num_params=cfg.min_params_to_wrap) return cls(cfg, encoder, decoder) @classmethod def build_embedding(cls, cfg, dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) 
utils.load_embedding(embed_dict, dictionary, emb) return emb @classmethod def build_encoder(cls, cfg, src_dict, embed_tokens): return TransformerEncoderBase(cfg, src_dict, embed_tokens) @classmethod def build_decoder(cls, cfg, tgt_dict, embed_tokens): return TransformerDecoderBase( cfg, tgt_dict, embed_tokens, no_encoder_attn=cfg.no_cross_attention, ) # TorchScript doesn't support optional arguments with variable length (**kwargs). # Current workaround is to add union of all arguments in child classes. def forward( self, src_tokens, src_lengths, prev_output_tokens, return_all_hiddens: bool = True, features_only: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): """ Run the forward pass for an encoder-decoder model. Copied from the base class, but without ``**kwargs``, which are not supported by TorchScript. """ encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens ) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, features_only=features_only, alignment_layer=alignment_layer, alignment_heads=alignment_heads, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, ) return decoder_out # Since get_normalized_probs is in the Fairseq Model which is not scriptable, # I rewrite the get_normalized_probs from Base Class to call the # helper function in the Base Class. @torch.jit.export def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Get normalized probabilities (or log probs) from a net's output.""" return self.get_normalized_probs_scriptable(net_output, log_probs, sample) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m
KosmosX-API-main
kosmosX/fairseq/fairseq/models/transformer/transformer_base.py
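The Embedding helper above initializes weights as normal(0, embed_dim ** -0.5) and zeroes the padding row; with --share-all-embeddings the very same nn.Embedding is used by encoder and decoder, and share_decoder_input_output_embed additionally ties it to the decoder's output projection. A toy, fairseq-free sketch of that weight tying:

import torch.nn as nn

vocab, dim, pad = 10, 8, 1
emb = nn.Embedding(vocab, dim, padding_idx=pad)
nn.init.normal_(emb.weight, mean=0, std=dim ** -0.5)
nn.init.constant_(emb.weight[pad], 0)

# tie the output projection to the shared input embedding
out_proj = nn.Linear(dim, vocab, bias=False)
out_proj.weight = emb.weight   # same Parameter object, not a copy

assert out_proj.weight.data_ptr() == emb.weight.data_ptr()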
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq.utils import new_arange # -------------- Helper Functions --------------------------------------------------- # def load_libnat(): try: from fairseq import libnat_cuda return libnat_cuda, True except ImportError as e: print(str(e) + "... fall back to CPU version") try: from fairseq import libnat return libnat, False except ImportError as e: import sys sys.stderr.write( "ERROR: missing libnat_cuda. run `python setup.py build_ext --inplace`\n" ) raise e def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx): libnat, use_cuda = load_libnat() def _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx): in_masks = in_tokens.ne(padding_idx) out_masks = out_tokens.ne(padding_idx) mask_ins_targets, masked_tgt_masks = libnat.generate_insertion_labels( out_tokens.int(), libnat.levenshtein_distance( in_tokens.int(), out_tokens.int(), in_masks.sum(1).int(), out_masks.sum(1).int(), ), ) masked_tgt_masks = masked_tgt_masks.bool() & out_masks mask_ins_targets = mask_ins_targets.type_as(in_tokens)[ :, 1 : in_masks.size(1) ].masked_fill_(~in_masks[:, 1:], 0) masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets def _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx): in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1) in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) mask_inputs = [ [len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels ] # generate labels masked_tgt_masks = [] for mask_input in mask_inputs: mask_label = [] for beam_size in mask_input[1:-1]: # HACK 1:-1 mask_label += [0] + [1 for _ in range(beam_size)] masked_tgt_masks.append( mask_label + [0 for _ in range(out_seq_len - len(mask_label))] ) mask_ins_targets = [ mask_input[1:-1] + [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))] for mask_input in mask_inputs ] # transform to tensor masked_tgt_masks = torch.tensor( masked_tgt_masks, device=out_tokens.device ).bool() mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device) masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets if use_cuda: return _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx) return _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx) def _get_del_targets(in_tokens, out_tokens, padding_idx): libnat, use_cuda = load_libnat() def _get_del_targets_cuda(in_tokens, out_tokens, padding_idx): in_masks = in_tokens.ne(padding_idx) out_masks = out_tokens.ne(padding_idx) word_del_targets = libnat.generate_deletion_labels( in_tokens.int(), libnat.levenshtein_distance( in_tokens.int(), out_tokens.int(), in_masks.sum(1).int(), out_masks.sum(1).int(), ), ) word_del_targets = word_del_targets.type_as(in_tokens).masked_fill_( ~in_masks, 0 ) return word_del_targets def _get_del_targets_cpu(in_tokens, out_tokens, padding_idx): out_seq_len = out_tokens.size(1) with torch.cuda.device_of(in_tokens): in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in 
enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) word_del_targets = [b[-1] for b in full_labels] word_del_targets = [ labels + [0 for _ in range(out_seq_len - len(labels))] for labels in word_del_targets ] # transform to tensor word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device) return word_del_targets if use_cuda: return _get_del_targets_cuda(in_tokens, out_tokens, padding_idx) return _get_del_targets_cpu(in_tokens, out_tokens, padding_idx) def _apply_ins_masks( in_tokens, in_scores, mask_ins_pred, padding_idx, unk_idx, eos_idx ): in_masks = in_tokens.ne(padding_idx) in_lengths = in_masks.sum(1) # HACK: hacky way to shift all the paddings to eos first. in_tokens.masked_fill_(~in_masks, eos_idx) mask_ins_pred.masked_fill_(~in_masks[:, 1:], 0) out_lengths = in_lengths + mask_ins_pred.sum(1) out_max_len = out_lengths.max() out_masks = new_arange(out_lengths, out_max_len)[None, :] < out_lengths[:, None] reordering = (mask_ins_pred + in_masks[:, 1:].long()).cumsum(1) out_tokens = ( in_tokens.new_zeros(in_tokens.size(0), out_max_len) .fill_(padding_idx) .masked_fill_(out_masks, unk_idx) ) out_tokens[:, 0] = in_tokens[:, 0] out_tokens.scatter_(1, reordering, in_tokens[:, 1:]) out_scores = None if in_scores is not None: in_scores.masked_fill_(~in_masks, 0) out_scores = in_scores.new_zeros(*out_tokens.size()) out_scores[:, 0] = in_scores[:, 0] out_scores.scatter_(1, reordering, in_scores[:, 1:]) return out_tokens, out_scores def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, unk_idx): word_ins_masks = in_tokens.eq(unk_idx) out_tokens = in_tokens.masked_scatter(word_ins_masks, word_ins_pred[word_ins_masks]) if in_scores is not None: out_scores = in_scores.masked_scatter( word_ins_masks, word_ins_scores[word_ins_masks] ) else: out_scores = None return out_tokens, out_scores def _apply_del_words( in_tokens, in_scores, in_attn, word_del_pred, padding_idx, bos_idx, eos_idx ): # apply deletion to a tensor in_masks = in_tokens.ne(padding_idx) bos_eos_masks = in_tokens.eq(bos_idx) | in_tokens.eq(eos_idx) max_len = in_tokens.size(1) word_del_pred.masked_fill_(~in_masks, 1) word_del_pred.masked_fill_(bos_eos_masks, 0) reordering = new_arange(in_tokens).masked_fill_(word_del_pred, max_len).sort(1)[1] out_tokens = in_tokens.masked_fill(word_del_pred, padding_idx).gather(1, reordering) out_scores = None if in_scores is not None: out_scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering) out_attn = None if in_attn is not None: _mask = word_del_pred[:, :, None].expand_as(in_attn) _reordering = reordering[:, :, None].expand_as(in_attn) out_attn = in_attn.masked_fill(_mask, 0.0).gather(1, _reordering) return out_tokens, out_scores, out_attn def _skip(x, mask): """ Getting sliced (dim=0) tensor by mask. Supporting tensor and list/dict of tensors. 
""" if isinstance(x, int): return x if x is None: return None if isinstance(x, torch.Tensor): if x.size(0) == mask.size(0): return x[mask] elif x.size(1) == mask.size(0): return x[:, mask] if isinstance(x, list): return [_skip(x_i, mask) for x_i in x] if isinstance(x, dict): return {k: _skip(v, mask) for k, v in x.items()} raise NotImplementedError def _skip_encoder_out(encoder, encoder_out, mask): if not mask.any(): return encoder_out else: return encoder.reorder_encoder_out( encoder_out, mask.nonzero(as_tuple=False).squeeze() ) def _fill(x, mask, y, padding_idx): """ Filling tensor x with y at masked positions (dim=0). """ if x is None: return y assert x.dim() == y.dim() and mask.size(0) == x.size(0) assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2)) n_selected = mask.sum() assert n_selected == y.size(0) if n_selected == x.size(0): return y if x.size(1) < y.size(1): dims = [x.size(0), y.size(1) - x.size(1)] if x.dim() == 3: dims.append(x.size(2)) x = torch.cat([x, x.new_zeros(*dims).fill_(padding_idx)], 1) x[mask] = y elif x.size(1) > y.size(1): x[mask] = padding_idx if x.dim() == 2: x[mask, : y.size(1)] = y else: x[mask, : y.size(1), :] = y else: x[mask] = y return x
KosmosX-API-main
kosmosX/fairseq/fairseq/models/nat/levenshtein_utils.py
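_apply_del_words above removes tokens from a padded batch without Python loops: deleted positions get the sort key max_len so the sort pushes them to the right, gather() re-packs each row, and the deleted slots end up as plain padding. A miniature torch-only illustration of that trick on a hypothetical 5-token row:

import torch

pad = 1
tokens = torch.tensor([[0, 7, 8, 9, 2]])                     # <s> w1 w2 w3 </s>
delete = torch.tensor([[0, 0, 1, 0, 0]], dtype=torch.bool)   # drop w2

max_len = tokens.size(1)
arange = torch.arange(max_len).expand_as(tokens)
# deleted positions get key == max_len, so sort() moves them to the end
reordering = arange.masked_fill(delete, max_len).sort(1)[1]
out = tokens.masked_fill(delete, pad).gather(1, reordering)
print(out)   # tensor([[0, 7, 9, 2, 1]]) -> w2 removed, one pad appended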
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import NATransformerModel def _sequential_poisoning(s, V, beta=0.33, bos=2, eos=3, pad=1): # s: input batch # V: vocabulary size rand_words = torch.randint(low=4, high=V, size=s.size(), device=s.device) choices = torch.rand(size=s.size(), device=s.device) choices.masked_fill_((s == pad) | (s == bos) | (s == eos), 1) replace = choices < beta / 3 repeat = (choices >= beta / 3) & (choices < beta * 2 / 3) swap = (choices >= beta * 2 / 3) & (choices < beta) safe = choices >= beta for i in range(s.size(1) - 1): rand_word = rand_words[:, i] next_word = s[:, i + 1] self_word = s[:, i] replace_i = replace[:, i] swap_i = swap[:, i] & (next_word != 3) repeat_i = repeat[:, i] & (next_word != 3) safe_i = safe[:, i] | ((next_word == 3) & (~replace_i)) s[:, i] = ( self_word * (safe_i | repeat_i).long() + next_word * swap_i.long() + rand_word * replace_i.long() ) s[:, i + 1] = ( next_word * (safe_i | replace_i).long() + self_word * (swap_i | repeat_i).long() ) return s def gumbel_noise(input, TINY=1e-8): return ( input.new_zeros(*input.size()) .uniform_() .add_(TINY) .log_() .neg_() .add_(TINY) .log_() .neg_() ) @register_model("iterative_nonautoregressive_transformer") class IterNATransformerModel(NATransformerModel): @staticmethod def add_args(parser): NATransformerModel.add_args(parser) parser.add_argument( "--train-step", type=int, help="number of refinement iterations during training", ) parser.add_argument( "--dae-ratio", type=float, help="the probability of switching to the denoising auto-encoder loss", ) parser.add_argument( "--stochastic-approx", action="store_true", help="sampling from the decoder as the inputs for next iteration", ) @classmethod def build_model(cls, args, task): model = super().build_model(args, task) model.train_step = getattr(args, "train_step", 4) model.dae_ratio = getattr(args, "dae_ratio", 0.5) model.stochastic_approx = getattr(args, "stochastic_approx", False) return model def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): B, T = prev_output_tokens.size() # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # length prediction length_out = self.decoder.forward_length( normalize=False, encoder_out=encoder_out ) length_tgt = self.decoder.forward_length_prediction( length_out, encoder_out, tgt_tokens ) # decoding word_ins_outs, word_ins_tgts, word_ins_masks = [], [], [] for t in range(self.train_step): word_ins_out = self.decoder( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, step=t, ) word_ins_tgt = tgt_tokens word_ins_mask = word_ins_tgt.ne(self.pad) word_ins_outs.append(word_ins_out) word_ins_tgts.append(word_ins_tgt) word_ins_masks.append(word_ins_mask) if t < (self.train_step - 1): # prediction for next iteration if self.stochastic_approx: word_ins_prediction = ( word_ins_out + gumbel_noise(word_ins_out) ).max(-1)[1] else: word_ins_prediction = word_ins_out.max(-1)[1] prev_output_tokens = prev_output_tokens.masked_scatter( word_ins_mask, word_ins_prediction[word_ins_mask] ) if self.dae_ratio > 0: # we do not perform denoising for the first iteration corrputed = ( torch.rand(size=(B,), device=prev_output_tokens.device) < self.dae_ratio ) corrputed_tokens = _sequential_poisoning( 
tgt_tokens[corrputed], len(self.tgt_dict), 0.33, self.bos, self.eos, self.pad, ) prev_output_tokens[corrputed] = corrputed_tokens # concat everything word_ins_out = torch.cat(word_ins_outs, 0) word_ins_tgt = torch.cat(word_ins_tgts, 0) word_ins_mask = torch.cat(word_ins_masks, 0) return { "word_ins": { "out": word_ins_out, "tgt": word_ins_tgt, "mask": word_ins_mask, "ls": self.args.label_smoothing, "nll_loss": True, }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor, }, } @register_model_architecture( "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer" ) def inat_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.ngram_predictor = getattr(args, "ngram_predictor", 1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) args.train_step = getattr(args, "train_step", 4) args.dae_ratio = getattr(args, "dae_ratio", 0.5) args.stochastic_approx = getattr(args, "stochastic_approx", False) @register_model_architecture( "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer_wmt_en_de", ) def iter_nat_wmt_en_de(args): inat_base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/nat/iterative_nonautoregressive_transformer.py
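gumbel_noise above draws standard Gumbel samples as -log(-log(u + tiny) + tiny); adding them to the decoder logits and taking the argmax is the Gumbel-max trick, i.e. it samples from softmax(logits), which is what stochastic_approx relies on to build the next iteration's inputs. A quick torch-only check of that equivalence on hypothetical toy logits:

import torch

def gumbel(shape, tiny=1e-8):
    u = torch.rand(shape)
    return -(-(u + tiny).log() + tiny).log()

logits = torch.tensor([2.0, 0.5, 0.0])
n = 200_000
samples = (logits + gumbel((n, 3))).argmax(-1)
empirical = torch.bincount(samples, minlength=3).float() / n
print(empirical)                   # approximately softmax(logits)
print(torch.softmax(logits, -1))   # tensor([0.7361, 0.1643, 0.0996])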
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch from fairseq.models.transformer import ( TransformerDecoder, TransformerEncoder, TransformerModel, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params def ensemble_encoder(func): def wrapper(self, *args, **kwargs): if self.ensemble_models is None or len(self.ensemble_models) == 1: return func(self, *args, **kwargs) encoder_outs = [ func(model, *args, **kwargs, return_all_hiddens=True) for model in self.ensemble_models ] _encoder_out = encoder_outs[0].copy() def stack(key): outs = [e[key][0] for e in encoder_outs] return [torch.stack(outs, -1) if outs[0] is not None else None] _encoder_out["encoder_out"] = stack("encoder_out") _encoder_out["encoder_embedding"] = stack("encoder_embedding") num_layers = len(_encoder_out["encoder_states"]) if num_layers > 0: _encoder_out["encoder_states"] = [ torch.stack([e["encoder_states"][i] for e in encoder_outs], -1) for i in range(num_layers) ] return _encoder_out return wrapper def ensemble_decoder(func): def wrapper(self, normalize=False, encoder_out=None, *args, **kwargs): if self.ensemble_models is None or len(self.ensemble_models) == 1: return func( self, normalize=normalize, encoder_out=encoder_out, *args, **kwargs ) def _replace(encoder_out, new_val): new_encoder_out = encoder_out.copy() new_encoder_out["encoder_out"] = [new_val] return new_encoder_out action_outs = [ func( model, normalize=normalize, encoder_out=_replace( encoder_out, encoder_out["encoder_out"][0][:, :, :, i] ), *args, **kwargs ) for i, model in enumerate(self.ensemble_models) ] if not isinstance(action_outs[0], tuple): # return multiple values action_outs = [[a] for a in action_outs] else: action_outs = [list(a) for a in action_outs] ensembled_outs = [] for i in range(len(action_outs[0])): if i == 0 and normalize: ensembled_outs += [ torch.logsumexp( torch.stack([a[i] for a in action_outs], -1), dim=-1 ) - math.log(len(self.ensemble_models)) ] elif action_outs[0][i] is not None: ensembled_outs += [torch.stack([a[i] for a in action_outs], -1)] else: ensembled_outs += [None] if len(ensembled_outs) == 1: return ensembled_outs[0] return tuple(ensembled_outs) return wrapper class FairseqNATModel(TransformerModel): """ Abstract class for all nonautoregressive-based models """ def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) self.tgt_dict = decoder.dictionary self.bos = decoder.dictionary.bos() self.eos = decoder.dictionary.eos() self.pad = decoder.dictionary.pad() self.unk = decoder.dictionary.unk() self.ensemble_models = None @property def allow_length_beam(self): return False @property def allow_ensemble(self): return True def enable_ensemble(self, models): self.encoder.ensemble_models = [m.encoder for m in models] self.decoder.ensemble_models = [m.decoder for m in models] @staticmethod def add_args(parser): TransformerModel.add_args(parser) parser.add_argument( "--apply-bert-init", action="store_true", help="use custom param initialization for BERT", ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = FairseqNATDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder @classmethod def build_encoder(cls, args, src_dict, embed_tokens): encoder = FairseqNATEncoder(args, src_dict, embed_tokens) if getattr(args, "apply_bert_init", 
False): encoder.apply(init_bert_params) return encoder def forward_encoder(self, encoder_inputs): return self.encoder(*encoder_inputs) def forward_decoder(self, *args, **kwargs): return NotImplementedError def initialize_output_tokens(self, *args, **kwargs): return NotImplementedError def forward(self, *args, **kwargs): return NotImplementedError class FairseqNATEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.ensemble_models = None @ensemble_encoder def forward(self, *args, **kwargs): return super().forward(*args, **kwargs) class FairseqNATDecoder(TransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(args, dictionary, embed_tokens, no_encoder_attn) self.ensemble_models = None
KosmosX-API-main
kosmosX/fairseq/fairseq/models/nat/fairseq_nat_model.py
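ensemble_decoder above combines the normalized outputs of several models in probability space: it stacks the per-model log-probs along a new last dimension, takes logsumexp over it, and subtracts log(n_models), i.e. the log of the mean probability. A standalone numeric sketch of just that reduction, on hypothetical random log-probs:

import math
import torch

# pretend three ensemble members produced normalized log-probs over a 6-word vocab
logps = [torch.log_softmax(torch.randn(6), dim=-1) for _ in range(3)]

stacked = torch.stack(logps, dim=-1)                           # (vocab, n_models)
ensembled = torch.logsumexp(stacked, dim=-1) - math.log(len(logps))

# the same average computed naively in probability space
naive = torch.log(torch.stack(logps, dim=-1).exp().mean(dim=-1))
print(torch.allclose(ensembled, naive, atol=1e-6))             # True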
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""

from .fairseq_nat_model import *
from .nonautoregressive_transformer import *
from .nat_crf_transformer import *
from .iterative_nonautoregressive_transformer import *
from .cmlm_transformer import *
from .levenshtein_transformer import *
from .insertion_transformer import *
KosmosX-API-main
kosmosX/fairseq/fairseq/models/nat/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder from fairseq.models.transformer import Embedding from fairseq.modules import TransformerDecoderLayer from fairseq.modules.transformer_sentence_encoder import init_bert_params from .levenshtein_utils import ( _apply_del_words, _apply_ins_masks, _apply_ins_words, _fill, _get_del_targets, _get_ins_targets, _skip, _skip_encoder_out, ) @register_model("levenshtein_transformer") class LevenshteinTransformerModel(FairseqNATModel): @property def allow_length_beam(self): return False @staticmethod def add_args(parser): FairseqNATModel.add_args(parser) parser.add_argument( "--early-exit", default="6,6,6", type=str, help="number of decoder layers before word_del, mask_ins, word_ins", ) parser.add_argument( "--no-share-discriminator", action="store_true", help="separate parameters for discriminator", ) parser.add_argument( "--no-share-maskpredictor", action="store_true", help="separate parameters for mask-predictor", ) parser.add_argument( "--share-discriminator-maskpredictor", action="store_true", help="share the parameters for both mask-predictor and discriminator", ) parser.add_argument( "--sampling-for-deletion", action="store_true", help="instead of argmax, use sampling to predict the tokens", ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = LevenshteinTransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert tgt_tokens is not None, "forward function only supports training." 
# encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # generate training labels for insertion masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets( prev_output_tokens, tgt_tokens, self.pad, self.unk ) mask_ins_targets = mask_ins_targets.clamp(min=0, max=255) # for safe prediction mask_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) mask_ins_out, _ = self.decoder.forward_mask_ins( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) word_ins_out, _ = self.decoder.forward_word_ins( normalize=False, prev_output_tokens=masked_tgt_tokens, encoder_out=encoder_out, ) # make online prediction if self.decoder.sampling_for_deletion: word_predictions = torch.multinomial( F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1 ).view(word_ins_out.size(0), -1) else: word_predictions = F.log_softmax(word_ins_out, dim=-1).max(2)[1] word_predictions.masked_scatter_( ~masked_tgt_masks, tgt_tokens[~masked_tgt_masks] ) # generate training labels for deletion word_del_targets = _get_del_targets(word_predictions, tgt_tokens, self.pad) word_del_out, _ = self.decoder.forward_word_del( normalize=False, prev_output_tokens=word_predictions, encoder_out=encoder_out, ) word_del_masks = word_predictions.ne(self.pad) return { "mask_ins": { "out": mask_ins_out, "tgt": mask_ins_targets, "mask": mask_ins_masks, "ls": 0.01, }, "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": masked_tgt_masks, "ls": self.args.label_smoothing, "nll_loss": True, }, "word_del": { "out": word_del_out, "tgt": word_del_targets, "mask": word_del_masks, }, } def forward_decoder( self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs ): output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores attn = decoder_out.attn history = decoder_out.history bsz = output_tokens.size(0) if max_ratio is None: max_lens = torch.zeros_like(output_tokens).fill_(255) else: if not encoder_out["encoder_padding_mask"]: max_src_len = encoder_out["encoder_out"].size(0) src_lens = encoder_out["encoder_out"].new(bsz).fill_(max_src_len) else: src_lens = (~encoder_out["encoder_padding_mask"][0]).sum(1) max_lens = (src_lens * max_ratio).clamp(min=10).long() # delete words # do not delete tokens if it is <s> </s> can_del_word = output_tokens.ne(self.pad).sum(1) > 2 if can_del_word.sum() != 0: # we cannot delete, skip word_del_score, word_del_attn = self.decoder.forward_word_del( normalize=True, prev_output_tokens=_skip(output_tokens, can_del_word), encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_del_word), ) word_del_pred = word_del_score.max(-1)[1].bool() _tokens, _scores, _attn = _apply_del_words( output_tokens[can_del_word], output_scores[can_del_word], word_del_attn, word_del_pred, self.pad, self.bos, self.eos, ) output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad) output_scores = _fill(output_scores, can_del_word, _scores, 0) attn = _fill(attn, can_del_word, _attn, 0.0) if history is not None: history.append(output_tokens.clone()) # insert placeholders can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens if can_ins_mask.sum() != 0: mask_ins_score, _ = self.decoder.forward_mask_ins( normalize=True, prev_output_tokens=_skip(output_tokens, can_ins_mask), encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_mask), ) if eos_penalty > 0.0: mask_ins_score[:, :, 0] = mask_ins_score[:, :, 0] - eos_penalty mask_ins_pred = mask_ins_score.max(-1)[1] mask_ins_pred = torch.min( mask_ins_pred, 
max_lens[can_ins_mask, None].expand_as(mask_ins_pred) ) _tokens, _scores = _apply_ins_masks( output_tokens[can_ins_mask], output_scores[can_ins_mask], mask_ins_pred, self.pad, self.unk, self.eos, ) output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_mask, _scores, 0) if history is not None: history.append(output_tokens.clone()) # insert words can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 if can_ins_word.sum() != 0: word_ins_score, word_ins_attn = self.decoder.forward_word_ins( normalize=True, prev_output_tokens=_skip(output_tokens, can_ins_word), encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_word), ) word_ins_score, word_ins_pred = word_ins_score.max(-1) _tokens, _scores = _apply_ins_words( output_tokens[can_ins_word], output_scores[can_ins_word], word_ins_pred, word_ins_score, self.unk, ) output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_word, _scores, 0) attn = _fill(attn, can_ins_word, word_ins_attn, 0.0) if history is not None: history.append(output_tokens.clone()) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] attn = None if attn is None else attn[:, :cut_off, :] return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=attn, history=history, ) def initialize_output_tokens(self, encoder_out, src_tokens): initial_output_tokens = src_tokens.new_zeros(src_tokens.size(0), 2) initial_output_tokens[:, 0] = self.bos initial_output_tokens[:, 1] = self.eos initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(encoder_out["encoder_out"][0]) return DecoderOut( output_tokens=initial_output_tokens, output_scores=initial_output_scores, attn=None, step=0, max_step=0, history=None, ) class LevenshteinTransformerDecoder(FairseqNATDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) self.embed_mask_ins = Embedding(256, self.output_embed_dim * 2, None) self.embed_word_del = Embedding(2, self.output_embed_dim, None) # del_word, ins_mask, ins_word self.early_exit = [int(i) for i in args.early_exit.split(",")] assert len(self.early_exit) == 3 # copy layers for mask-predict/deletion self.layers_msk = None if getattr(args, "no_share_maskpredictor", False): self.layers_msk = nn.ModuleList( [ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(self.early_exit[1]) ] ) self.layers_del = None if getattr(args, "no_share_discriminator", False): self.layers_del = nn.ModuleList( [ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(self.early_exit[0]) ] ) if getattr(args, "share_discriminator_maskpredictor", False): assert getattr( args, "no_share_discriminator", False ), "must set saperate discriminator" self.layers_msk = self.layers_del def extract_features( self, prev_output_tokens, encoder_out=None, early_exit=None, layers=None, **unused ): """ Similar to *forward* but only return features. 
Inputs: prev_output_tokens: Tensor(B, T) encoder_out: a dictionary of hidden states and masks Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs the LevenshteinTransformer decoder has full-attention to all generated tokens """ # embed positions positions = ( self.embed_positions(prev_output_tokens) if self.embed_positions is not None else None ) # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers decoder_padding_mask = prev_output_tokens.eq(self.padding_idx) layers = self.layers if layers is None else layers early_exit = len(layers) if early_exit is None else early_exit for _, layer in enumerate(layers[:early_exit]): x, attn, _ = layer( x, encoder_out["encoder_out"][0] if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0) else None, encoder_out["encoder_padding_mask"][0] if ( encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0 ) else None, self_attn_mask=None, self_attn_padding_mask=decoder_padding_mask, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": attn, "inner_states": inner_states} @ensemble_decoder def forward_mask_ins(self, normalize, encoder_out, prev_output_tokens, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[1], layers=self.layers_msk, **unused ) features_cat = torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) decoder_out = F.linear(features_cat, self.embed_mask_ins.weight) if normalize: return F.log_softmax(decoder_out, -1), extra["attn"] return decoder_out, extra["attn"] @ensemble_decoder def forward_word_ins(self, normalize, encoder_out, prev_output_tokens, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[2], layers=self.layers, **unused ) decoder_out = self.output_layer(features) if normalize: return F.log_softmax(decoder_out, -1), extra["attn"] return decoder_out, extra["attn"] @ensemble_decoder def forward_word_del(self, normalize, encoder_out, prev_output_tokens, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[0], layers=self.layers_del, **unused ) decoder_out = F.linear(features, self.embed_word_del.weight) if normalize: return F.log_softmax(decoder_out, -1), extra["attn"] return decoder_out, extra["attn"] @register_model_architecture("levenshtein_transformer", "levenshtein_transformer") def levenshtein_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, 
"decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.early_exit = getattr(args, "early_exit", "6,6,6") args.no_share_discriminator = getattr(args, "no_share_discriminator", False) args.no_share_maskpredictor = getattr(args, "no_share_maskpredictor", False) args.share_discriminator_maskpredictor = getattr( args, "share_discriminator_maskpredictor", False ) args.no_share_last_layer = getattr(args, "no_share_last_layer", False) @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_wmt_en_de" ) def levenshtein_transformer_wmt_en_de(args): levenshtein_base_architecture(args) # similar parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_vaswani_wmt_en_de_big" ) def levenshtein_transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) levenshtein_base_architecture(args) # default parameters used in tensor2tensor implementation @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_wmt_en_de_big" ) def levenshtein_transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_dropout = getattr(args, "activation_dropout", 0.1) levenshtein_transformer_vaswani_wmt_en_de_big(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/nat/levenshtein_transformer.py
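The Levenshtein training pass above builds deletion targets from the model's own word-insertion predictions, either greedily or by sampling when `sampling_for_deletion` is set. Below is a minimal standalone sketch of those two prediction modes using toy logits rather than the real decoder output; it is for illustration only and does not depend on fairseq.

import torch
import torch.nn.functional as F

word_ins_out = torch.randn(2, 5, 7)  # toy logits: batch=2, length=5, vocab=7

# Greedy path (default): take the argmax token at every position.
greedy = F.log_softmax(word_ins_out, dim=-1).max(2)[1]

# Sampling path (`sampling_for_deletion=True`): draw one token per position
# from the softmax distribution, then reshape back to (batch, length).
sampled = torch.multinomial(
    F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1
).view(word_ins_out.size(0), -1)

print(greedy.shape, sampled.shape)  # both torch.Size([2, 5])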
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq.models.nat import ( _apply_del_words, _apply_ins_masks, _apply_ins_words, _fill, _skip, _skip_encoder_out, ) class _EnsembleModelEncoder(object): def __init__(self, models): self.models = models def reorder_encoder_out(self, encoder_outs, new_order): encoder_outs = [ model.encoder.reorder_encoder_out(encoder_out, new_order) for model, encoder_out in zip(self.models, encoder_outs) ] return encoder_outs class BasicEnsembleModel(torch.nn.Module): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__() self.models = torch.nn.ModuleList(models) self.bos = self.models[0].decoder.dictionary.bos() self.eos = self.models[0].decoder.dictionary.eos() self.pad = self.models[0].decoder.dictionary.pad() self.unk = self.models[0].decoder.dictionary.unk() self.encoder = _EnsembleModelEncoder(self.models) def has_encoder(self): return hasattr(self.models[0], "encoder") def max_decoder_positions(self): return min(m.max_decoder_positions() for m in self.models) @torch.no_grad() def forward_encoder(self, encoder_input): if not self.has_encoder(): return None return [model.forward_encoder(encoder_input) for model in self.models] @torch.no_grad() def forward_decoder(self, *inputs): raise NotImplementedError def initialize_output_tokens(self, *inputs): raise NotImplementedError class EnsembleLevT(BasicEnsembleModel): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__(models) @torch.no_grad() def forward_decoder( self, decoder_out, encoder_outs, eos_penalty=0.0, max_ratio=None, **kwargs ): # LevT ensembling # A pipeline of three steps: deletion, placeholder, and word insertion. # We need to average scores in each step in a pipeline way because of dependence. 
# deletion output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores attn = decoder_out.attn bsz = output_tokens.size(0) if max_ratio is None: max_lens = output_tokens.new().fill_(255) else: if not encoder_outs[0]["encoder_padding_mask"]: src_lens = ( encoder_outs[0]["encoder_out"][0] .new(bsz) .fill_(encoder_outs[0]["encoder_out"][0].size(1)) ) else: src_lens = (~encoder_outs[0]["encoder_padding_mask"][0]).sum(1) max_lens = (src_lens * max_ratio).clamp(min=10).long() # delete words # do not delete tokens if it is <s> </s> can_del_word = output_tokens.ne(self.pad).sum(1) > 2 if can_del_word.sum() != 0: # we cannot delete, skip output_tokens, output_scores, attn = self.forward_word_del( encoder_outs, output_tokens, output_scores, attn, can_del_word, ) # insert placeholders can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens if can_ins_mask.sum() != 0: output_tokens, output_scores = self.forward_mask_ins( encoder_outs, output_tokens, output_scores, can_ins_mask, eos_penalty, max_lens, ) # insert words can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 if can_ins_word.sum() != 0: output_tokens, output_scores, attn = self.forward_word_ins( encoder_outs, output_tokens, output_scores, attn, can_ins_word, ) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] attn = None if attn is None else attn[:, :cut_off, :] return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=attn, history=None, ) def forward_word_del( self, encoder_outs, output_tokens, output_scores, attn, can_del_word ): word_del_score_avg = [] word_del_attn_avg = [] for model, encoder_out in zip(self.models, encoder_outs): word_del_out, word_del_attn = model.decoder.forward_word_del( _skip(output_tokens, can_del_word), _skip_encoder_out(model.encoder, encoder_out, can_del_word), ) word_del_score = F.log_softmax(word_del_out, 2) word_del_score_avg.append(word_del_score) word_del_attn_avg.append(word_del_attn) word_del_score_avg = torch.logsumexp( torch.stack(word_del_score_avg, dim=0), dim=0 ) - math.log(len(self.models)) word_del_pred = word_del_score_avg.max(-1)[1].bool() if word_del_attn_avg[0] is not None: word_del_attn_avg = torch.stack(word_del_attn_avg, dim=0) / len(self.models) else: word_del_attn_avg = None _tokens, _scores, _attn = _apply_del_words( output_tokens[can_del_word], output_scores[can_del_word], word_del_attn_avg, word_del_pred, self.pad, self.bos, self.eos, ) output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad) output_scores = _fill(output_scores, can_del_word, _scores, 0) attn = _fill(attn, can_del_word, _attn, 0.0) return output_tokens, output_scores, attn def forward_mask_ins( self, encoder_outs, output_tokens, output_scores, can_ins_mask, eos_penalty, max_lens, ): mask_ins_score_avg = [] for model, encoder_out in zip(self.models, encoder_outs): mask_ins_out, _ = model.decoder.forward_mask_ins( _skip(output_tokens, can_ins_mask), _skip_encoder_out(model.encoder, encoder_out, can_ins_mask), ) mask_ins_score = F.log_softmax(mask_ins_out, 2) if eos_penalty > 0.0: mask_ins_score[:, :, 0] -= eos_penalty mask_ins_score_avg.append(mask_ins_score) mask_ins_score_avg = torch.logsumexp( torch.stack(mask_ins_score_avg, dim=0), dim=0 ) - math.log(len(self.models)) mask_ins_pred = mask_ins_score_avg.max(-1)[1] mask_ins_pred = torch.min( mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred) ) _tokens, 
_scores = _apply_ins_masks(
            output_tokens[can_ins_mask],
            output_scores[can_ins_mask],
            mask_ins_pred,
            self.pad,
            self.unk,
            self.eos,
        )
        output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad)
        output_scores = _fill(output_scores, can_ins_mask, _scores, 0)
        return output_tokens, output_scores

    def forward_word_ins(
        self, encoder_outs, output_tokens, output_scores, attn, can_ins_word
    ):
        word_ins_score_avg = []
        word_ins_attn_avg = []
        for model, encoder_out in zip(self.models, encoder_outs):
            word_ins_out, word_ins_attn = model.decoder.forward_word_ins(
                _skip(output_tokens, can_ins_word),
                _skip_encoder_out(model.encoder, encoder_out, can_ins_word),
            )
            word_ins_score = F.log_softmax(word_ins_out, 2)
            word_ins_score_avg.append(word_ins_score)
            word_ins_attn_avg.append(word_ins_attn)
        word_ins_score_avg = torch.logsumexp(
            torch.stack(word_ins_score_avg, dim=0), dim=0
        ) - math.log(len(self.models))
        if word_ins_attn_avg[0] is not None:
            word_ins_attn_avg = torch.stack(word_ins_attn_avg, dim=0) / len(self.models)
        else:
            word_ins_attn_avg = None
        word_ins_score_max, word_ins_pred = word_ins_score_avg.max(-1)
        _tokens, _scores = _apply_ins_words(
            output_tokens[can_ins_word],
            output_scores[can_ins_word],
            word_ins_pred,
            word_ins_score_max,
            self.unk,
        )
        output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad)
        output_scores = _fill(output_scores, can_ins_word, _scores, 0)
        # fill in the ensemble-averaged attention computed above
        attn = _fill(attn, can_ins_word, word_ins_attn_avg, 0.0)
        return output_tokens, output_scores, attn

    def initialize_output_tokens(self, encoder_outs, src_tokens):
        # LevT doesn't do length prediction.
        return self.models[0].initialize_output_tokens(encoder_outs[0], src_tokens)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/nat/nonautoregressive_ensembles.py
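Each ensemble step above averages the per-model predictions in probability space while staying in log space. The following small, self-contained illustration (toy tensors, no fairseq dependency) shows the `logsumexp - log(N)` identity the wrappers rely on.

import math
import torch

# Three models' log-probabilities over a toy vocabulary (batch=2, vocab=5).
log_probs = [torch.log_softmax(torch.randn(2, 5), dim=-1) for _ in range(3)]

# log( (1/N) * sum_i p_i ) = logsumexp_i(log p_i) - log(N)
avg = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(len(log_probs))

print(avg.exp().sum(-1))  # each row sums to ~1.0, i.e. still a valid distribution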
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F from fairseq import utils from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder from fairseq.models.transformer import Embedding from fairseq.modules.transformer_sentence_encoder import init_bert_params def _mean_pooling(enc_feats, src_masks): # enc_feats: T x B x C # src_masks: B x T or None if src_masks is None: enc_feats = enc_feats.mean(0) else: src_masks = (~src_masks).transpose(0, 1).type_as(enc_feats) enc_feats = ( (enc_feats / src_masks.sum(0)[None, :, None]) * src_masks[:, :, None] ).sum(0) return enc_feats def _argmax(x, dim): return (x == x.max(dim, keepdim=True)[0]).type_as(x) def _uniform_assignment(src_lens, trg_lens): max_trg_len = trg_lens.max() steps = (src_lens.float() - 1) / (trg_lens.float() - 1) # step-size # max_trg_len index_t = utils.new_arange(trg_lens, max_trg_len).float() index_t = steps[:, None] * index_t[None, :] # batch_size X max_trg_len index_t = torch.round(index_t).long().detach() return index_t @register_model("nonautoregressive_transformer") class NATransformerModel(FairseqNATModel): @property def allow_length_beam(self): return True @staticmethod def add_args(parser): FairseqNATModel.add_args(parser) # length prediction parser.add_argument( "--src-embedding-copy", action="store_true", help="copy encoder word embeddings as the initial input of the decoder", ) parser.add_argument( "--pred-length-offset", action="store_true", help="predicting the length difference between the target and source sentences", ) parser.add_argument( "--sg-length-pred", action="store_true", help="stop the gradients back-propagated from the length predictor", ) parser.add_argument( "--length-loss-factor", type=float, help="weights on the length prediction loss", ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = NATransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # length prediction length_out = self.decoder.forward_length( normalize=False, encoder_out=encoder_out ) length_tgt = self.decoder.forward_length_prediction( length_out, encoder_out, tgt_tokens ) # decoding word_ins_out = self.decoder( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) return { "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": tgt_tokens.ne(self.pad), "ls": self.args.label_smoothing, "nll_loss": True, }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor, }, } def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs): step = decoder_out.step output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # execute the decoder output_masks = output_tokens.ne(self.pad) _scores, _tokens = self.decoder( normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out, step=step, ).max(-1) output_tokens.masked_scatter_(output_masks, _tokens[output_masks]) 
output_scores.masked_scatter_(output_masks, _scores[output_masks]) if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history, ) def initialize_output_tokens(self, encoder_out, src_tokens): # length prediction length_tgt = self.decoder.forward_length_prediction( self.decoder.forward_length(normalize=True, encoder_out=encoder_out), encoder_out=encoder_out, ) max_length = length_tgt.clamp_(min=2).max() idx_length = utils.new_arange(src_tokens, max_length) initial_output_tokens = src_tokens.new_zeros( src_tokens.size(0), max_length ).fill_(self.pad) initial_output_tokens.masked_fill_( idx_length[None, :] < length_tgt[:, None], self.unk ) initial_output_tokens[:, 0] = self.bos initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos) initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(encoder_out["encoder_out"][0]) return DecoderOut( output_tokens=initial_output_tokens, output_scores=initial_output_scores, attn=None, step=0, max_step=0, history=None, ) def regenerate_length_beam(self, decoder_out, beam_size): output_tokens = decoder_out.output_tokens length_tgt = output_tokens.ne(self.pad).sum(1) length_tgt = ( length_tgt[:, None] + utils.new_arange(length_tgt, 1, beam_size) - beam_size // 2 ) length_tgt = length_tgt.view(-1).clamp_(min=2) max_length = length_tgt.max() idx_length = utils.new_arange(length_tgt, max_length) initial_output_tokens = output_tokens.new_zeros( length_tgt.size(0), max_length ).fill_(self.pad) initial_output_tokens.masked_fill_( idx_length[None, :] < length_tgt[:, None], self.unk ) initial_output_tokens[:, 0] = self.bos initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos) initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(decoder_out.output_scores) return decoder_out._replace( output_tokens=initial_output_tokens, output_scores=initial_output_scores ) class NATransformerDecoder(FairseqNATDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.encoder_embed_dim = args.encoder_embed_dim self.sg_length_pred = getattr(args, "sg_length_pred", False) self.pred_length_offset = getattr(args, "pred_length_offset", False) self.length_loss_factor = getattr(args, "length_loss_factor", 0.1) self.src_embedding_copy = getattr(args, "src_embedding_copy", False) self.embed_length = Embedding(256, self.encoder_embed_dim, None) @ensemble_decoder def forward(self, normalize, encoder_out, prev_output_tokens, step=0, **unused): features, _ = self.extract_features( prev_output_tokens, encoder_out=encoder_out, embedding_copy=(step == 0) & self.src_embedding_copy, ) decoder_out = self.output_layer(features) return F.log_softmax(decoder_out, -1) if normalize else decoder_out @ensemble_decoder def forward_length(self, normalize, encoder_out): enc_feats = encoder_out["encoder_out"][0] # T x B x C if len(encoder_out["encoder_padding_mask"]) > 0: src_masks = encoder_out["encoder_padding_mask"][0] # B x T else: src_masks = None enc_feats = _mean_pooling(enc_feats, src_masks) if self.sg_length_pred: enc_feats = enc_feats.detach() length_out = F.linear(enc_feats, self.embed_length.weight) return F.log_softmax(length_out, -1) if normalize else 
length_out def extract_features( self, prev_output_tokens, encoder_out=None, early_exit=None, embedding_copy=False, **unused ): """ Similar to *forward* but only return features. Inputs: prev_output_tokens: Tensor(B, T) encoder_out: a dictionary of hidden states and masks Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs the LevenshteinTransformer decoder has full-attention to all generated tokens """ # embedding if embedding_copy: src_embd = encoder_out["encoder_embedding"][0] if len(encoder_out["encoder_padding_mask"]) > 0: src_mask = encoder_out["encoder_padding_mask"][0] else: src_mask = None src_mask = ( ~src_mask if src_mask is not None else prev_output_tokens.new_ones(*src_embd.size()[:2]).bool() ) x, decoder_padding_mask = self.forward_embedding( prev_output_tokens, self.forward_copying_source( src_embd, src_mask, prev_output_tokens.ne(self.padding_idx) ), ) else: x, decoder_padding_mask = self.forward_embedding(prev_output_tokens) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for i, layer in enumerate(self.layers): # early exit from the decoder. if (early_exit is not None) and (i >= early_exit): break x, attn, _ = layer( x, encoder_out["encoder_out"][0] if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0) else None, encoder_out["encoder_padding_mask"][0] if ( encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0 ) else None, self_attn_mask=None, self_attn_padding_mask=decoder_padding_mask, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": attn, "inner_states": inner_states} def forward_embedding(self, prev_output_tokens, states=None): # embed positions positions = ( self.embed_positions(prev_output_tokens) if self.embed_positions is not None else None ) # embed tokens and positions if states is None: x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) else: x = states if positions is not None: x += positions x = self.dropout_module(x) decoder_padding_mask = prev_output_tokens.eq(self.padding_idx) return x, decoder_padding_mask def forward_copying_source(self, src_embeds, src_masks, tgt_masks): length_sources = src_masks.sum(1) length_targets = tgt_masks.sum(1) mapped_inputs = _uniform_assignment(length_sources, length_targets).masked_fill( ~tgt_masks, 0 ) copied_embedding = torch.gather( src_embeds, 1, mapped_inputs.unsqueeze(-1).expand( *mapped_inputs.size(), src_embeds.size(-1) ), ) return copied_embedding def forward_length_prediction(self, length_out, encoder_out, tgt_tokens=None): enc_feats = encoder_out["encoder_out"][0] # T x B x C if len(encoder_out["encoder_padding_mask"]) > 0: src_masks = encoder_out["encoder_padding_mask"][0] # B x T else: src_masks = None if self.pred_length_offset: if src_masks is None: src_lengs = enc_feats.new_ones(enc_feats.size(1)).fill_( enc_feats.size(0) ) else: src_lengs = (~src_masks).transpose(0, 1).type_as(enc_feats).sum(0) src_lengs = src_lengs.long() if tgt_tokens is not None: # obtain the length target tgt_lengs = tgt_tokens.ne(self.padding_idx).sum(1).long() if self.pred_length_offset: length_tgt = tgt_lengs - src_lengs + 128 else: length_tgt = tgt_lengs length_tgt = length_tgt.clamp(min=0, max=255) else: # predict the length target (greedy for now) # TODO: 
implementing length-beam pred_lengs = length_out.max(-1)[1] if self.pred_length_offset: length_tgt = pred_lengs - 128 + src_lengs else: length_tgt = pred_lengs return length_tgt @register_model_architecture( "nonautoregressive_transformer", "nonautoregressive_transformer" ) def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) @register_model_architecture( "nonautoregressive_transformer", "nonautoregressive_transformer_wmt_en_de" ) def nonautoregressive_transformer_wmt_en_de(args): base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/nat/nonautoregressive_transformer.py
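When `src_embedding_copy` is enabled, the decoder above seeds its input by uniformly mapping target positions onto source positions. Below is a hedged, standalone restatement of `_uniform_assignment` for illustration only, with plain `torch.arange` standing in for fairseq's `new_arange` helper.

import torch

def uniform_assignment(src_lens, trg_lens):
    # Evenly spread each decoder position over the source positions,
    # mirroring `_uniform_assignment` in the file above.
    max_trg_len = int(trg_lens.max())
    steps = (src_lens.float() - 1) / (trg_lens.float() - 1)   # step size per sentence
    index_t = torch.arange(max_trg_len).float()
    index_t = steps[:, None] * index_t[None, :]               # batch x max_trg_len
    return torch.round(index_t).long()

src_lens = torch.tensor([4, 6])
trg_lens = torch.tensor([5, 3])
print(uniform_assignment(src_lens, trg_lens))
# Row 0 maps 5 decoder slots onto source positions 0..3; entries of row 1 past its
# real target length are ignored downstream via the target padding mask.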
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ This file implements: Ghazvininejad, Marjan, et al. "Constant-time machine translation with conditional masked language models." arXiv preprint arXiv:1904.09324 (2019). """ from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import NATransformerModel from fairseq.utils import new_arange def _skeptical_unmasking(output_scores, output_masks, p): sorted_index = output_scores.sort(-1)[1] boundary_len = ( (output_masks.sum(1, keepdim=True).type_as(output_scores) - 2) * p ).long() skeptical_mask = new_arange(output_masks) < boundary_len return skeptical_mask.scatter(1, sorted_index, skeptical_mask) @register_model("cmlm_transformer") class CMLMNATransformerModel(NATransformerModel): @staticmethod def add_args(parser): NATransformerModel.add_args(parser) def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert not self.decoder.src_embedding_copy, "do not support embedding copy." # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # length prediction length_out = self.decoder.forward_length( normalize=False, encoder_out=encoder_out ) length_tgt = self.decoder.forward_length_prediction( length_out, encoder_out, tgt_tokens ) # decoding word_ins_out = self.decoder( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) word_ins_mask = prev_output_tokens.eq(self.unk) return { "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": word_ins_mask, "ls": self.args.label_smoothing, "nll_loss": True, }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor, }, } def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs): step = decoder_out.step max_step = decoder_out.max_step output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # execute the decoder output_masks = output_tokens.eq(self.unk) _scores, _tokens = self.decoder( normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out, ).max(-1) output_tokens.masked_scatter_(output_masks, _tokens[output_masks]) output_scores.masked_scatter_(output_masks, _scores[output_masks]) if history is not None: history.append(output_tokens.clone()) # skeptical decoding (depend on the maximum decoding steps.) 
if (step + 1) < max_step: skeptical_mask = _skeptical_unmasking( output_scores, output_tokens.ne(self.pad), 1 - (step + 1) / max_step ) output_tokens.masked_fill_(skeptical_mask, self.unk) output_scores.masked_fill_(skeptical_mask, 0.0) if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history, ) @register_model_architecture("cmlm_transformer", "cmlm_transformer") def cmlm_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", True) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.ngram_predictor = getattr(args, "ngram_predictor", 1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) @register_model_architecture("cmlm_transformer", "cmlm_transformer_wmt_en_de") def cmlm_wmt_en_de(args): cmlm_base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/nat/cmlm_transformer.py
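The CMLM decoder above follows the mask-predict schedule: after each refinement step it re-masks a shrinking fraction of the least-confident tokens and predicts them again. The sketch below is a simplified illustration of that schedule with toy confidence scores; the real `_skeptical_unmasking` additionally excludes the two special tokens and operates on padded batches.

import torch

scores = torch.tensor([[0.9, 0.2, 0.8, 0.1, 0.7, 0.3, 0.6, 0.4]])  # toy confidences
max_step = 4

for step in range(max_step - 1):
    ratio = 1.0 - (step + 1) / max_step              # fraction of tokens to re-mask
    n_mask = int(scores.size(1) * ratio)
    remask = scores.topk(n_mask, largest=False, dim=-1).indices
    print(step, ratio, sorted(remask[0].tolist()))
# step 0 re-masks the 6 least-confident positions, step 1 re-masks 4, step 2 re-masks 2.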
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel, base_architecture
from fairseq.modules import DynamicCRF


@register_model("nacrf_transformer")
class NACRFTransformerModel(NATransformerModel):
    def __init__(self, args, encoder, decoder):
        super().__init__(args, encoder, decoder)
        self.crf_layer = DynamicCRF(
            num_embedding=len(self.tgt_dict),
            low_rank=args.crf_lowrank_approx,
            beam_size=args.crf_beam_approx,
        )

    @property
    def allow_ensemble(self):
        return False

    @staticmethod
    def add_args(parser):
        NATransformerModel.add_args(parser)
        parser.add_argument(
            "--crf-lowrank-approx",
            type=int,
            help="the dimension of low-rank approximation of transition",
        )
        parser.add_argument(
            "--crf-beam-approx",
            type=int,
            help="the beam size for approximating the normalizing factor",
        )
        parser.add_argument(
            "--word-ins-loss-factor",
            type=float,
            help="weights on the NAT loss used for co-training with the CRF loss",
        )

    def forward(
        self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
    ):
        # encoding
        encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)

        # length prediction
        length_out = self.decoder.forward_length(
            normalize=False, encoder_out=encoder_out
        )
        length_tgt = self.decoder.forward_length_prediction(
            length_out, encoder_out, tgt_tokens
        )

        # decoding
        word_ins_out = self.decoder(
            normalize=False,
            prev_output_tokens=prev_output_tokens,
            encoder_out=encoder_out,
        )
        word_ins_tgt, word_ins_mask = tgt_tokens, tgt_tokens.ne(self.pad)

        # compute the log-likelihood of CRF
        crf_nll = -self.crf_layer(word_ins_out, word_ins_tgt, word_ins_mask)
        crf_nll = (crf_nll / word_ins_mask.type_as(crf_nll).sum(-1)).mean()

        return {
            "word_ins": {
                "out": word_ins_out,
                "tgt": word_ins_tgt,
                "mask": word_ins_mask,
                "ls": self.args.label_smoothing,
                "nll_loss": True,
                "factor": self.args.word_ins_loss_factor,
            },
            "word_crf": {"loss": crf_nll},
            "length": {
                "out": length_out,
                "tgt": length_tgt,
                "factor": self.decoder.length_loss_factor,
            },
        }

    def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
        output_tokens = decoder_out.output_tokens
        output_scores = decoder_out.output_scores
        history = decoder_out.history

        # execute the decoder and get emission scores
        output_masks = output_tokens.ne(self.pad)
        word_ins_out = self.decoder(
            normalize=False, prev_output_tokens=output_tokens, encoder_out=encoder_out
        )

        # run viterbi decoding through CRF
        _scores, _tokens = self.crf_layer.forward_decoder(word_ins_out, output_masks)
        output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
        output_scores.masked_scatter_(output_masks, _scores[output_masks])
        if history is not None:
            history.append(output_tokens.clone())

        return decoder_out._replace(
            output_tokens=output_tokens,
            output_scores=output_scores,
            attn=None,
            history=history,
        )


@register_model_architecture("nacrf_transformer", "nacrf_transformer")
def nacrf_base_architecture(args):
    args.crf_lowrank_approx = getattr(args, "crf_lowrank_approx", 32)
    args.crf_beam_approx = getattr(args, "crf_beam_approx", 64)
    args.word_ins_loss_factor = getattr(args, "word_ins_loss_factor", 0.5)
    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
    args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
    base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/nat/nat_crf_transformer.py
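The CRF negative log-likelihood above is normalized per token for each sentence before averaging over the batch. A tiny numeric illustration of those two lines, with made-up values:

import torch

crf_nll = torch.tensor([12.0, 30.0])                               # per-sentence NLL, summed over tokens
word_ins_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 1, 1]]).bool()  # 3 and 4 real tokens

per_token = crf_nll / word_ins_mask.type_as(crf_nll).sum(-1)   # tensor([4.0000, 7.5000])
loss = per_token.mean()                                        # tensor(5.7500)
print(per_token, loss)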
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import torch.nn.functional as F from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import ( FairseqNATModel, LevenshteinTransformerDecoder, LevenshteinTransformerModel, ensemble_decoder, ) from fairseq.models.transformer import Linear from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange class NegativeDistanceScore(object): def __init__(self): # pre-compute some values self.scores = {} self.scores[0.5] = self.compute_score_full(50, 0.5) self.scores[1.0] = self.compute_score_full(50, 1.0) self.scores[2.0] = self.compute_score_full(50, 2.0) def __call__(self, i, L, tau): if (tau is None) or (tau > 1000): return 1 / L if tau in self.scores: if L < self.scores[tau].shape[0]: return self.scores[tau][L - 1, i] return self.compute_score(L, tau)[i] def compute_score(self, L, tau): s = np.array([-abs(L / 2 - i) / tau for i in range(L)]) s = np.exp(s - s.max()) return s / s.sum() def compute_score_full(self, L, tau): s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau s = np.tril(s, 0) + np.triu(s - float("inf"), 1) s = np.exp(s - s.max(1, keepdims=True)) return s / s.sum(1, keepdims=True) neg_scorer = NegativeDistanceScore() def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None): try: from fairseq import libnat except ImportError as e: import sys sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n") raise e B = in_tokens.size(0) T = in_tokens.size(1) V = vocab_size with torch.cuda.device_of(in_tokens): in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) insert_labels = [a[:-1] for a in full_labels] # numericalize1 insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float() insert_index, insert_labels = zip( *[ (w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau)) for i, labels in enumerate(insert_labels) for j, label in enumerate(labels[1:-1]) for k, w in enumerate(label) ] ) # HACK 1:-1 insert_index, insert_labels = [ torch.tensor(list(a), device=in_tokens.device) for a in [insert_index, insert_labels] ] insert_label_tensors.scatter_(0, insert_index.long(), insert_labels) insert_label_tensors = insert_label_tensors.view(B, T - 1, V) return insert_label_tensors def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx): padding_masks = in_tokens[:, 1:].eq(padding_idx) word_ins_scores.masked_fill_(padding_masks, 0.0) word_ins_pred.masked_fill_(padding_masks, padding_idx) in_coords = new_arange(in_tokens).type_as(in_scores) # shift all padding predictions to infinite out_coords = (in_coords[:, 1:] - 0.5).masked_fill( word_ins_pred.eq(padding_idx), float("inf") ) out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1] out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords) out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords) return out_tokens, out_scores @register_model("insertion_transformer") class InsertionTransformerModel(LevenshteinTransformerModel): def __init__(self, args, encoder, decoder): 
super().__init__(args, encoder, decoder) @staticmethod def add_args(parser): FairseqNATModel.add_args(parser) parser.add_argument("--label-tau", default=None, type=float) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = InsertionTransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert tgt_tokens is not None, "forward function only supports training." # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # generate training labels for insertion word_ins_out = self.decoder.forward_word_ins( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) word_ins_tgt = _get_ins_targets( prev_output_tokens, tgt_tokens, self.pad, self.unk, len(self.tgt_dict), tau=self.decoder.label_tau, ).type_as(word_ins_out) word_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) return { "word_ins": { "out": word_ins_out, "tgt": word_ins_tgt, "mask": word_ins_masks, "ls": self.args.label_smoothing, "nll_loss": True, } } def forward_decoder( self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs ): output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # TODO: decoding for InsertionTransformer word_ins_score = self.decoder.forward_word_ins( normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out ) if eos_penalty > 0.0: word_ins_score[:, :, self.pad] -= eos_penalty word_ins_score, word_ins_pred = word_ins_score.max(-1) output_tokens, output_scores = _apply_ins_words( output_tokens, output_scores, word_ins_pred, word_ins_score, self.pad ) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history, ) class InsertionTransformerDecoder(LevenshteinTransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): # use the TransformerDecoder's __init__ super(LevenshteinTransformerDecoder, self).__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.pool_out = Linear(self.output_embed_dim * 2, self.output_embed_dim) self.label_tau = getattr(args, "label_tau", None) @ensemble_decoder def forward_word_ins(self, normalize, encoder_out, prev_output_tokens): features = self.extract_features(prev_output_tokens, encoder_out=encoder_out)[0] features = self.pool_out( torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) ) decoder_out = self.output_layer(features) return F.log_softmax(decoder_out, -1) if normalize else decoder_out def forward_mask_ins(self, *args, **kwargs): raise NotImplementedError def forward_word_del(self, *args, **kwargs): raise NotImplementedError @register_model_architecture("insertion_transformer", "insertion_transformer") def insertion_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, 
"encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # special for insertion transformer args.label_tau = getattr(args, "label_tau", None)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/nat/insertion_transformer.py
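`_apply_ins_words` above splices predicted words between existing tokens by sorting fractional coordinates. The following self-contained toy walk-through shows that coordinate trick; the token ids and the pad value are arbitrary.

import torch

pad = 0
in_tokens = torch.tensor([[1, 5, 6, 2]])       # e.g. <s> a b </s>
word_ins_pred = torch.tensor([[7, pad, 8]])    # insert 7 after <s>, nothing after a, 8 after b

in_coords = torch.arange(in_tokens.size(1)).float()[None, :]
# Give every inserted word a fractional coordinate half a step after its left
# neighbour; slots predicted as pad are pushed to +inf so they sort to the end.
out_coords = (in_coords[:, 1:] - 0.5).masked_fill(word_ins_pred.eq(pad), float("inf"))
order = torch.cat([in_coords, out_coords], 1).sort(-1)[1]
out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, order)
print(out_tokens)  # tensor([[1, 7, 5, 6, 8, 2, 0]]); the trailing pad is trimmed by the caller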
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .hub_interface import * # noqa from .model import * # noqa
KosmosX-API-main
kosmosX/fairseq/fairseq/models/bart/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension """ from typing import Optional import logging import torch import torch.nn as nn from fairseq import utils from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import TransformerModel from fairseq.modules.transformer_sentence_encoder import init_bert_params from .hub_interface import BARTHubInterface logger = logging.getLogger(__name__) @register_model("bart") class BARTModel(TransformerModel): __jit_unused_properties__ = ["supported_targets"] @classmethod def hub_models(cls): return { "bart.base": "http://dl.fbaipublicfiles.com/fairseq/models/bart.base.tar.gz", "bart.large": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz", "bart.large.mnli": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz", "bart.large.cnn": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.cnn.tar.gz", "bart.large.xsum": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.xsum.tar.gz", } def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) # We follow BERT's random weight initialization self.apply(init_bert_params) self.classification_heads = nn.ModuleDict() if hasattr(self.encoder, "dictionary"): self.eos: int = self.encoder.dictionary.eos() @staticmethod def add_args(parser): super(BARTModel, BARTModel).add_args(parser) parser.add_argument( "--pooler-dropout", type=float, metavar="D", help="dropout probability in the masked_lm pooler layers", ) parser.add_argument( "--pooler-activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use for pooler layer", ) parser.add_argument( "--spectral-norm-classification-head", action="store_true", help="Apply spectral normalization on the classification head", ) @property def supported_targets(self): return {"self"} def forward( self, src_tokens, src_lengths, prev_output_tokens, features_only: bool = False, classification_head_name: Optional[str] = None, token_embeddings: Optional[torch.Tensor] = None, return_all_hiddens: bool = True, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): if classification_head_name is not None: features_only = True encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, token_embeddings=token_embeddings, return_all_hiddens=return_all_hiddens, ) x, extra = self.decoder( prev_output_tokens, encoder_out=encoder_out, features_only=features_only, alignment_layer=alignment_layer, alignment_heads=alignment_heads, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, ) eos: int = self.eos if classification_head_name is not None: sentence_representation = x[src_tokens.eq(eos), :].view( x.size(0), -1, x.size(-1) )[:, -1, :] for k, head in self.classification_heads.items(): # for torch script only supports iteration if k == classification_head_name: x = head(sentence_representation) break return x, extra @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", bpe="gpt2", sample_break_mode="eos", **kwargs, ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, 
sample_break_mode=sample_break_mode, **kwargs, ) return BARTHubInterface(x["args"], x["task"], x["models"][0]) def register_classification_head( self, name, num_classes=None, inner_dim=None, **kwargs ): """Register a classification head.""" logger.info("Registering classification head: {0}".format(name)) if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: logger.warning( 're-registering head "{}" with num_classes {} (prev: {}) ' "and inner_dim {} (prev: {})".format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = BARTClassificationHead( input_dim=self.args.encoder_embed_dim, inner_dim=inner_dim or self.args.encoder_embed_dim, num_classes=num_classes, activation_fn=self.args.pooler_activation_fn, pooler_dropout=self.args.pooler_dropout, do_spectral_norm=getattr( self.args, "spectral_norm_classification_head", False ), ) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + "." if name != "" else "" current_head_names = ( [] if not hasattr(self, "classification_heads") else self.classification_heads.keys() ) # Handle new classification heads present in the state dict. keys_to_delete = [] for k in state_dict.keys(): if not k.startswith(prefix + "classification_heads."): continue head_name = k[len(prefix + "classification_heads.") :].split(".")[0] num_classes = state_dict[ prefix + "classification_heads." + head_name + ".out_proj.weight" ].size(0) inner_dim = state_dict[ prefix + "classification_heads." + head_name + ".dense.weight" ].size(0) if getattr(self.args, "load_checkpoint_heads", False): if head_name not in current_head_names: self.register_classification_head(head_name, num_classes, inner_dim) else: if head_name not in current_head_names: logger.warning( "deleting classification head ({}) from checkpoint " "not present in current model: {}".format(head_name, k) ) keys_to_delete.append(k) elif ( num_classes != self.classification_heads[head_name].out_proj.out_features or inner_dim != self.classification_heads[head_name].dense.out_features ): logger.warning( "deleting classification head ({}) from checkpoint " "with different dimensions than current model: {}".format( head_name, k ) ) keys_to_delete.append(k) for k in keys_to_delete: del state_dict[k] def truncate_emb(key): if key in state_dict: state_dict[key] = state_dict[key][:-1, :] # When finetuning on translation task, remove last row of # embedding matrix that corresponds to mask_idx token. loaded_dict_size = state_dict["encoder.embed_tokens.weight"].size(0) if ( loaded_dict_size == len(self.encoder.dictionary) + 1 and "<mask>" not in self.encoder.dictionary ): truncate_emb("encoder.embed_tokens.weight") truncate_emb("decoder.embed_tokens.weight") truncate_emb("encoder.output_projection.weight") truncate_emb("decoder.output_projection.weight") # When continued pretraining on new set of languages for mbart, # add extra lang embeddings at the end of embed_tokens. # Note: newly added languages are assumed to have been added at the end. if self.args.task == "multilingual_denoising" and loaded_dict_size < len( self.encoder.dictionary ): logger.info( "Adding extra language embeddings not found in pretrained model for " "continued pretraining of MBART on new set of languages." 
) loaded_mask_token_embedding = state_dict["encoder.embed_tokens.weight"][ -1, : ] num_langids_to_add = len(self.encoder.dictionary) - loaded_dict_size embed_dim = state_dict["encoder.embed_tokens.weight"].size(1) new_lang_embed_to_add = torch.zeros(num_langids_to_add, embed_dim) nn.init.normal_(new_lang_embed_to_add, mean=0, std=embed_dim ** -0.5) new_lang_embed_to_add = new_lang_embed_to_add.to( dtype=state_dict["encoder.embed_tokens.weight"].dtype, ) state_dict["encoder.embed_tokens.weight"] = torch.cat( [ state_dict["encoder.embed_tokens.weight"][ : loaded_dict_size - 1, : ], new_lang_embed_to_add, loaded_mask_token_embedding.unsqueeze(0), ] ) state_dict["decoder.embed_tokens.weight"] = torch.cat( [ state_dict["decoder.embed_tokens.weight"][ : loaded_dict_size - 1, : ], new_lang_embed_to_add, loaded_mask_token_embedding.unsqueeze(0), ] ) # Copy any newly-added classification heads into the state dict # with their current weights. if hasattr(self, "classification_heads"): cur_state = self.classification_heads.state_dict() for k, v in cur_state.items(): if prefix + "classification_heads." + k not in state_dict: logger.info("Overwriting " + prefix + "classification_heads." + k) state_dict[prefix + "classification_heads." + k] = v class BARTClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout, do_spectral_norm=False, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) if do_spectral_norm: self.out_proj = torch.nn.utils.spectral_norm(self.out_proj) def forward(self, features, **kwargs): x = features x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x @register_model_architecture("bart", "bart_large") def bart_large_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 1024) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 12) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.relu_dropout = getattr(args, "relu_dropout", 0.0) args.dropout = getattr(args, "dropout", 0.1) args.max_target_positions = getattr(args, "max_target_positions", 1024) args.max_source_positions = getattr(args, "max_source_positions", 1024) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( 
args, "share_decoder_input_output_embed", True ) args.share_all_embeddings = getattr(args, "share_all_embeddings", True) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", True) args.layernorm_embedding = getattr(args, "layernorm_embedding", True) args.activation_fn = getattr(args, "activation_fn", "gelu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.pooler_dropout = getattr(args, "pooler_dropout", 0.0) @register_model_architecture("bart", "bart_base") def bart_base_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 768) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12) bart_large_architecture(args) @register_model_architecture("bart", "mbart_large") def mbart_large_architecture(args): args.no_scale_embedding = getattr(args, "no_scale_embedding", False) bart_large_architecture(args) @register_model_architecture("bart", "mbart_base") def mbart_base_architecture(args): args.no_scale_embedding = getattr(args, "no_scale_embedding", False) bart_base_architecture(args) @register_model_architecture("bart", "mbart_base_wmt20") def mbart_base_wmt20_architecture(args): args.layernorm_embedding = getattr(args, "layernorm_embedding", False) mbart_base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/bart/model.py
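A minimal usage sketch for the classification head defined in the file above; it assumes fairseq is installed so the class can be imported from this module path, and the dimensions, class count, and batch size are purely illustrative.

# Hedged sketch (assumption: fairseq is importable; sizes below are illustrative).
import torch
from fairseq.models.bart.model import BARTClassificationHead

head = BARTClassificationHead(
    input_dim=1024,        # bart_large encoder_embed_dim
    inner_dim=1024,
    num_classes=3,         # e.g. a 3-way NLI-style task
    activation_fn="tanh",  # pooler_activation_fn default
    pooler_dropout=0.0,
)

features = torch.randn(2, 1024)  # one pooled representation per sentence
logits = head(features)          # dropout -> dense -> tanh -> dropout -> out_proj
print(logits.shape)              # torch.Size([2, 3])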
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import Dict, List import numpy as np import torch import torch.nn.functional as F from fairseq import utils from fairseq.hub_utils import GeneratorHubInterface logger = logging.getLogger(__name__) class BARTHubInterface(GeneratorHubInterface): """A simple PyTorch Hub interface to BART. Usage: https://github.com/pytorch/fairseq/tree/main/examples/bart """ def __init__(self, cfg, task, model): super().__init__(cfg, task, [model]) self.model = self.models[0] def encode( self, sentence: str, *addl_sentences, no_separator=True ) -> torch.LongTensor: """ BPE-encode a sentence (or multiple sentences). Every sequence begins with a beginning-of-sentence (`<s>`) symbol. Every sentence ends with an end-of-sentence (`</s>`). Example (single sentence): `<s> a b c </s>` Example (sentence pair): `<s> d e f </s> 1 2 3 </s>` The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE requires leading spaces. For example:: >>> bart.encode('Hello world').tolist() [0, 31414, 232, 2] >>> bart.encode(' world').tolist() [0, 232, 2] >>> bart.encode('world').tolist() [0, 8331, 2] """ tokens = self.bpe.encode(sentence) if len(tokens.split(" ")) > min(self.max_positions) - 2: tokens = " ".join(tokens.split(" ")[: min(self.max_positions) - 2]) bpe_sentence = "<s> " + tokens + " </s>" for s in addl_sentences: bpe_sentence += " </s>" if not no_separator else "" bpe_sentence += " " + self.bpe.encode(s) + " </s>" tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False) return tokens.long() def decode(self, tokens: torch.LongTensor): assert tokens.dim() == 1 tokens = tokens.cpu().numpy() if tokens[0] == self.task.source_dictionary.bos(): tokens = tokens[1:] # remove <s> eos_mask = tokens == self.task.source_dictionary.eos() doc_mask = eos_mask[1:] & eos_mask[:-1] sentences = np.split(tokens, doc_mask.nonzero()[0] + 1) sentences = [ self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences ] if len(sentences) == 1: return sentences[0] return sentences def _build_sample(self, src_tokens: List[torch.LongTensor]): # assert torch.is_tensor(src_tokens) dataset = self.task.build_dataset_for_inference( src_tokens, [x.numel() for x in src_tokens], ) sample = dataset.collater(dataset) sample = utils.apply_to_sample(lambda tensor: tensor.to(self.device), sample) return sample def generate( self, tokenized_sentences: List[torch.LongTensor], *args, inference_step_args=None, skip_invalid_size_inputs=False, **kwargs ) -> List[List[Dict[str, torch.Tensor]]]: inference_step_args = inference_step_args or {} if "prefix_tokens" in inference_step_args: raise NotImplementedError("prefix generation not implemented for BART") res = [] for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs): src_tokens = batch["net_input"]["src_tokens"] inference_step_args["prefix_tokens"] = src_tokens.new_full( (src_tokens.size(0), 1), fill_value=self.task.source_dictionary.bos() ).to(device=self.device) results = super().generate( src_tokens, *args, inference_step_args=inference_step_args, skip_invalid_size_inputs=skip_invalid_size_inputs, **kwargs ) for id, hypos in zip(batch["id"].tolist(), results): res.append((id, hypos)) res = [hypos for _, hypos in sorted(res, key=lambda x: x[0])] return res def extract_features( self, tokens: torch.LongTensor, return_all_hiddens: bool = False ) 
-> torch.Tensor:
        if tokens.dim() == 1:
            tokens = tokens.unsqueeze(0)
        if tokens.size(-1) > min(self.model.max_positions()):
            raise ValueError(
                "tokens exceeds maximum length: {} > {}".format(
                    tokens.size(-1), self.model.max_positions()
                )
            )
        tokens = tokens.to(device=self.device)  # torch.Tensor.to is not in-place, so keep the result
        prev_output_tokens = tokens.clone()
        # rotate: place EOS (the last non-pad token) first, then shift the rest right by one
        prev_output_tokens[:, 0] = tokens.gather(
            1,
            (tokens.ne(self.task.source_dictionary.pad()).sum(dim=1) - 1).unsqueeze(-1),
        ).squeeze()
        prev_output_tokens[:, 1:] = tokens[:, :-1]
        features, extra = self.model(
            src_tokens=tokens,
            src_lengths=None,
            prev_output_tokens=prev_output_tokens,
            features_only=True,
            return_all_hiddens=return_all_hiddens,
        )
        if return_all_hiddens:
            # convert from T x B x C -> B x T x C
            inner_states = extra["inner_states"]
            return [inner_state.transpose(0, 1) for inner_state in inner_states]
        else:
            return features  # just the last layer's features

    def register_classification_head(
        self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs
    ):
        self.model.register_classification_head(
            name, num_classes=num_classes, embedding_size=embedding_size, **kwargs
        )

    def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):
        if tokens.dim() == 1:
            tokens = tokens.unsqueeze(0)
        features = self.extract_features(tokens.to(device=self.device))
        sentence_representation = features[
            tokens.eq(self.task.source_dictionary.eos()), :
        ].view(features.size(0), -1, features.size(-1))[:, -1, :]
        logits = self.model.classification_heads[head](sentence_representation)
        if return_logits:
            return logits
        return F.log_softmax(logits, dim=-1)

    def fill_mask(
        self,
        masked_inputs: List[str],
        topk: int = 5,
        match_source_len: bool = True,
        **generate_kwargs
    ):
        masked_token = "<mask>"
        batch_tokens = []
        for masked_input in masked_inputs:
            assert (
                masked_token in masked_input
            ), "please add one {} token for the input".format(masked_token)

            text_spans = masked_input.split(masked_token)
            text_spans_bpe = (
                (" {0} ".format(masked_token))
                .join([self.bpe.encode(text_span.rstrip()) for text_span in text_spans])
                .strip()
            )
            tokens = self.task.source_dictionary.encode_line(
                "<s> " + text_spans_bpe + " </s>",
                append_eos=False,
                add_if_not_exist=False,
            ).long()
            batch_tokens.append(tokens)

        # ensure beam size is at least as big as topk
        generate_kwargs["beam"] = max(
            topk,
            generate_kwargs.get("beam", -1),
        )
        generate_kwargs["match_source_len"] = match_source_len
        batch_hypos = self.generate(batch_tokens, **generate_kwargs)

        return [
            [(self.decode(hypo["tokens"]), hypo["score"]) for hypo in hypos[:topk]]
            for hypos in batch_hypos
        ]
KosmosX-API-main
kosmosX/fairseq/fairseq/models/bart/hub_interface.py
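A hedged end-to-end sketch of the hub interface above. The checkpoint directory is a placeholder, and it assumes a BART checkpoint (plus its GPT-2 BPE files) has already been downloaded there; fill_mask returns, per input, a list of (decoded text, score) pairs as defined in the class.

# Hedged usage sketch; "/path/to/bart.large" is a placeholder, not a real path.
from fairseq.models.bart import BARTModel

bart = BARTModel.from_pretrained("/path/to/bart.large", checkpoint_file="model.pt")
bart.eval()  # disable dropout for inference

tokens = bart.encode("Hello world!")
print(bart.decode(tokens))  # round-trips to the original string

hypotheses = bart.fill_mask(["The capital of France is <mask>."], topk=3)
for text, score in hypotheses[0]:
    print(round(float(score), 3), text)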
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .wav2vec import *  # noqa
from .wav2vec2 import *  # noqa
from .wav2vec2_asr import *  # noqa
KosmosX-API-main
kosmosX/fairseq/fairseq/models/wav2vec/__init__.py
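A small sketch, assuming fairseq's usual model-registry behaviour: importing this package executes the @register_model decorators in the modules it re-exports, so the wav2vec architectures become reachable by name.

# Hedged sketch; MODEL_REGISTRY is fairseq's global name -> model-class mapping.
from fairseq.models import MODEL_REGISTRY
import fairseq.models.wav2vec  # noqa: F401  (runs the register_model decorators)

print("wav2vec2" in MODEL_REGISTRY)     # True
print("wav2vec_ctc" in MODEL_REGISTRY)  # True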
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import copy import math import re from argparse import Namespace from dataclasses import dataclass, field from typing import Any, Optional import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from omegaconf import II, MISSING, open_dict from fairseq import checkpoint_utils, tasks, utils from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.models import ( BaseFairseqModel, FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, ) from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerDecoderLayer from fairseq.tasks import FairseqTask @dataclass class Wav2Vec2AsrConfig(FairseqDataclass): w2v_path: str = field( default=MISSING, metadata={"help": "path to wav2vec 2.0 model"} ) no_pretrained_weights: bool = field( default=False, metadata={"help": "if true, does not load pretrained weights"} ) dropout_input: float = field( default=0.0, metadata={"help": "dropout to apply to the input (after feat extr)"}, ) final_dropout: float = field( default=0.0, metadata={"help": "dropout after transformer and before final projection"}, ) dropout: float = field( default=0.0, metadata={"help": "dropout probability inside wav2vec 2.0 model"} ) attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights inside wav2vec 2.0 model" }, ) activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN inside wav2vec 2.0 model" }, ) conv_feature_layers: Optional[str] = field( default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]", metadata={ "help": ( "string describing convolutional feature extraction " "layers in form of a python list that contains " "[(dim, kernel_size, stride), ...]" ), }, ) encoder_embed_dim: Optional[int] = field( default=768, metadata={"help": "encoder embedding dimension"} ) # masking apply_mask: bool = field( default=False, metadata={"help": "apply masking during fine-tuning"} ) mask_length: int = field( default=10, metadata={"help": "repeat the mask indices multiple times"} ) mask_prob: float = field( default=0.5, metadata={ "help": "probability of replacing a token with mask (normalized by length)" }, ) mask_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose masks"} ) mask_other: float = field( default=0, metadata={ "help": "secondary mask argument (used for more complex distributions), " "see help in compute_mask_indices" }, ) no_mask_overlap: bool = field( default=False, metadata={"help": "whether to allow masks to overlap"} ) mask_min_space: Optional[int] = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) # channel masking mask_channel_length: int = field( default=10, metadata={"help": "length of the mask for features (channels)"} ) mask_channel_prob: float = field( default=0.0, metadata={"help": "probability of replacing a feature with 0"} ) mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length for channel masking"}, ) mask_channel_other: float = field( default=0, metadata={ "help": "secondary mask 
argument (used for more complex distributions), " "see help in compute_mask_indicesh" }, ) no_mask_channel_overlap: bool = field( default=False, metadata={"help": "whether to allow channel masks to overlap"} ) freeze_finetune_updates: int = field( default=0, metadata={"help": "dont finetune wav2vec for this many updates"} ) feature_grad_mult: float = field( default=0.0, metadata={"help": "reset feature grad mult in wav2vec 2.0 to this"} ) layerdrop: float = field( default=0.0, metadata={"help": "probability of dropping a layer in wav2vec 2.0"} ) mask_channel_min_space: Optional[int] = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) mask_channel_before: bool = False normalize: bool = II("task.normalize") data: str = II("task.data") # this holds the loaded wav2vec args w2v_args: Any = None checkpoint_activations: bool = field( default=False, metadata={"help": "checkpoint_activations"} ) offload_activations: bool = field( default=False, metadata={"help": "offload_activations"} ) min_params_to_wrap: int = field( default=int(1e8), metadata={ "help": "minimum number of params for a layer to be wrapped with FSDP() when " "training with --ddp-backend=fully_sharded. Smaller values will " "improve memory efficiency, but may make torch.distributed " "communication less efficient due to smaller input sizes. This option " "is set to 0 (i.e., always wrap) when --checkpoint-activations or " "--offload-activations are passed." }, ) checkpoint_activations: bool = field( default=False, metadata={"help": "recompute activations and save memory for extra compute"}, ) ddp_backend: str = II("distributed_training.ddp_backend") @dataclass class Wav2Vec2CtcConfig(Wav2Vec2AsrConfig): blank_weight: float = 0 blank_mode: str = "add" @register_model("wav2vec_ctc", dataclass=Wav2Vec2CtcConfig) class Wav2VecCtc(BaseFairseqModel): def __init__(self, cfg: Wav2Vec2CtcConfig, w2v_encoder: BaseFairseqModel): super().__init__() self.cfg = cfg self.w2v_encoder = w2v_encoder self.blank_weight = cfg.blank_weight self.blank_mode = cfg.blank_mode def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) return state_dict @classmethod def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask): """Build a new model instance.""" w2v_encoder = Wav2VecEncoder(cfg, len(task.target_dictionary)) return cls(cfg, w2v_encoder) def get_logits(self, net_output, normalize=False): logits = net_output["encoder_out"] if self.blank_weight != 0: if self.blank_mode == "add": logits[..., 0] += self.blank_weight elif self.blank_mode == "set": logits[..., 0] = self.blank_weight else: raise Exception(f"invalid blank mode {self.blank_mode}") if net_output["padding_mask"] is not None and net_output["padding_mask"].any(): number_of_classes = logits.size(-1) masking_tensor = torch.ones( number_of_classes, device=logits.device ) * float("-inf") masking_tensor[0] = 0 logits[net_output["padding_mask"].T] = masking_tensor.type_as(logits) if normalize: logits = utils.log_softmax(logits.float(), dim=-1) return logits def get_normalized_probs(self, net_output, log_probs): """Get normalized probabilities (or log probs) from a net's output.""" logits = self.get_logits(net_output) if log_probs: return utils.log_softmax(logits.float(), dim=-1) else: return utils.softmax(logits.float(), dim=-1) def forward(self, **kwargs): x = self.w2v_encoder(**kwargs) return x @dataclass class Wav2Vec2Seq2SeqConfig(Wav2Vec2AsrConfig): decoder_embed_dim: int = field( default=768, 
metadata={"help": "decoder embedding dimension"} ) decoder_ffn_embed_dim: int = field( default=3072, metadata={"help": "decoder embedding dimension for FFN"} ) decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"}) decoder_layerdrop: float = field( default=0.0, metadata={"help": "decoder layerdrop chance"} ) decoder_attention_heads: int = field( default=4, metadata={"help": "num decoder attention heads"} ) decoder_learned_pos: bool = field( default=False, metadata={"help": "use learned positional embeddings in the decoder"}, ) decoder_normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each decoder block"} ) no_token_positional_embeddings: bool = field( default=False, metadata={ "help": "if set, disables positional embeddings (outside self attention)" }, ) decoder_dropout: float = field( default=0.0, metadata={"help": "dropout probability in the decoder"} ) decoder_attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights inside the decoder" }, ) decoder_activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN inside the decoder" }, ) max_target_positions: int = field( default=2048, metadata={"help": "max target positions"} ) share_decoder_input_output_embed: bool = field( default=False, metadata={"help": "share decoder input and output embeddings"} ) autoregressive: bool = II("task.autoregressive") @register_model("wav2vec_seq2seq", dataclass=Wav2Vec2Seq2SeqConfig) class Wav2Vec2Seq2SeqModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, cfg: Wav2Vec2Seq2SeqConfig, task: FairseqTask): """Build a new model instance.""" assert ( cfg.autoregressive ), "Please set task.autoregressive=true for seq2seq asr models" src_dict, tgt_dict = task.source_dictionary, task.target_dictionary def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) return emb decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim) encoder = cls.build_encoder(cfg) decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens) return Wav2Vec2Seq2SeqModel(encoder, decoder) @classmethod def build_encoder(cls, cfg: Wav2Vec2AsrConfig): return Wav2VecEncoder(cfg) @classmethod def build_decoder(cls, cfg: Wav2Vec2Seq2SeqConfig, tgt_dict, embed_tokens): return TransformerDecoder(cfg, tgt_dict, embed_tokens) def forward(self, **kwargs): encoder_out = self.encoder(**kwargs) decoder_out = self.decoder(encoder_out=encoder_out, **kwargs) return decoder_out def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) return state_dict class Wav2VecEncoder(FairseqEncoder): def __init__(self, cfg: Wav2Vec2AsrConfig, output_size=None): self.apply_mask = cfg.apply_mask arg_overrides = { "dropout": cfg.dropout, "activation_dropout": cfg.activation_dropout, "dropout_input": cfg.dropout_input, "attention_dropout": cfg.attention_dropout, "mask_length": cfg.mask_length, "mask_prob": cfg.mask_prob, "mask_selection": cfg.mask_selection, "mask_other": cfg.mask_other, "no_mask_overlap": cfg.no_mask_overlap, "mask_channel_length": cfg.mask_channel_length, "mask_channel_prob": cfg.mask_channel_prob, "mask_channel_before": cfg.mask_channel_before, "mask_channel_selection": cfg.mask_channel_selection, "mask_channel_other": 
cfg.mask_channel_other, "no_mask_channel_overlap": cfg.no_mask_channel_overlap, "encoder_layerdrop": cfg.layerdrop, "feature_grad_mult": cfg.feature_grad_mult, "checkpoint_activations": cfg.checkpoint_activations, "offload_activations": cfg.offload_activations, "min_params_to_wrap": cfg.min_params_to_wrap, } if cfg.w2v_args is None: state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides) w2v_args = state.get("cfg", None) if w2v_args is None: w2v_args = convert_namespace_to_omegaconf(state["args"]) w2v_args.criterion = None w2v_args.lr_scheduler = None cfg.w2v_args = w2v_args else: state = None w2v_args = cfg.w2v_args if isinstance(w2v_args, Namespace): cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args) assert cfg.normalize == w2v_args.task.normalize, ( "Fine-tuning works best when data normalization is the same. " "Please check that --normalize is set or unset for both pre-training and here" ) if hasattr(cfg, "checkpoint_activations") and cfg.checkpoint_activations: with open_dict(w2v_args): w2v_args.model.checkpoint_activations = cfg.checkpoint_activations w2v_args.task.data = cfg.data task = tasks.setup_task(w2v_args.task) model = task.build_model(w2v_args.model, from_checkpoint=True) if state is not None and not cfg.no_pretrained_weights: self.load_model_weights(state, model, cfg) model.remove_pretraining_modules() super().__init__(task.source_dictionary) d = w2v_args.model.encoder_embed_dim self.w2v_model = model self.final_dropout = nn.Dropout(cfg.final_dropout) self.freeze_finetune_updates = cfg.freeze_finetune_updates self.num_updates = 0 targ_d = None self.proj = None if output_size is not None: targ_d = output_size elif getattr(cfg, "decoder_embed_dim", d) != d: targ_d = cfg.decoder_embed_dim if targ_d is not None: self.proj = Linear(d, targ_d) def load_model_weights(self, state, model, cfg): if cfg.ddp_backend == "fully_sharded": from fairseq.distributed import FullyShardedDataParallel for name, module in model.named_modules(): if "encoder.layers" in name and len(name.split(".")) == 3: # Only for layers, we do a special handling and load the weights one by one # We dont load all weights together as that wont be memory efficient and may # cause oom new_dict = { k.replace(name + ".", ""): v for (k, v) in state["model"].items() if name + "." in k } assert isinstance(module, FullyShardedDataParallel) with module.summon_full_params(): module.load_state_dict(new_dict, strict=True) module._reset_lazy_init() # Once layers are loaded, filter them out and load everything else. 
r = re.compile("encoder.layers.\d.") filtered_list = list(filter(r.match, state["model"].keys())) new_big_dict = { k: v for (k, v) in state["model"].items() if k not in filtered_list } model.load_state_dict(new_big_dict, strict=False) else: model.load_state_dict(state["model"], strict=True) def set_num_updates(self, num_updates): """Set the number of parameters updates.""" super().set_num_updates(num_updates) self.num_updates = num_updates def forward(self, source, padding_mask, **kwargs): w2v_args = { "source": source, "padding_mask": padding_mask, "mask": self.apply_mask and self.training, } ft = self.freeze_finetune_updates <= self.num_updates with torch.no_grad() if not ft else contextlib.ExitStack(): res = self.w2v_model.extract_features(**w2v_args) x = res["x"] padding_mask = res["padding_mask"] # B x T x C -> T x B x C x = x.transpose(0, 1) x = self.final_dropout(x) if self.proj: x = self.proj(x) return { "encoder_out": x, # T x B x C "padding_mask": padding_mask, # B x T, "layer_results": res["layer_results"], } def forward_torchscript(self, net_input): if torch.jit.is_scripting(): return self.forward(net_input["source"], net_input["padding_mask"]) else: return self.forward_non_torchscript(net_input) def reorder_encoder_out(self, encoder_out, new_order): if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) if encoder_out["padding_mask"] is not None: encoder_out["padding_mask"] = encoder_out["padding_mask"].index_select( 0, new_order ) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return None def upgrade_state_dict_named(self, state_dict, name): return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__( self, cfg: Wav2Vec2Seq2SeqConfig, dictionary, embed_tokens, no_encoder_attn=False, ): super().__init__(dictionary) self.dropout = cfg.decoder_dropout self.share_input_output_embed = cfg.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = cfg.decoder_embed_dim self.output_embed_dim = cfg.decoder_embed_dim self.layerdrop = cfg.decoder_layerdrop self.padding_idx = embed_tokens.padding_idx self.max_target_positions = cfg.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( cfg.max_target_positions, embed_dim, self.padding_idx, learned=cfg.decoder_learned_pos, ) if not cfg.no_token_positional_embeddings else None ) # TODO: update this when transformer gets converted to dataclass configs transformer_cfg = copy.deepcopy(cfg) with open_dict(transformer_cfg): transformer_cfg.dropout = transformer_cfg.decoder_dropout transformer_cfg.attention_dropout = ( transformer_cfg.decoder_attention_dropout ) transformer_cfg.activation_dropout = ( transformer_cfg.decoder_activation_dropout ) self.layers = nn.ModuleList([]) self.layers.extend( [ TransformerDecoderLayer(transformer_cfg, no_encoder_attn) for _ in range(transformer_cfg.decoder_layers) ] ) if not self.share_input_output_embed: self.embed_out = nn.Parameter( torch.Tensor(len(dictionary), self.output_embed_dim) ) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if transformer_cfg.decoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ prev_output_tokens = prev_output_tokens.long() x, extra = self.extract_features( prev_output_tokens, encoder_out, incremental_state ) x = self.output_layer(x) return x, extra def extract_features( self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused ): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers self_attn_padding_mask = None if prev_output_tokens.eq(self.padding_idx).any(): self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx) for layer in self.layers: dropout_probability = np.random.random() if not self.training or (dropout_probability > self.layerdrop): x, attn, _ = layer( x, encoder_out["encoder_out"] if encoder_out is not None else None, encoder_out["padding_mask"] if encoder_out is not None else None, incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, self_attn_padding_mask=self_attn_padding_mask, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) return x, {"attn": attn, "inner_states": inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device or self._future_mask.size(0) < dim ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): return state_dict def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m
KosmosX-API-main
kosmosX/fairseq/fairseq/models/wav2vec/wav2vec2_asr.py
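A standalone sketch of the blank-weight handling in Wav2VecCtc.get_logits above, written against plain tensors so it runs without a pretrained wav2vec 2.0 checkpoint; the shapes, weight, and mode are illustrative.

# Mirrors the blank_weight / blank_mode branch of Wav2VecCtc.get_logits.
import torch

blank_weight, blank_mode = 2.0, "add"  # cfg.blank_weight / cfg.blank_mode
logits = torch.randn(50, 4, 32)        # T x B x V encoder output; index 0 is the CTC blank

if blank_weight != 0:
    if blank_mode == "add":
        logits[..., 0] += blank_weight  # bias the blank logit upward
    elif blank_mode == "set":
        logits[..., 0] = blank_weight   # overwrite the blank logit
    else:
        raise Exception(f"invalid blank mode {blank_mode}")

log_probs = torch.log_softmax(logits.float(), dim=-1)  # as get_normalized_probs does
print(log_probs.shape)  # torch.Size([50, 4, 32])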
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass, field from typing import List, Tuple import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.data.data_utils import compute_mask_indices from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import BaseFairseqModel, register_model from fairseq.modules import ( Fp32GroupNorm, Fp32LayerNorm, GradMultiply, GumbelVectorQuantizer, LayerNorm, MultiheadAttention, SamePad, TransposeLast, ) from fairseq.modules.checkpoint_activations import checkpoint_wrapper from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import buffered_arange, index_put, is_xla_tensor from fairseq.distributed import fsdp_wrap from fairseq.modules.conformer_layer import ConformerWav2Vec2EncoderLayer from fairseq.modules import RelPositionalEncoding from .utils import pad_to_multiple EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"]) MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"]) LAYER_TYPE_CHOICES = ChoiceEnum(["transformer", "conformer"]) @dataclass class Wav2Vec2Config(FairseqDataclass): extractor_mode: EXTRACTOR_MODE_CHOICES = field( default="default", metadata={ "help": "mode for feature extractor. default has a single group norm with d " "groups in the first conv block, whereas layer_norm has layer norms in " "every block (meant to use with normalize=True)" }, ) encoder_layers: int = field( default=12, metadata={"help": "num encoder layers in the transformer"} ) encoder_embed_dim: int = field( default=768, metadata={"help": "encoder embedding dimension"} ) encoder_ffn_embed_dim: int = field( default=3072, metadata={"help": "encoder embedding dimension for FFN"} ) encoder_attention_heads: int = field( default=12, metadata={"help": "num encoder attention heads"} ) activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( default="gelu", metadata={"help": "activation function to use"} ) layer_type: LAYER_TYPE_CHOICES = field( default="transformer", metadata={"help": "layer type in encoder"} ) # dropouts dropout: float = field( default=0.1, metadata={"help": "dropout probability for the transformer"} ) attention_dropout: float = field( default=0.1, metadata={"help": "dropout probability for attention weights"} ) activation_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN"} ) encoder_layerdrop: float = field( default=0.0, metadata={"help": "probability of dropping a tarnsformer layer"} ) dropout_input: float = field( default=0.0, metadata={"help": "dropout to apply to the input (after feat extr)"}, ) dropout_features: float = field( default=0.0, metadata={"help": "dropout to apply to the features (after feat extr)"}, ) final_dim: int = field( default=0, metadata={ "help": "project final representations and targets to this many dimensions." 
"set to encoder_embed_dim is <= 0" }, ) layer_norm_first: bool = field( default=False, metadata={"help": "apply layernorm first in the transformer"} ) conv_feature_layers: str = field( default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]", metadata={ "help": "string describing convolutional feature extraction layers in form of a python list that contains " "[(dim, kernel_size, stride), ...]" }, ) conv_bias: bool = field( default=False, metadata={"help": "include bias in conv encoder"} ) logit_temp: float = field( default=0.1, metadata={"help": "temperature to divide logits by"} ) quantize_targets: bool = field( default=False, metadata={"help": "use quantized targets"} ) quantize_input: bool = field( default=False, metadata={"help": "use quantized inputs"} ) same_quantizer: bool = field( default=False, metadata={"help": "use same quantizer for inputs and targets"} ) target_glu: bool = field( default=False, metadata={"help": "adds projection + glu to targets"} ) feature_grad_mult: float = field( default=1.0, metadata={"help": "multiply feature extractor var grads by this"} ) quantizer_depth: int = field( default=1, metadata={"help": "number of quantizer layers"}, ) quantizer_factor: int = field( default=3, metadata={ "help": "dimensionality increase for inner quantizer layers (if depth > 1)" }, ) latent_vars: int = field( default=320, metadata={"help": "number of latent variables V in each group of the codebook"}, ) latent_groups: int = field( default=2, metadata={"help": "number of groups G of latent variables in the codebook"}, ) latent_dim: int = field( default=0, metadata={ "help": "if > 0, uses this dimensionality for latent variables. " "otherwise uses final_dim / latent_groups" }, ) # masking mask_length: int = field(default=10, metadata={"help": "mask length"}) mask_prob: float = field( default=0.65, metadata={"help": "probability of replacing a token with mask"} ) mask_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length"} ) mask_other: float = field( default=0, metadata={ "help": "secondary mask argument (used for more complex distributions), " "see help in compute_mask_indices" }, ) no_mask_overlap: bool = field( default=False, metadata={"help": "whether to allow masks to overlap"} ) mask_min_space: int = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) # channel masking mask_channel_length: int = field( default=10, metadata={"help": "length of the mask for features (channels)"} ) mask_channel_prob: float = field( default=0.0, metadata={"help": "probability of replacing a feature with 0"} ) mask_channel_before: bool = False mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length for channel masking"}, ) mask_channel_other: float = field( default=0, metadata={ "help": "secondary mask argument (used for more complex distributions), " "see help in compute_mask_indicesh" }, ) no_mask_channel_overlap: bool = field( default=False, metadata={"help": "whether to allow channel masks to overlap"} ) mask_channel_min_space: int = field( default=1, metadata={"help": "min space between spans (if no overlap is enabled)"}, ) # negative selection num_negatives: int = field( default=100, metadata={"help": "number of negative examples from the same sample"}, ) negatives_from_everywhere: bool = field( default=False, metadata={"help": "sample negatives from everywhere, not just masked states"}, ) 
cross_sample_negatives: int = field( default=0, metadata={"help": "number of negative examples from the any sample"} ) codebook_negatives: int = field( default=0, metadata={"help": "number of negative examples codebook"} ) # positional embeddings conv_pos: int = field( default=128, metadata={"help": "number of filters for convolutional positional embeddings"}, ) conv_pos_groups: int = field( default=16, metadata={"help": "number of groups for convolutional positional embedding"}, ) latent_temp: Tuple[float, float, float] = field( default=(2, 0.5, 0.999995), metadata={ "help": "temperature for latent variable sampling. " "can be tuple of 3 values (start, end, decay)" }, ) max_positions: int = field(default=100000, metadata={"help": "Max positions"}) checkpoint_activations: bool = field( default=False, metadata={"help": "recompute activations and save memory for extra compute"}, ) # FP16 optimization required_seq_len_multiple: int = field( default=1, metadata={ "help": "pad the input to encoder such that the sequence length is divisible by multiple" }, ) crop_seq_to_multiple: int = field( default=1, metadata={ "help": "crop convolutional feature extractor output such that the sequence length is divisible by multiple" }, ) # Conformer depthwise_conv_kernel_size: int = field( default=31, metadata={ "help": "depthwise-conv-kernel-size for convolution in conformer layer" }, ) attn_type: str = field( default="", metadata={"help": "if espnet use ESPNET MHA"}, ) pos_enc_type: str = field( default="abs", metadata={"help": "Positional encoding type to use in conformer"}, ) fp16: bool = field(default=False, metadata={"help": "If fp16 is being used"}) @register_model("wav2vec2", dataclass=Wav2Vec2Config) class Wav2Vec2Model(BaseFairseqModel): def __init__(self, cfg: Wav2Vec2Config): super().__init__() self.cfg = cfg feature_enc_layers = eval(cfg.conv_feature_layers) self.embed = feature_enc_layers[-1][0] self.feature_extractor = ConvFeatureExtractionModel( conv_layers=feature_enc_layers, dropout=0.0, mode=cfg.extractor_mode, conv_bias=cfg.conv_bias, ) self.post_extract_proj = ( nn.Linear(self.embed, cfg.encoder_embed_dim) if self.embed != cfg.encoder_embed_dim and not cfg.quantize_input else None ) self.crop_seq_to_multiple = cfg.crop_seq_to_multiple self.mask_prob = cfg.mask_prob self.mask_selection = cfg.mask_selection self.mask_other = cfg.mask_other self.mask_length = cfg.mask_length self.no_mask_overlap = cfg.no_mask_overlap self.mask_min_space = cfg.mask_min_space self.mask_channel_prob = cfg.mask_channel_prob self.mask_channel_before = cfg.mask_channel_before self.mask_channel_selection = cfg.mask_channel_selection self.mask_channel_other = cfg.mask_channel_other self.mask_channel_length = cfg.mask_channel_length self.no_mask_channel_overlap = cfg.no_mask_channel_overlap self.mask_channel_min_space = cfg.mask_channel_min_space self.dropout_input = nn.Dropout(cfg.dropout_input) self.dropout_features = nn.Dropout(cfg.dropout_features) self.feature_grad_mult = cfg.feature_grad_mult self.quantizer = None self.input_quantizer = None self.n_negatives = cfg.num_negatives self.cross_sample_negatives = cfg.cross_sample_negatives self.codebook_negatives = cfg.codebook_negatives self.negatives_from_everywhere = cfg.negatives_from_everywhere self.logit_temp = cfg.logit_temp final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim if cfg.quantize_targets: vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else final_dim self.quantizer = GumbelVectorQuantizer( dim=self.embed, 
num_vars=cfg.latent_vars, temp=cfg.latent_temp, groups=cfg.latent_groups, combine_groups=False, vq_dim=vq_dim, time_first=True, weight_proj_depth=cfg.quantizer_depth, weight_proj_factor=cfg.quantizer_factor, ) self.project_q = nn.Linear(vq_dim, final_dim) else: self.project_q = nn.Linear(self.embed, final_dim) if cfg.quantize_input: if cfg.same_quantizer and self.quantizer is not None: vq_dim = final_dim self.input_quantizer = self.quantizer else: vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else cfg.encoder_embed_dim self.input_quantizer = GumbelVectorQuantizer( dim=self.embed, num_vars=cfg.latent_vars, temp=cfg.latent_temp, groups=cfg.latent_groups, combine_groups=False, vq_dim=vq_dim, time_first=True, weight_proj_depth=cfg.quantizer_depth, weight_proj_factor=cfg.quantizer_factor, ) self.project_inp = nn.Linear(vq_dim, cfg.encoder_embed_dim) self.mask_emb = nn.Parameter( torch.FloatTensor(cfg.encoder_embed_dim).uniform_() ) encoder_cls = TransformerEncoder if cfg.layer_type == "conformer" and cfg.pos_enc_type in ["rel_pos", "rope"]: encoder_cls = ConformerEncoder self.encoder = encoder_cls(cfg) self.layer_norm = LayerNorm(self.embed) self.target_glu = None if cfg.target_glu: self.target_glu = nn.Sequential( nn.Linear(final_dim, final_dim * 2), nn.GLU() ) self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) """Upgrade a (possibly old) state dict for new versions of fairseq.""" return state_dict @classmethod def build_model(cls, cfg: Wav2Vec2Config, task=None): """Build a new model instance.""" return cls(cfg) def apply_mask( self, x, padding_mask, mask_indices=None, mask_channel_indices=None, ): B, T, C = x.shape if self.mask_channel_prob > 0 and self.mask_channel_before: mask_channel_indices = compute_mask_indices( (B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space, ) mask_channel_indices = ( torch.from_numpy(mask_channel_indices) .to(x.device) .unsqueeze(1) .expand(-1, T, -1) ) x[mask_channel_indices] = 0 if self.mask_prob > 0: if mask_indices is None: mask_indices = compute_mask_indices( (B, T), padding_mask, self.mask_prob, self.mask_length, self.mask_selection, self.mask_other, min_masks=2, no_overlap=self.no_mask_overlap, min_space=self.mask_min_space, ) mask_indices = torch.from_numpy(mask_indices).to(x.device) x = index_put(x, mask_indices, self.mask_emb) else: mask_indices = None if self.mask_channel_prob > 0 and not self.mask_channel_before: if mask_channel_indices is None: mask_channel_indices = compute_mask_indices( (B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space, ) mask_channel_indices = ( torch.from_numpy(mask_channel_indices) .to(x.device) .unsqueeze(1) .expand(-1, T, -1) ) x = index_put(x, mask_channel_indices, 0) return x, mask_indices def sample_negatives(self, y, num, padding_count=None): if self.n_negatives == 0 and self.cross_sample_negatives == 0: return y.new(0) bsz, tsz, fsz = y.shape y = y.view(-1, fsz) # BTC => (BxT)C # FIXME: what happens if padding_count is specified? 
cross_high = tsz * bsz high = tsz - (padding_count or 0) with torch.no_grad(): assert high > 1, f"{bsz,tsz,fsz}" if self.n_negatives > 0: tszs = ( buffered_arange(num) .unsqueeze(-1) .expand(-1, self.n_negatives) .flatten() ) neg_idxs = torch.randint( low=0, high=high - 1, size=(bsz, self.n_negatives * num) ) neg_idxs[neg_idxs >= tszs] += 1 if self.cross_sample_negatives > 0: tszs = ( buffered_arange(num) .unsqueeze(-1) .expand(-1, self.cross_sample_negatives) .flatten() ) cross_neg_idxs = torch.randint( low=0, high=cross_high - 1, size=(bsz, self.cross_sample_negatives * num), ) cross_neg_idxs[cross_neg_idxs >= tszs] += 1 if self.n_negatives > 0: neg_idxs = neg_idxs + (torch.arange(bsz).unsqueeze(1) * high) else: neg_idxs = cross_neg_idxs if self.cross_sample_negatives > 0 and self.n_negatives > 0: neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1) negs = y[neg_idxs.view(-1)] negs = negs.view( bsz, num, self.n_negatives + self.cross_sample_negatives, fsz ).permute( 2, 0, 1, 3 ) # to NxBxTxC return negs, neg_idxs def compute_preds(self, x, y, negatives): neg_is_pos = (y == negatives).all(-1) y = y.unsqueeze(0) targets = torch.cat([y, negatives], dim=0) logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x) logits = logits / self.logit_temp if is_xla_tensor(logits) or neg_is_pos.any(): fillval = -float(2 ** 30) if not hasattr(self, "_inftensor"): self._inftensor = ( torch.tensor(fillval).to(x.device) if is_xla_tensor(logits) else float("-inf") ) logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor) return logits def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): return torch.floor((input_length - kernel_size) / stride + 1) conv_cfg_list = eval(self.cfg.conv_feature_layers) for i in range(len(conv_cfg_list)): input_lengths = _conv_out_length( input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2] ) return input_lengths.to(torch.long) def forward( self, source, padding_mask=None, mask=True, features_only=False, layer=None, mask_indices=None, mask_channel_indices=None, padding_count=None, ): if self.feature_grad_mult > 0: features = self.feature_extractor(source) if self.feature_grad_mult != 1.0: features = GradMultiply.apply(features, self.feature_grad_mult) else: with torch.no_grad(): features = self.feature_extractor(source) features_pen = features.float().pow(2).mean() features = features.transpose(1, 2) features = self.layer_norm(features) unmasked_features = features.clone() if padding_mask is not None and padding_mask.any(): input_lengths = (1 - padding_mask.long()).sum(-1) # apply conv formula to get real output_lengths output_lengths = self._get_feat_extract_output_lengths(input_lengths) padding_mask = torch.zeros( features.shape[:2], dtype=features.dtype, device=features.device ) # these two operations makes sure that all values # before the output lengths indices are attended to padding_mask[ ( torch.arange(padding_mask.shape[0], device=padding_mask.device), output_lengths - 1, ) ] = 1 padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool() else: padding_mask = None time_steps_to_drop = features.size(1) % self.crop_seq_to_multiple if time_steps_to_drop != 0: features = features[:, :-time_steps_to_drop] unmasked_features = unmasked_features[:, :-time_steps_to_drop] if padding_mask is not None: padding_mask = padding_mask[:, :-time_steps_to_drop] if self.post_extract_proj is not 
None: features = self.post_extract_proj(features) features = self.dropout_input(features) unmasked_features = self.dropout_features(unmasked_features) num_vars = None code_ppl = None prob_ppl = None curr_temp = None if self.input_quantizer: q = self.input_quantizer(features, produce_targets=False) features = q["x"] num_vars = q["num_vars"] code_ppl = q["code_perplexity"] prob_ppl = q["prob_perplexity"] curr_temp = q["temp"] features = self.project_inp(features) if mask: x, mask_indices = self.apply_mask( features, padding_mask, mask_indices=mask_indices, mask_channel_indices=mask_channel_indices, ) if not is_xla_tensor(x) and mask_indices is not None: # tpu-comment: reducing the size in a dynamic way causes # too many recompilations on xla. y = unmasked_features[mask_indices].view( unmasked_features.size(0), -1, unmasked_features.size(-1) ) else: y = unmasked_features else: x = features y = unmasked_features mask_indices = None x, layer_results = self.encoder(x, padding_mask=padding_mask, layer=layer) if features_only: return { "x": x, "padding_mask": padding_mask, "features": unmasked_features, "layer_results": layer_results, } if self.quantizer: q = self.quantizer(y, produce_targets=False) y = q["x"] num_vars = q["num_vars"] code_ppl = q["code_perplexity"] prob_ppl = q["prob_perplexity"] curr_temp = q["temp"] y = self.project_q(y) if self.negatives_from_everywhere: neg_cands = self.quantizer(unmasked_features, produce_targets=False)[ "x" ] negs, _ = self.sample_negatives( neg_cands, y.size(1), padding_count=padding_count, ) negs = self.project_q(negs) else: negs, _ = self.sample_negatives( y, y.size(1), padding_count=padding_count, ) if self.codebook_negatives > 0: cb_negs = self.quantizer.sample_from_codebook( y.size(0) * y.size(1), self.codebook_negatives ) cb_negs = cb_negs.view( self.codebook_negatives, y.size(0), y.size(1), -1 ) # order doesnt matter cb_negs = self.project_q(cb_negs) negs = torch.cat([negs, cb_negs], dim=0) else: y = self.project_q(y) if self.negatives_from_everywhere: negs, _ = self.sample_negatives( unmasked_features, y.size(1), padding_count=padding_count, ) negs = self.project_q(negs) else: negs, _ = self.sample_negatives( y, y.size(1), padding_count=padding_count, ) if not is_xla_tensor(x): # tpu-comment: reducing the size in a dynamic way causes # too many recompilations on xla. 
x = x[mask_indices].view(x.size(0), -1, x.size(-1)) if self.target_glu: y = self.target_glu(y) negs = self.target_glu(negs) x = self.final_proj(x) x = self.compute_preds(x, y, negs) result = { "x": x, "padding_mask": padding_mask, "features_pen": features_pen, } if prob_ppl is not None: result["prob_perplexity"] = prob_ppl result["code_perplexity"] = code_ppl result["num_vars"] = num_vars result["temp"] = curr_temp return result def quantize(self, x): assert self.quantizer is not None x = self.feature_extractor(x) x = x.transpose(1, 2) x = self.layer_norm(x) return self.quantizer.forward_idx(x) def extract_features(self, source, padding_mask, mask=False, layer=None): res = self.forward( source, padding_mask, mask=mask, features_only=True, layer=layer ) return res def get_logits(self, net_output): logits = net_output["x"] logits = logits.transpose(0, 2) logits = logits.reshape(-1, logits.size(-1)) return logits def get_targets(self, sample, net_output, expand_steps=True): x = net_output["x"] return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long) def get_extra_losses(self, net_output): pen = [] if "prob_perplexity" in net_output: pen.append( (net_output["num_vars"] - net_output["prob_perplexity"]) / net_output["num_vars"] ) if "features_pen" in net_output: pen.append(net_output["features_pen"]) return pen def remove_pretraining_modules(self): self.quantizer = None self.project_q = None self.target_glu = None self.final_proj = None class ConvFeatureExtractionModel(nn.Module): def __init__( self, conv_layers: List[Tuple[int, int, int]], dropout: float = 0.0, mode: str = "default", conv_bias: bool = False, ): super().__init__() assert mode in {"default", "layer_norm"} def block( n_in, n_out, k, stride, is_layer_norm=False, is_group_norm=False, conv_bias=False, ): def make_conv(): conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias) nn.init.kaiming_normal_(conv.weight) return conv assert is_layer_norm and is_group_norm is False, "layer norm and group norm are exclusive" if is_layer_norm: return nn.Sequential( make_conv(), nn.Dropout(p=dropout), nn.Sequential( TransposeLast(), Fp32LayerNorm(dim, elementwise_affine=True), TransposeLast(), ), nn.GELU(), ) elif is_group_norm: return nn.Sequential( make_conv(), nn.Dropout(p=dropout), Fp32GroupNorm(dim, dim, affine=True), nn.GELU(), ) else: return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU()) in_d = 1 self.conv_layers = nn.ModuleList() for i, cl in enumerate(conv_layers): assert len(cl) == 3, "invalid conv definition: " + str(cl) (dim, k, stride) = cl self.conv_layers.append( block( in_d, dim, k, stride, is_layer_norm=mode == "layer_norm", is_group_norm=mode == "default" and i == 0, conv_bias=conv_bias, ) ) in_d = dim def forward(self, x): # BxT -> BxCxT x = x.unsqueeze(1) for conv in self.conv_layers: x = conv(x) return x class TransformerEncoder(nn.Module): def build_encoder_layer(self, args): if args.layer_type == "transformer": layer = TransformerSentenceEncoderLayer( embedding_dim=self.embedding_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=self.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_fn=args.activation_fn, layer_norm_first=args.layer_norm_first, ) elif args.layer_type == "conformer": layer = ConformerWav2Vec2EncoderLayer( embed_dim=self.embedding_dim, ffn_embed_dim=args.encoder_ffn_embed_dim, attention_heads=args.encoder_attention_heads, dropout=args.dropout, 
depthwise_conv_kernel_size=args.depthwise_conv_kernel_size, activation_fn="swish", attn_type=args.attn_type, use_fp16=args.fp16, pos_enc_type="abs", ) layer = fsdp_wrap(layer) if args.checkpoint_activations: layer = checkpoint_wrapper(layer) return layer def __init__(self, args): super().__init__() self.dropout = args.dropout self.embedding_dim = args.encoder_embed_dim self.required_seq_len_multiple = args.required_seq_len_multiple self.pos_conv = nn.Conv1d( self.embedding_dim, self.embedding_dim, kernel_size=args.conv_pos, padding=args.conv_pos // 2, groups=args.conv_pos_groups, ) dropout = 0 std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim)) nn.init.normal_(self.pos_conv.weight, mean=0, std=std) nn.init.constant_(self.pos_conv.bias, 0) self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2) self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU()) self.layers = nn.ModuleList( [self.build_encoder_layer(args) for _ in range(args.encoder_layers)] ) self.layer_norm_first = args.layer_norm_first self.layer_norm = LayerNorm(self.embedding_dim) self.layerdrop = args.encoder_layerdrop self.apply(init_bert_params) def forward(self, x, padding_mask=None, layer=None): x, layer_results = self.extract_features(x, padding_mask, layer) if self.layer_norm_first and layer is None: x = self.layer_norm(x) return x, layer_results def extract_features(self, x, padding_mask=None, tgt_layer=None): if padding_mask is not None: x = index_put(x, padding_mask, 0) x_conv = self.pos_conv(x.transpose(1, 2)) x_conv = x_conv.transpose(1, 2) x = x + x_conv if not self.layer_norm_first: x = self.layer_norm(x) # pad to the sequence length dimension x, pad_length = pad_to_multiple( x, self.required_seq_len_multiple, dim=-2, value=0 ) if pad_length > 0 and padding_mask is None: padding_mask = x.new_zeros((x.size(0), x.size(1)), dtype=torch.bool) padding_mask[:, -pad_length:] = True else: padding_mask, _ = pad_to_multiple( padding_mask, self.required_seq_len_multiple, dim=-1, value=True ) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) layer_results = [] r = None for i, layer in enumerate(self.layers): dropout_probability = np.random.random() if not self.training or (dropout_probability > self.layerdrop): x, z = layer(x, self_attn_padding_mask=padding_mask, need_weights=False) if tgt_layer is not None: # unpad if needed if pad_length > 0: layer_results.append( ( x[:-pad_length], z[:, :-pad_length, :-pad_length] if z is not None else z, ) ) else: layer_results.append((x, z)) if i == tgt_layer: r = x break if r is not None: x = r # T x B x C -> B x T x C x = x.transpose(0, 1) # undo paddding if pad_length > 0: x = x[:, :-pad_length] return x, layer_results def max_positions(self): """Maximum output length supported by the encoder.""" return self.args.max_positions def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" return state_dict class ConformerEncoder(TransformerEncoder): def build_encoder_layer(self, args): layer = ConformerWav2Vec2EncoderLayer( embed_dim=self.embedding_dim, ffn_embed_dim=args.encoder_ffn_embed_dim, attention_heads=args.encoder_attention_heads, dropout=args.dropout, depthwise_conv_kernel_size=args.depthwise_conv_kernel_size, activation_fn="swish", attn_type=args.attn_type, pos_enc_type=args.pos_enc_type, use_fp16=args.fp16, # only used for rope ) layer = fsdp_wrap(layer) if args.checkpoint_activations: layer = 
checkpoint_wrapper(layer) return layer def __init__(self, args): super().__init__(args) self.args = args self.dropout = args.dropout self.embedding_dim = args.encoder_embed_dim self.pos_enc_type = args.pos_enc_type max_source_positions = self.max_positions() if self.pos_enc_type == "rel_pos": self.embed_positions = RelPositionalEncoding( max_source_positions, self.embedding_dim ) elif self.pos_enc_type == "rope": self.embed_positions = None else: raise Exception("Unsupported positional encoding type") self.layers = nn.ModuleList( [self.build_encoder_layer(args) for _ in range(args.encoder_layers)] ) self.layer_norm_first = args.layer_norm_first self.layer_norm = LayerNorm(self.embedding_dim) self.layerdrop = args.encoder_layerdrop self.apply(init_bert_params) def extract_features(self, x, padding_mask=None, tgt_layer=None): if padding_mask is not None: x = index_put(x, padding_mask, 0) # B x T x C -> T x B x C x = x.transpose(0, 1) # B X T X C here position_emb = None if self.pos_enc_type == "rel_pos": position_emb = self.embed_positions(x) if not self.layer_norm_first: x = self.layer_norm(x) x = F.dropout(x, p=self.dropout, training=self.training) layer_results = [] r = None for i, layer in enumerate(self.layers): dropout_probability = np.random.random() if not self.training or (dropout_probability > self.layerdrop): x, z = layer( x, self_attn_padding_mask=padding_mask, need_weights=False, position_emb=position_emb, ) if tgt_layer is not None: layer_results.append((x, z)) if i == tgt_layer: r = x break if r is not None: x = r # T x B x C -> B x T x C x = x.transpose(0, 1) return x, layer_results class TransformerSentenceEncoderLayer(nn.Module): """ Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained models. """ def __init__( self, embedding_dim: float = 768, ffn_embedding_dim: float = 3072, num_attention_heads: float = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, activation_fn: str = "relu", layer_norm_first: bool = False, ) -> None: super().__init__() # Initialize parameters self.embedding_dim = embedding_dim self.dropout = dropout self.activation_dropout = activation_dropout # Initialize blocks self.activation_fn = utils.get_activation_fn(activation_fn) self.self_attn = MultiheadAttention( self.embedding_dim, num_attention_heads, dropout=attention_dropout, self_attention=True, ) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(self.activation_dropout) self.dropout3 = nn.Dropout(dropout) self.layer_norm_first = layer_norm_first # layer norm associated with the self attention layer self.self_attn_layer_norm = LayerNorm(self.embedding_dim) self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim) self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim) # layer norm associated with the position wise feed-forward NN self.final_layer_norm = LayerNorm(self.embedding_dim) def forward( self, x: torch.Tensor, self_attn_mask: torch.Tensor = None, self_attn_padding_mask: torch.Tensor = None, need_weights: bool = False, att_args=None, ): """ LayerNorm is applied either before or after the self-attention/ffn modules similar to the original Transformer imlementation. 
""" residual = x if self.layer_norm_first: x = self.self_attn_layer_norm(x) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, attn_mask=self_attn_mask, ) x = self.dropout1(x) x = residual + x residual = x x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) x = self.dropout3(x) x = residual + x else: x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, ) x = self.dropout1(x) x = residual + x x = self.self_attn_layer_norm(x) residual = x x = self.activation_fn(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) x = self.dropout3(x) x = residual + x x = self.final_layer_norm(x) return x, attn
KosmosX-API-main
kosmosX/fairseq/fairseq/models/wav2vec/wav2vec2.py
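The TransformerEncoder above applies LayerDrop: while training, each encoder block is skipped with probability args.encoder_layerdrop, and at eval time every block runs. Below is a minimal, self-contained sketch of that pattern; the class and layer choices are hypothetical stand-ins, not the fairseq modules above.

# Minimal LayerDrop sketch (illustration only; TinyLayerDropStack is hypothetical).
# During training each layer is skipped with probability layerdrop; in eval mode
# every layer is applied, matching the rule used by the encoder above.
import torch
import torch.nn as nn


class TinyLayerDropStack(nn.Module):
    def __init__(self, dim=16, num_layers=6, layerdrop=0.5):
        super().__init__()
        self.layers = nn.ModuleList(nn.Linear(dim, dim) for _ in range(num_layers))
        self.layerdrop = layerdrop

    def forward(self, x):
        for layer in self.layers:
            # layers are only dropped while training
            if self.training and torch.rand(()) < self.layerdrop:
                continue
            x = layer(x)
        return x


if __name__ == "__main__":
    m = TinyLayerDropStack()
    x = torch.randn(2, 16)
    m.train()
    _ = m(x)  # a random subset of layers runs
    m.eval()
    _ = m(x)  # all layers run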
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

import torch.nn.functional as F


def pad_to_multiple(x, multiple, dim=-1, value=0):
    # Inspired from https://github.com/lucidrains/local-attention/blob/master/local_attention/local_attention.py#L41
    if x is None:
        return None, 0
    tsz = x.size(dim)
    m = tsz / multiple
    remainder = math.ceil(m) * multiple - tsz
    if m.is_integer():
        return x, 0
    pad_offset = (0,) * (-1 - dim) * 2
    return F.pad(x, (*pad_offset, 0, remainder), value=value), remainder
KosmosX-API-main
kosmosX/fairseq/fairseq/models/wav2vec/utils.py
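pad_to_multiple above pads one dimension of a tensor up to the next multiple of a given value and reports how much padding was added, which the wav2vec 2.0 encoder later strips off again. A small usage sketch follows; the shapes and the multiple of 4 are arbitrary illustration values, not something the model prescribes.

# Usage sketch for pad_to_multiple (shapes chosen arbitrarily for illustration).
import torch
from fairseq.models.wav2vec.utils import pad_to_multiple

x = torch.randn(2, 7, 16)                        # B x T x C with T = 7
y, pad = pad_to_multiple(x, 4, dim=-2)           # pad T up to the next multiple of 4
print(y.shape, pad)                              # torch.Size([2, 8, 16]) 1

# A boolean padding mask extended along its last dimension, with True marking
# padded positions, mirroring how the encoder above grows padding_mask.
mask = torch.zeros(2, 7, dtype=torch.bool)
mask, _ = pad_to_multiple(mask, 4, dim=-1, value=True)
print(mask.shape)                                # torch.Size([2, 8])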
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field import logging import math from typing import Optional, Tuple from omegaconf import II import sys import torch import torch.nn as nn import torch.nn.functional as F from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import BaseFairseqModel, register_model from fairseq.modules import ( Fp32GroupNorm, Fp32LayerNorm, GumbelVectorQuantizer, KmeansVectorQuantizer, TransposeLast, ) from fairseq.tasks import FairseqTask from fairseq.utils import buffered_arange logger = logging.getLogger(__name__) AGGREGATOR_CHOICES = ChoiceEnum(["cnn", "gru"]) PROJECT_FEATURES_CHOICES = ChoiceEnum(["none", "same", "new"]) ACTIVATION_CHOICES = ChoiceEnum(["relu", "gelu"]) VQ_TYPE_CHOICES = ChoiceEnum(["none", "gumbel", "kmeans"]) @dataclass class Wav2VecConfig(FairseqDataclass): prediction_steps: int = field( default=12, metadata={"help": "number of steps ahead to predict"} ) sample_distance: Optional[int] = field( default=None, metadata={ "help": "sample distance from target. does not work properly with cross-sampling" }, ) cross_sample_negatives: int = field( default=0, metadata={"help": "num of cross sampled negatives"} ) num_negatives: int = field( default=10, metadata={"help": "num of sampled negatives"} ) conv_feature_layers: str = field( default="[(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1), (512, 1, 1)]", metadata={ "help": "convolutional feature extraction layers [(dim, kernel_size, stride), ...]" }, ) conv_aggregator_layers: str = field( default="[(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)]", metadata={ "help": "convolutional aggregator layers [(dim, kernel_size, stride), ...]" }, ) dropout: float = field( default=0.0, metadata={"help": "dropout to apply within the model"} ) dropout_features: float = field( default=0.0, metadata={"help": "dropout to apply to the features"} ) dropout_agg: float = field( default=0.0, metadata={"help": "dropout to apply after aggregation step"} ) aggregator: AGGREGATOR_CHOICES = field( default="cnn", metadata={"help": "type of aggregator to use"} ) gru_dim: int = field(default=512, metadata={"help": "GRU dimensionality"}) no_conv_bias: bool = field( default=False, metadata={"help": "if set, does not learn bias for conv layers"} ) agg_zero_pad: bool = field( default=False, metadata={"help": "if set, zero pads in aggregator instead of repl pad"}, ) skip_connections_feat: bool = field( default=False, metadata={"help": "if set, adds skip connections to the feature extractor"}, ) skip_connections_agg: bool = field( default=True, metadata={"help": "if set, adds skip connections to the aggregator"}, ) residual_scale: float = field( default=0.5, metadata={"help": "scales residual by sqrt(value)"} ) log_compression: bool = field( default=True, metadata={"help": "if set, adds a log compression to feature extractor"}, ) balanced_classes: bool = field( default=False, metadata={"help": "if set, loss is scaled to balance for number of negatives"}, ) project_features: PROJECT_FEATURES_CHOICES = field( default="none", metadata={ "help": "if not none, features are projected using the (same or new) aggregator" }, ) non_affine_group_norm: bool = field( default=False, metadata={"help": "if 
set, group norm is not affine"} ) offset: str = field( default="auto", metadata={ "help": "if set to 'auto', it is computed automatically from the receptive field, else set to int value" }, ) activation: ACTIVATION_CHOICES = field( default="relu", metadata={ "help": "if set to 'auto', it is computed automatically from the receptive field, else set to int value" }, ) vq_type: VQ_TYPE_CHOICES = field( default="none", metadata={"help": "which type of quantizer to use"} ) vq_vars: int = field( default=320, metadata={"help": "project to this many vector quantized variables per group"}, ) vq_groups: int = field( default=2, metadata={"help": "number of groups of latent variables"} ) vq_dim: int = field( default=0, metadata={ "help": "uses this dimensionality for quantized vectors. 0 to use model dim // groups" }, ) vq_depth: int = field( default=1, metadata={"help": "number of layers for vq weight projection"} ) combine_groups: bool = field( default=False, metadata={"help": "if set, variables are shared among groups"} ) vq_temp: Tuple[float, float, float] = field( default=(2.0, 0.5, 0.999995), metadata={ "help": "temperature for latent variable sampling with gumbel softmax. should be a tuple of 3 values (start, end, decay)" }, ) vq_gamma: float = field( default=0.25, metadata={"help": "gamma parameter for kmeans style vector quantization"}, ) infonce: bool = II("criterion.infonce") @register_model("wav2vec", dataclass=Wav2VecConfig) class Wav2VecModel(BaseFairseqModel): @classmethod def build_model(cls, cfg: Wav2VecConfig, task: FairseqTask): """Build a new model instance.""" model = Wav2VecModel(cfg) logger.info(model) return model def __init__(self, cfg: Wav2VecConfig): super().__init__() self.prediction_steps = cfg.prediction_steps offset = cfg.offset if cfg.activation == "relu": activation = nn.ReLU() elif cfg.activation == "gelu": activation = nn.GELU() else: raise Exception("unknown activation " + cfg.activation) feature_enc_layers = eval(cfg.conv_feature_layers) self.feature_extractor = ConvFeatureExtractionModel( conv_layers=feature_enc_layers, dropout=0.0, log_compression=cfg.log_compression, skip_connections=cfg.skip_connections_feat, residual_scale=cfg.residual_scale, non_affine_group_norm=cfg.non_affine_group_norm, activation=activation, ) embed = feature_enc_layers[-1][0] self.vector_quantizer = None if cfg.vq_type == "gumbel": self.vector_quantizer = GumbelVectorQuantizer( dim=embed, num_vars=cfg.vq_vars, temp=cfg.vq_temp, groups=cfg.vq_groups, combine_groups=cfg.combine_groups, vq_dim=cfg.vq_dim if cfg.vq_dim > 0 else embed, time_first=False, activation=activation, weight_proj_depth=cfg.vq_depth, weight_proj_factor=2, ) elif cfg.vq_type == "kmeans": self.vector_quantizer = KmeansVectorQuantizer( dim=embed, num_vars=cfg.vq_vars, groups=cfg.vq_groups, combine_groups=cfg.combine_groups, vq_dim=cfg.vq_dim if cfg.vq_dim > 0 else embed, time_first=False, gamma=cfg.vq_gamma, ) else: assert ( cfg.vq_type == "none" or cfg.vq_type is None ), "Unknown quantizer type" if cfg.offset == "auto": jin = 0 rin = 0 for _, k, stride in feature_enc_layers: if rin == 0: rin = k rin = rin + (k - 1) * jin if jin == 0: jin = stride else: jin *= stride offset = math.ceil(rin / jin) offset = int(offset) def make_aggregator(): if cfg.aggregator == "cnn": agg_layers = eval(cfg.conv_aggregator_layers) agg_dim = agg_layers[-1][0] feature_aggregator = ConvAggegator( conv_layers=agg_layers, embed=embed, dropout=cfg.dropout, skip_connections=cfg.skip_connections_agg, residual_scale=cfg.residual_scale, 
non_affine_group_norm=cfg.non_affine_group_norm, conv_bias=not cfg.no_conv_bias, zero_pad=cfg.agg_zero_pad, activation=activation, ) elif cfg.aggregator == "gru": agg_dim = cfg.gru_dim feature_aggregator = nn.Sequential( TransposeLast(), nn.GRU( input_size=embed, hidden_size=agg_dim, num_layers=1, dropout=cfg.dropout, ), TransposeLast(deconstruct_idx=0), ) else: raise Exception("unknown aggregator type " + cfg.aggregator) return feature_aggregator, agg_dim self.feature_aggregator, agg_dim = make_aggregator() self.wav2vec_predictions = Wav2VecPredictionsModel( in_dim=agg_dim, out_dim=embed, prediction_steps=cfg.prediction_steps, n_negatives=cfg.num_negatives, cross_sample_negatives=cfg.cross_sample_negatives, sample_distance=cfg.sample_distance, dropout=cfg.dropout, offset=offset, balanced_classes=cfg.balanced_classes, infonce=cfg.infonce, ) self.dropout_feats = nn.Dropout(p=cfg.dropout_features) self.dropout_agg = nn.Dropout(p=cfg.dropout_agg) if cfg.project_features == "none": self.project_features = None elif cfg.project_features == "same": self.project_features = self.feature_aggregator elif cfg.project_features == "new": self.project_features, _ = make_aggregator() def forward(self, source): result = {} features = self.feature_extractor(source) if self.vector_quantizer: q_res = self.vector_quantizer(features) features = q_res["x"] for k in q_res.keys(): if k != "x": result[k] = q_res[k] x = self.dropout_feats(features) x = self.feature_aggregator(x) x = self.dropout_agg(x) if self.project_features is not None: features = self.project_features(features) x, targets = self.wav2vec_predictions(x, features) result["cpc_logits"] = x result["cpc_targets"] = targets return result def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) def max_positions(self): """Maximum length supported by the model.""" return sys.maxsize def get_logits(self, net_output): logits = net_output["cpc_logits"] return logits def get_targets(self, sample, net_output): t = net_output["cpc_targets"] if isinstance(t, tuple): t = t[0] return t.contiguous() def get_target_weights(self, targets, net_output): targets = net_output["cpc_targets"] if isinstance(targets, tuple) and targets[-1] is not None: return targets[-1] return None def get_extra_losses(self, net_output): loss = None if "prob_perplexity" in net_output: loss = net_output["num_vars"] - net_output["prob_perplexity"] elif "kmeans_loss" in net_output: loss = net_output["kmeans_loss"] return loss def norm_block(is_layer_norm, dim, affine=True): if is_layer_norm: mod = nn.Sequential( TransposeLast(), Fp32LayerNorm(dim, elementwise_affine=affine), TransposeLast(), ) else: mod = Fp32GroupNorm(1, dim, affine=affine) return mod class ConvFeatureExtractionModel(nn.Module): def __init__( self, conv_layers, dropout, log_compression, skip_connections, residual_scale, non_affine_group_norm, activation, ): super().__init__() def block(n_in, n_out, k, stride): return nn.Sequential( nn.Conv1d(n_in, n_out, k, stride=stride, bias=False), nn.Dropout(p=dropout), norm_block( is_layer_norm=False, dim=n_out, affine=not non_affine_group_norm ), activation, ) in_d = 1 self.conv_layers = nn.ModuleList() for dim, k, stride in conv_layers: self.conv_layers.append(block(in_d, dim, k, stride)) in_d = dim self.log_compression = log_compression self.skip_connections = skip_connections self.residual_scale = math.sqrt(residual_scale) def forward(self, x): # BxT -> BxCxT x = x.unsqueeze(1) for conv in self.conv_layers: residual = x x = conv(x) if 
self.skip_connections and x.size(1) == residual.size(1): tsz = x.size(2) r_tsz = residual.size(2) residual = residual[..., :: r_tsz // tsz][..., :tsz] x = (x + residual) * self.residual_scale if self.log_compression: x = x.abs() x = x + 1 x = x.log() return x class ZeroPad1d(nn.Module): def __init__(self, pad_left, pad_right): super().__init__() self.pad_left = pad_left self.pad_right = pad_right def forward(self, x): return F.pad(x, (self.pad_left, self.pad_right)) class ConvAggegator(nn.Module): def __init__( self, conv_layers, embed, dropout, skip_connections, residual_scale, non_affine_group_norm, conv_bias, zero_pad, activation, ): super().__init__() def block(n_in, n_out, k, stride): # padding dims only really make sense for stride = 1 ka = k // 2 kb = ka - 1 if k % 2 == 0 else ka pad = ( ZeroPad1d(ka + kb, 0) if zero_pad else nn.ReplicationPad1d((ka + kb, 0)) ) return nn.Sequential( pad, nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias), nn.Dropout(p=dropout), norm_block(False, n_out, affine=not non_affine_group_norm), activation, ) in_d = embed self.conv_layers = nn.ModuleList() self.residual_proj = nn.ModuleList() for dim, k, stride in conv_layers: if in_d != dim and skip_connections: self.residual_proj.append(nn.Conv1d(in_d, dim, 1, bias=False)) else: self.residual_proj.append(None) self.conv_layers.append(block(in_d, dim, k, stride)) in_d = dim self.conv_layers = nn.Sequential(*self.conv_layers) self.skip_connections = skip_connections self.residual_scale = math.sqrt(residual_scale) def forward(self, x): for rproj, conv in zip(self.residual_proj, self.conv_layers): residual = x x = conv(x) if self.skip_connections: if rproj is not None: residual = rproj(residual) x = (x + residual) * self.residual_scale return x class Wav2VecPredictionsModel(nn.Module): def __init__( self, in_dim, out_dim, prediction_steps, n_negatives, cross_sample_negatives, sample_distance, dropout, offset, balanced_classes, infonce, ): super().__init__() self.n_negatives = n_negatives self.cross_sample_negatives = cross_sample_negatives self.sample_distance = sample_distance self.project_to_steps = nn.ConvTranspose2d( in_dim, out_dim, (1, prediction_steps) ) self.dropout = nn.Dropout(p=dropout) self.offset = offset self.balanced_classes = balanced_classes self.infonce = infonce def sample_negatives(self, y): bsz, fsz, tsz = y.shape y = y.transpose(0, 1) # BCT -> CBT y = y.contiguous().view(fsz, -1) # CBT => C(BxT) cross_high = tsz * bsz high = tsz if self.sample_distance is None else min(tsz, self.sample_distance) assert high > 1 neg_idxs = torch.randint(low=0, high=high, size=(bsz, self.n_negatives * tsz)) with torch.no_grad(): if self.n_negatives > 0: tszs = ( buffered_arange(tsz) .unsqueeze(-1) .expand(-1, self.n_negatives) .flatten() ) neg_idxs = torch.randint( low=0, high=high - 1, size=(bsz, self.n_negatives * tsz) ) neg_idxs[neg_idxs >= tszs] += 1 if self.cross_sample_negatives > 0: tszs = ( buffered_arange(tsz) .unsqueeze(-1) .expand(-1, self.cross_sample_negatives) .flatten() ) cross_neg_idxs = torch.randint( low=0, high=cross_high - 1, size=(bsz, self.cross_sample_negatives * tsz), ) cross_neg_idxs[cross_neg_idxs >= tszs] += 1 if self.n_negatives > 0: for i in range(1, bsz): neg_idxs[i] += i * high else: neg_idxs = cross_neg_idxs if self.cross_sample_negatives > 0 and self.n_negatives > 0: neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1) negs = y[..., neg_idxs.view(-1)] negs = negs.view( fsz, bsz, self.n_negatives + self.cross_sample_negatives, tsz ).permute( 2, 1, 0, 3 ) # to NxBxCxT 
return negs def forward(self, x, y): x = x.unsqueeze(-1) x = self.project_to_steps(x) # BxCxTxS x = self.dropout(x) negatives = self.sample_negatives(y) y = y.unsqueeze(0) targets = torch.cat([y, negatives], dim=0) # Copies x B x C x T copies = targets.size(0) bsz, dim, tsz, steps = x.shape steps = min(steps, tsz - self.offset) predictions = x.new( bsz * copies * (tsz - self.offset + 1) * steps - ((steps + 1) * steps // 2) * copies * bsz ) if self.infonce: labels = predictions.new_full( (predictions.shape[0] // copies,), 0, dtype=torch.long ) else: labels = torch.zeros_like(predictions) weights = ( torch.full_like(labels, 1 / self.n_negatives) if self.balanced_classes and not self.infonce else None ) start = end = 0 for i in range(steps): offset = i + self.offset end = start + (tsz - offset) * bsz * copies if self.infonce: predictions[start:end] = torch.einsum( "bct,nbct->tbn", x[..., :-offset, i], targets[..., offset:] ).flatten() else: pos_num = (end - start) // copies predictions[start:end] = torch.einsum( "bct,nbct->nbt", x[..., :-offset, i], targets[..., offset:] ).flatten() labels[start : start + pos_num] = 1.0 if weights is not None: weights[start : start + pos_num] = 1.0 start = end assert end == predictions.numel(), "{} != {}".format(end, predictions.numel()) if self.infonce: predictions = predictions.view(-1, copies) else: if weights is not None: labels = (labels, weights) return predictions, labels
KosmosX-API-main
kosmosX/fairseq/fairseq/models/wav2vec/wav2vec.py
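When cfg.offset is "auto", Wav2VecModel above derives the prediction offset from the receptive field and total stride of the convolutional feature extractor. The sketch below isolates that computation; auto_offset is a hypothetical helper name, and the spec is the default conv_feature_layers value from the config above.

# Sketch of the offset == "auto" computation in Wav2VecModel above.
import math


def auto_offset(conv_layers):
    """conv_layers: list of (dim, kernel_size, stride) tuples."""
    jin = 0  # cumulative stride (frame hop) in input samples
    rin = 0  # receptive field in input samples
    for _, k, stride in conv_layers:
        if rin == 0:
            rin = k
        rin = rin + (k - 1) * jin
        if jin == 0:
            jin = stride
        else:
            jin *= stride
    return int(math.ceil(rin / jin))


default_spec = [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2),
                (512, 4, 2), (512, 1, 1), (512, 1, 1), (512, 1, 1)]
print(auto_offset(default_spec))  # prints 3 for the default spec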
import argparse import logging import torch.nn as nn import fairseq.checkpoint_utils from fairseq.models import ( FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import TransformerDecoder from fairseq.models.roberta import model as roberta logger = logging.getLogger(__name__) @register_model("roberta_enc_dec") class RobertaEncDecModel(FairseqEncoderDecoderModel): @staticmethod def add_args(parser): parser.add_argument( "--pretrained-mlm-checkpoint", default=None, type=str, metavar="PRETRAINED", help="path to pretrained mlm checkpoint", ) parser.add_argument( "--pretrained-decoder", action="store_true", help="reload decoder" ) parser.add_argument( "--hack-layernorm-embedding", action="store_true", help="hack to reload old models trained with encoder-normalize-before=False (no equivalent to encoder-normalize-before=False and layernorm_embedding=False", ) parser.add_argument( "--share-decoder-input-output-embed", action="store_true", help="share decoder input and output embeddings", ) parser.add_argument( "--share-all-embeddings", action="store_true", help="share encoder, decoder and output embeddings" " (requires shared dictionary and embed dim)", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present base_enc_dec_architecture(args) if args.pretrained_mlm_checkpoint: arg_overrides = None if args.hack_layernorm_embedding: arg_overrides = {"layernorm_embedding": False} loaded = fairseq.checkpoint_utils.load_model_ensemble_and_task( [args.pretrained_mlm_checkpoint], arg_overrides=arg_overrides ) ([roberta_enc], _cfg, _task) = loaded else: # Do we need to edit untie_weights here ? share_in_out = ( args.share_decoder_input_output_embed or args.share_all_embeddings ) args.untie_weights_roberta = not share_in_out if args.hack_layernorm_embedding: args.layernorm_embedding = False args.encoder_normalize_before = False roberta_enc = roberta.RobertaModel.build_model(args, task) return cls.from_roberta(roberta_enc, args, task.source_dictionary) @staticmethod def from_roberta(roberta_enc: roberta.RobertaModel, args, dictionary): encoder = roberta_enc.encoder.sentence_encoder vocab_size, embed_dim = encoder.embed_tokens.weight.shape if args.share_all_embeddings: lm_head = roberta_enc.encoder.lm_head assert encoder.embed_tokens.weight is lm_head.weight, ( "Can't use --share-all-embeddings with a model " "that was pretraiend with --untie-weights-roberta_enc" ) else: lm_head = roberta.RobertaLMHead( embed_dim, vocab_size, roberta_enc.args.activation_fn ) dec_embs = nn.Embedding(vocab_size, embed_dim, dictionary.pad()) if args.share_all_embeddings or args.share_decoder_input_output_embed: # Note: I wasn't able to use Embedding _weight parameter to achive this sharing. dec_embs.weight = lm_head.weight decoder = TransformerDecoder( RobertaEncDecModel.read_args_from_roberta(roberta_enc.args), dictionary, dec_embs, no_encoder_attn=False, output_projection=lm_head, ) if getattr(args, "pretrained_decoder", False): decoder_dict = encoder.state_dict() # TODO: hide setting "encoder_attn" layers behind a flag. for k, w in list(decoder_dict.items()): if ".self_attn" in k: k_enc_attn = k.replace(".self_attn", ".encoder_attn") decoder_dict[k_enc_attn] = w.detach().clone() for k, w in lm_head.state_dict().items(): decoder_dict["output_projection." 
+ k] = w missing_keys, unexpected_keys = decoder.load_state_dict( decoder_dict, strict=False ) # missing_keys = [m for m in missing_keys if ".encoder_attn" not in m] assert not missing_keys and not unexpected_keys, ( "Failed to load state dict. " f"Missing keys: {missing_keys}. " f"Unexpected keys: {unexpected_keys}." ) if args.share_all_embeddings: assert decoder.output_projection.weight is decoder.embed_tokens.weight assert encoder.embed_tokens.weight is decoder.embed_tokens.weight elif args.share_decoder_input_output_embed: assert decoder.output_projection.weight is decoder.embed_tokens.weight assert encoder.embed_tokens.weight is not decoder.embed_tokens.weight else: assert decoder.output_projection.weight is not decoder.embed_tokens.weight assert encoder.embed_tokens.weight is not decoder.embed_tokens.weight return RobertaEncDecModel(encoder, decoder) @staticmethod def read_args_from_roberta(roberta_args: argparse.Namespace): # TODO: this would become easier if encoder/decoder where using a similar # TransformerConfig object args = argparse.Namespace(**vars(roberta_args)) attr_map = [ ("encoder_attention_heads", "decoder_attention_heads"), ("encoder_embed_dim", "decoder_embed_dim"), ("encoder_embed_dim", "decoder_output_dim"), ("encoder_normalize_before", "decoder_normalize_before"), ("encoder_layers_to_keep", "decoder_layers_to_keep"), ("encoder_ffn_embed_dim", "decoder_ffn_embed_dim"), ("encoder_layerdrop", "decoder_layerdrop"), ("encoder_layers", "decoder_layers"), ("encoder_learned_pos", "decoder_learned_pos"), # should this be set from here ? ("max_positions", "max_target_positions"), ] for k1, k2 in attr_map: setattr(args, k2, getattr(roberta_args, k1)) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = not roberta_args.untie_weights_roberta return args def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." if name != "" else "" super().upgrade_state_dict_named(state_dict, name) old_keys = list(state_dict.keys()) # rename decoder -> encoder before upgrading children modules for k in old_keys: if k.startswith(prefix + "encoder.lm_head"): state_dict.pop(k) continue new_k = k new_k = new_k.replace(".sentence_encoder.", ".") new_k = new_k.replace("decoder.lm_head.", "decoder.output_projection.") if k == new_k: continue # print(k, "->", new_k) state_dict[new_k] = state_dict.pop(k) @register_model_architecture("roberta_enc_dec", "roberta_enc_dec") def base_enc_dec_architecture(args): args.hack_layernorm_embedding = getattr(args, "hack_layernorm_embedding", False) args.pretrained_mlm_checkpoint = getattr(args, "pretrained_mlm_checkpoint", None) args.pretrained_decoder = getattr(args, "pretrained_decoder", None) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) roberta.base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/roberta/enc_dec.py
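from_roberta above ties the decoder input embedding to the LM head by assigning one Parameter to both modules (dec_embs.weight = lm_head.weight), which is what --share-all-embeddings and --share-decoder-input-output-embed rely on. A minimal tying sketch with made-up sizes, independent of the RoBERTa classes:

# Minimal weight-tying sketch (sizes are arbitrary; out_proj stands in for the LM head).
import torch
import torch.nn as nn

vocab_size, embed_dim, pad_idx = 100, 32, 1

out_proj = nn.Linear(embed_dim, vocab_size, bias=False)
dec_embs = nn.Embedding(vocab_size, embed_dim, padding_idx=pad_idx)

# After this assignment both modules hold the same Parameter object,
# the same trick used in from_roberta above.
dec_embs.weight = out_proj.weight

assert dec_embs.weight is out_proj.weight
out_proj.weight.data.normal_(0, 0.02)          # updating one view updates the other
assert torch.equal(dec_embs.weight, out_proj.weight)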
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ GottBERT: a pure German Language Model """ from fairseq.models import register_model from .hub_interface import RobertaHubInterface from .model import RobertaModel @register_model("gottbert") class GottbertModel(RobertaModel): @classmethod def hub_models(cls): return { "gottbert-base": "https://dl.gottbert.de/fairseq/models/gottbert-base.tar.gz", } @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", bpe="hf_byte_bpe", bpe_vocab="vocab.json", bpe_merges="merges.txt", bpe_add_prefix_space=False, **kwargs ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, bpe_vocab=bpe_vocab, bpe_merges=bpe_merges, bpe_add_prefix_space=bpe_add_prefix_space, **kwargs, ) return RobertaHubInterface(x["args"], x["task"], x["models"][0])
KosmosX-API-main
kosmosX/fairseq/fairseq/models/roberta/model_gottbert.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Unsupervised Cross-lingual Representation Learning at Scale
"""

from fairseq.models import register_model

from .hub_interface import RobertaHubInterface
from .model import RobertaModel


@register_model("xlmr")
class XLMRModel(RobertaModel):
    @classmethod
    def hub_models(cls):
        return {
            "xlmr.base": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz",
            "xlmr.large": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz",
            "xlmr.xl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xl.tar.gz",
            "xlmr.xxl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xxl.tar.gz",
        }

    @classmethod
    def from_pretrained(
        cls,
        model_name_or_path,
        checkpoint_file="model.pt",
        data_name_or_path=".",
        bpe="sentencepiece",
        **kwargs
    ):
        from fairseq import hub_utils

        x = hub_utils.from_pretrained(
            model_name_or_path,
            checkpoint_file,
            data_name_or_path,
            archive_map=cls.hub_models(),
            bpe=bpe,
            load_checkpoint_heads=True,
            **kwargs,
        )
        return RobertaHubInterface(x["args"], x["task"], x["models"][0])
KosmosX-API-main
kosmosX/fairseq/fairseq/models/roberta/model_xlmr.py
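The checkpoints listed in hub_models() are also resolvable through torch.hub; the returned object is the RobertaHubInterface shown later in this dump. A usage sketch, assuming network access since the checkpoint is downloaded on first use:

# Usage sketch: load XLM-R base via torch.hub and extract features.
import torch

xlmr = torch.hub.load("pytorch/fairseq", "xlmr.base")
xlmr.eval()

tokens = xlmr.encode("Bonjour le monde !")   # sentencepiece BPE -> dictionary indices
print(tokens.tolist())
print(xlmr.decode(tokens))                   # round-trips back to the input string

feats = xlmr.extract_features(tokens)        # 1 x T x C last-layer features
print(feats.shape)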
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import Counter from typing import List import torch def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_tokens: List[str]): """ Helper to align GPT-2 BPE to other tokenization formats (e.g., spaCy). Args: roberta (RobertaHubInterface): RoBERTa instance bpe_tokens (torch.LongTensor): GPT-2 BPE tokens of shape `(T_bpe)` other_tokens (List[str]): other tokens of shape `(T_words)` Returns: List[str]: mapping from *other_tokens* to corresponding *bpe_tokens*. """ assert bpe_tokens.dim() == 1 assert bpe_tokens[0] == 0 def clean(text): return text.strip() # remove whitespaces to simplify alignment bpe_tokens = [roberta.task.source_dictionary.string([x]) for x in bpe_tokens] bpe_tokens = [ clean(roberta.bpe.decode(x) if x not in {"<s>", ""} else x) for x in bpe_tokens ] other_tokens = [clean(str(o)) for o in other_tokens] # strip leading <s> bpe_tokens = bpe_tokens[1:] assert "".join(bpe_tokens) == "".join(other_tokens) # create alignment from every word to a list of BPE tokens alignment = [] bpe_toks = filter(lambda item: item[1] != "", enumerate(bpe_tokens, start=1)) j, bpe_tok = next(bpe_toks) for other_tok in other_tokens: bpe_indices = [] while True: if other_tok.startswith(bpe_tok): bpe_indices.append(j) other_tok = other_tok[len(bpe_tok) :] try: j, bpe_tok = next(bpe_toks) except StopIteration: j, bpe_tok = None, None elif bpe_tok.startswith(other_tok): # other_tok spans multiple BPE tokens bpe_indices.append(j) bpe_tok = bpe_tok[len(other_tok) :] other_tok = "" else: raise Exception('Cannot align "{}" and "{}"'.format(other_tok, bpe_tok)) if other_tok == "": break assert len(bpe_indices) > 0 alignment.append(bpe_indices) assert len(alignment) == len(other_tokens) return alignment def align_features_to_words(roberta, features, alignment): """ Align given features to words. Args: roberta (RobertaHubInterface): RoBERTa instance features (torch.Tensor): features to align of shape `(T_bpe x C)` alignment: alignment between BPE tokens and words returned by func:`align_bpe_to_words`. """ assert features.dim() == 2 bpe_counts = Counter(j for bpe_indices in alignment for j in bpe_indices) assert bpe_counts[0] == 0 # <s> shouldn't be aligned denom = features.new([bpe_counts.get(j, 1) for j in range(len(features))]) weighted_features = features / denom.unsqueeze(-1) output = [weighted_features[0]] largest_j = -1 for bpe_indices in alignment: output.append(weighted_features[bpe_indices].sum(dim=0)) largest_j = max(largest_j, *bpe_indices) for j in range(largest_j + 1, len(features)): output.append(weighted_features[j]) output = torch.stack(output) assert torch.all(torch.abs(output.sum(dim=0) - features.sum(dim=0)) < 1e-4) return output def spacy_nlp(): if getattr(spacy_nlp, "_nlp", None) is None: try: from spacy.lang.en import English spacy_nlp._nlp = English() except ImportError: raise ImportError("Please install spacy with: pip install spacy") return spacy_nlp._nlp def spacy_tokenizer(): if getattr(spacy_tokenizer, "_tokenizer", None) is None: try: nlp = spacy_nlp() spacy_tokenizer._tokenizer = nlp.Defaults.create_tokenizer(nlp) except ImportError: raise ImportError("Please install spacy with: pip install spacy") return spacy_tokenizer._tokenizer
KosmosX-API-main
kosmosX/fairseq/fairseq/models/roberta/alignment_utils.py
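align_bpe_to_words and align_features_to_words back the extract_features_aligned_to_words helper on the hub interface (shown further below), which returns a spaCy Doc whose tokens expose word-aligned RoBERTa features. A usage sketch; it assumes spacy is installed in a version compatible with fairseq's spacy_tokenizer helper and that the roberta.base checkpoint can be downloaded:

# Usage sketch for word-aligned RoBERTa features (requires: pip install spacy).
import torch

roberta = torch.hub.load("pytorch/fairseq", "roberta.base")
roberta.eval()

doc = roberta.extract_features_aligned_to_words("I said, 'hello RoBERTa.'")
for tok in doc:
    # each spaCy token exposes its aggregated BPE features via the vector hook
    print(tok, tok.vector.shape)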
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .hub_interface import *  # noqa
from .model import *  # noqa
from .enc_dec import *  # noqa
from .model_camembert import *  # noqa
from .model_gottbert import *  # noqa
from .model_xlmr import *  # noqa
KosmosX-API-main
kosmosX/fairseq/fairseq/models/roberta/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ RoBERTa: A Robustly Optimized BERT Pretraining Approach. """ import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, TransformerEncoder from fairseq.modules import LayerNorm from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import safe_getattr, safe_hasattr from .hub_interface import RobertaHubInterface logger = logging.getLogger(__name__) @register_model("roberta") class RobertaModel(FairseqEncoderModel): @classmethod def hub_models(cls): return { "roberta.base": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.base.tar.gz", "roberta.large": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.tar.gz", "roberta.large.mnli": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.mnli.tar.gz", "roberta.large.wsc": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.wsc.tar.gz", } def __init__(self, args, encoder): super().__init__(encoder) self.args = args # We follow BERT's random weight initialization self.apply(init_bert_params) self.classification_heads = nn.ModuleDict() @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--encoder-layers", type=int, metavar="L", help="num encoder layers" ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="H", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="F", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="A", help="num encoder attention heads", ) parser.add_argument( "--activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--pooler-activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use for pooler layer", ) parser.add_argument( "--encoder-normalize-before", action="store_true", help="apply layernorm before each encoder block", ) parser.add_argument( "--layernorm-embedding", action="store_true", help="add layernorm to embedding", ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability" ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--activation-dropout", type=float, metavar="D", help="dropout probability after activation in FFN", ) parser.add_argument( "--pooler-dropout", type=float, metavar="D", help="dropout probability in the masked_lm pooler layers", ) parser.add_argument( "--max-positions", type=int, help="number of positional embeddings to learn" ) parser.add_argument( "--load-checkpoint-heads", action="store_true", help="(re-)register and load heads when loading checkpoints", ) parser.add_argument( "--untie-weights-roberta", action="store_true", help="Untie weights between embeddings and classifiers in RoBERTa", ) # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) parser.add_argument( "--encoder-layerdrop", type=float, metavar="D", 
default=0, help="LayerDrop probability for encoder", ) parser.add_argument( "--encoder-layers-to-keep", default=None, help="which layers to *keep* when pruning as a comma-separated list", ) # args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) parser.add_argument( "--quant-noise-pq", type=float, metavar="D", default=0, help="iterative PQ quantization noise at training time", ) parser.add_argument( "--quant-noise-pq-block-size", type=int, metavar="D", default=8, help="block size of quantization noise at training time", ) parser.add_argument( "--quant-noise-scalar", type=float, metavar="D", default=0, help="scalar quantization noise and scalar quantization at training time", ) # args for "Better Fine-Tuning by Reducing Representational Collapse" (Aghajanyan et al. 2020) parser.add_argument( "--spectral-norm-classification-head", action="store_true", default=False, help="Apply spectral normalization on the classification head", ) # args for Fully Sharded Data Parallel (FSDP) training parser.add_argument( "--min-params-to-wrap", type=int, metavar="D", default=DEFAULT_MIN_PARAMS_TO_WRAP, help=( "minimum number of params for a layer to be wrapped with FSDP() when " "training with --ddp-backend=fully_sharded. Smaller values will " "improve memory efficiency, but may make torch.distributed " "communication less efficient due to smaller input sizes. This option " "is set to 0 (i.e., always wrap) when --checkpoint-activations or " "--offload-activations are passed." ), ) # args for AdaPruning # In short, it adds regularizarion for the multihead attention module and feed forward neural nets # For more details, please refer to the paper https://openreview.net/forum?id=_CMSV7FTzGI parser.add_argument( "--mha-reg-scale-factor", type=float, metavar="D", default=0.0, help="scaling factor for regularization term in adptive pruning, recommendation is 0.000375", ) parser.add_argument( "--ffn-reg-scale-factor", type=float, metavar="D", default=0.0, help="scaling factor for regularization term in adptive pruning, recommendation is 0.000375", ) parser.add_argument( "--mha-heads-to-keep", type=int, metavar="D", default=-1, help="number of heads to keep in each multi-head attention module, -1 means keeping all heads", ) parser.add_argument( "--ffn-blocks-to-remove", type=int, metavar="D", default=-1, help="number of feedforward blocks to remove in each transformer layer, -1 means keeping all ffn blocks", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" from omegaconf import OmegaConf if OmegaConf.is_config(args): OmegaConf.set_struct(args, False) # make sure all arguments are present base_architecture(args) if not safe_hasattr(args, "max_positions"): if not safe_hasattr(args, "tokens_per_sample"): args.tokens_per_sample = task.max_positions() args.max_positions = args.tokens_per_sample encoder = RobertaEncoder(args, task.source_dictionary) if OmegaConf.is_config(args): OmegaConf.set_struct(args, True) return cls(args, encoder) def forward( self, src_tokens, features_only=False, return_all_hiddens=False, classification_head_name=None, **kwargs, ): if classification_head_name is not None: features_only = True x, extra = self.encoder(src_tokens, features_only, return_all_hiddens, **kwargs) if classification_head_name is not None: x = self.classification_heads[classification_head_name](x) return x, extra def _get_adaptive_head_loss(self): norm_loss = 0 scaling = float(self.args.mha_reg_scale_factor) for layer in 
self.encoder.sentence_encoder.layers: norm_loss_layer = 0 for i in range(layer.self_attn.num_heads): start_idx = i * layer.self_attn.head_dim end_idx = (i + 1) * layer.self_attn.head_dim norm_loss_layer += scaling * ( torch.sum( torch.abs( layer.self_attn.q_proj.weight[ start_idx:end_idx, ] ) ) + torch.sum( torch.abs(layer.self_attn.q_proj.bias[start_idx:end_idx]) ) ) norm_loss_layer += scaling * ( torch.sum( torch.abs( layer.self_attn.k_proj.weight[ start_idx:end_idx, ] ) ) + torch.sum( torch.abs(layer.self_attn.k_proj.bias[start_idx:end_idx]) ) ) norm_loss_layer += scaling * ( torch.sum( torch.abs( layer.self_attn.v_proj.weight[ start_idx:end_idx, ] ) ) + torch.sum( torch.abs(layer.self_attn.v_proj.bias[start_idx:end_idx]) ) ) norm_loss += norm_loss_layer return norm_loss def _get_adaptive_ffn_loss(self): ffn_scale_factor = float(self.args.ffn_reg_scale_factor) filter_loss = 0 for layer in self.encoder.sentence_encoder.layers: filter_loss += torch.sum( torch.abs(layer.fc1.weight * ffn_scale_factor) ) + torch.sum(torch.abs(layer.fc2.weight * ffn_scale_factor)) filter_loss += torch.sum( torch.abs(layer.fc1.bias * ffn_scale_factor) ) + torch.sum(torch.abs(layer.fc2.bias * ffn_scale_factor)) return filter_loss def get_normalized_probs(self, net_output, log_probs, sample=None): """Get normalized probabilities (or log probs) from a net's output.""" logits = net_output[0].float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) def register_classification_head( self, name, num_classes=None, inner_dim=None, **kwargs ): """Register a classification head.""" if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: logger.warning( 're-registering head "{}" with num_classes {} (prev: {}) ' "and inner_dim {} (prev: {})".format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = RobertaClassificationHead( input_dim=self.args.encoder_embed_dim, inner_dim=inner_dim or self.args.encoder_embed_dim, num_classes=num_classes, activation_fn=self.args.pooler_activation_fn, pooler_dropout=self.args.pooler_dropout, q_noise=self.args.quant_noise_pq, qn_block_size=self.args.quant_noise_pq_block_size, do_spectral_norm=self.args.spectral_norm_classification_head, ) @property def supported_targets(self): return {"self"} @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", bpe="gpt2", **kwargs, ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) logger.info(x["args"]) return RobertaHubInterface(x["args"], x["task"], x["models"][0]) def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." if name != "" else "" # rename decoder -> encoder before upgrading children modules for k in list(state_dict.keys()): if k.startswith(prefix + "decoder"): new_k = prefix + "encoder" + k[len(prefix + "decoder") :] state_dict[new_k] = state_dict[k] del state_dict[k] # rename emb_layer_norm -> layernorm_embedding for k in list(state_dict.keys()): if ".emb_layer_norm." 
in k: new_k = k.replace(".emb_layer_norm.", ".layernorm_embedding.") state_dict[new_k] = state_dict[k] del state_dict[k] # upgrade children modules super().upgrade_state_dict_named(state_dict, name) # Handle new classification heads present in the state dict. current_head_names = ( [] if not hasattr(self, "classification_heads") else self.classification_heads.keys() ) keys_to_delete = [] for k in state_dict.keys(): if not k.startswith(prefix + "classification_heads."): continue head_name = k[len(prefix + "classification_heads.") :].split(".")[0] num_classes = state_dict[ prefix + "classification_heads." + head_name + ".out_proj.weight" ].size(0) inner_dim = state_dict[ prefix + "classification_heads." + head_name + ".dense.weight" ].size(0) if getattr(self.args, "load_checkpoint_heads", False): if head_name not in current_head_names: self.register_classification_head(head_name, num_classes, inner_dim) else: if head_name not in current_head_names: logger.warning( "deleting classification head ({}) from checkpoint " "not present in current model: {}".format(head_name, k) ) keys_to_delete.append(k) elif ( num_classes != self.classification_heads[head_name].out_proj.out_features or inner_dim != self.classification_heads[head_name].dense.out_features ): logger.warning( "deleting classification head ({}) from checkpoint " "with different dimensions than current model: {}".format( head_name, k ) ) keys_to_delete.append(k) for k in keys_to_delete: del state_dict[k] # Copy any newly-added classification heads into the state dict # with their current weights. if hasattr(self, "classification_heads"): cur_state = self.classification_heads.state_dict() for k, v in cur_state.items(): if prefix + "classification_heads." + k not in state_dict: logger.info("Overwriting " + prefix + "classification_heads." + k) state_dict[prefix + "classification_heads." + k] = v class RobertaLMHead(nn.Module): """Head for masked language modeling.""" def __init__(self, embed_dim, output_dim, activation_fn, weight=None): super().__init__() self.dense = nn.Linear(embed_dim, embed_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.layer_norm = LayerNorm(embed_dim) if weight is None: weight = nn.Linear(embed_dim, output_dim, bias=False).weight self.weight = weight self.bias = nn.Parameter(torch.zeros(output_dim)) def forward(self, features, masked_tokens=None, **kwargs): # Only project the masked tokens while training, # saves both memory and computation if masked_tokens is not None: features = features[masked_tokens, :] x = self.dense(features) x = self.activation_fn(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = F.linear(x, self.weight) + self.bias return x class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout, q_noise=0, qn_block_size=8, do_spectral_norm=False, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = apply_quant_noise_( nn.Linear(inner_dim, num_classes), q_noise, qn_block_size ) if do_spectral_norm: if q_noise != 0: raise NotImplementedError( "Attempting to use Spectral Normalization with Quant Noise. This is not officially supported" ) self.out_proj = torch.nn.utils.spectral_norm(self.out_proj) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. 
to [CLS]) x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x class RobertaEncoder(FairseqEncoder): """RoBERTa encoder.""" def __init__(self, args, dictionary): super().__init__(dictionary) # set any missing default values base_architecture(args) self.args = args if args.encoder_layers_to_keep: args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) embed_tokens = self.build_embedding( len(dictionary), args.encoder_embed_dim, dictionary.pad() ) self.sentence_encoder = self.build_encoder(args, dictionary, embed_tokens) self.lm_head = self.build_lm_head( embed_dim=args.encoder_embed_dim, output_dim=len(dictionary), activation_fn=args.activation_fn, weight=( self.sentence_encoder.embed_tokens.weight if not args.untie_weights_roberta else None ), ) def build_embedding(self, vocab_size, embedding_dim, padding_idx): return nn.Embedding(vocab_size, embedding_dim, padding_idx) def build_encoder(self, args, dictionary, embed_tokens): encoder = TransformerEncoder(args, dictionary, embed_tokens) encoder.apply(init_bert_params) return encoder def build_lm_head(self, embed_dim, output_dim, activation_fn, weight): return RobertaLMHead(embed_dim, output_dim, activation_fn, weight) def forward( self, src_tokens, features_only=False, return_all_hiddens=False, masked_tokens=None, **unused, ): """ Args: src_tokens (LongTensor): input tokens of shape `(batch, src_len)` features_only (bool, optional): skip LM head and just return features. If True, the output will be of shape `(batch, src_len, embed_dim)`. return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). Returns: tuple: - the LM output of shape `(batch, src_len, vocab)` - a dictionary of additional data, where 'inner_states' is a list of hidden states. Note that the hidden states have shape `(src_len, batch, vocab)`. 
""" x, extra = self.extract_features( src_tokens, return_all_hiddens=return_all_hiddens ) if not features_only: x = self.output_layer(x, masked_tokens=masked_tokens) return x, extra def extract_features(self, src_tokens, return_all_hiddens=False, **kwargs): encoder_out = self.sentence_encoder( src_tokens, return_all_hiddens=return_all_hiddens, token_embeddings=kwargs.get("token_embeddings", None), ) # T x B x C -> B x T x C features = encoder_out["encoder_out"][0].transpose(0, 1) inner_states = encoder_out["encoder_states"] if return_all_hiddens else None return features, {"inner_states": inner_states} def output_layer(self, features, masked_tokens=None, **unused): return self.lm_head(features, masked_tokens) def max_positions(self): """Maximum output length supported by the encoder.""" return self.args.max_positions @register_model_architecture("roberta", "roberta") def base_architecture(args): args.encoder_layers = safe_getattr(args, "encoder_layers", 12) args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 768) args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 3072) args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 12) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_dropout = safe_getattr(args, "activation_dropout", 0.0) args.pooler_dropout = safe_getattr(args, "pooler_dropout", 0.0) args.max_source_positions = safe_getattr(args, "max_positions", 512) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) # BERT has a few structural differences compared to the original Transformer args.encoder_learned_pos = safe_getattr(args, "encoder_learned_pos", True) args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", True) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", True) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.encoder_normalize_before = safe_getattr( args, "encoder_normalize_before", False ) args.pooler_activation_fn = safe_getattr(args, "pooler_activation_fn", "tanh") args.untie_weights_roberta = safe_getattr(args, "untie_weights_roberta", False) # Adaptive input config args.adaptive_input = safe_getattr(args, "adaptive_input", False) # LayerDrop config args.encoder_layerdrop = safe_getattr(args, "encoder_layerdrop", 0.0) args.encoder_layers_to_keep = safe_getattr(args, "encoder_layers_to_keep", None) # Quantization noise config args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) # R4F config args.spectral_norm_classification_head = safe_getattr( args, "spectral_norm_classification_head", False ) @register_model_architecture("roberta", "roberta_prenorm") def roberta_prenorm_architecture(args): args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False) args.encoder_normalize_before = safe_getattr(args, "encoder_normalize_before", True) base_architecture(args) @register_model_architecture("roberta", "roberta_base") def roberta_base_architecture(args): base_architecture(args) @register_model_architecture("roberta", "roberta_large") def roberta_large_architecture(args): args.encoder_layers = safe_getattr(args, "encoder_layers", 24) args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = safe_getattr(args, 
"encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 16) base_architecture(args) @register_model_architecture("roberta", "xlm") def xlm_architecture(args): args.encoder_layers = safe_getattr(args, "encoder_layers", 16) args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 1280) args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 1280 * 4) args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 16) base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/roberta/model.py
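register_classification_head above attaches a task-specific head to a pretrained encoder, and upgrade_state_dict_named keeps such heads consistent when checkpoints are reloaded. A usage sketch through the hub interface follows; the head name my_task is hypothetical and its weights are randomly initialized, so the prediction is meaningless until fine-tuned.

# Usage sketch: add a new sentence-classification head to pretrained RoBERTa.
import torch

roberta = torch.hub.load("pytorch/fairseq", "roberta.base")
roberta.eval()

roberta.register_classification_head("my_task", num_classes=3)

tokens = roberta.encode("This head is untrained, so the output is random.")
logprobs = roberta.predict("my_task", tokens)   # shape (1, 3), log-probabilities
print(logprobs.shape)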
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ CamemBERT: a Tasty French Language Model """ from fairseq.models import register_model from .hub_interface import RobertaHubInterface from .model import RobertaModel @register_model("camembert") class CamembertModel(RobertaModel): @classmethod def hub_models(cls): return { "camembert": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz", "camembert.v0": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz", "camembert-base": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz", "camembert-large": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-large.tar.gz", "camembert-base-ccnet": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-ccnet.tar.gz", "camembert-base-ccnet-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-ccnet-4gb.tar.gz", "camembert-base-wikipedia-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-wikipedia-4gb.tar.gz", "camembert-base-oscar-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-oscar-4gb.tar.gz", } @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", bpe="sentencepiece", **kwargs ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) return RobertaHubInterface(x["args"], x["task"], x["models"][0])
KosmosX-API-main
kosmosX/fairseq/fairseq/models/roberta/model_camembert.py
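Like the other wrappers, CamemBERT resolves through torch.hub, and fill_mask from the hub interface below gives a quick masked-LM check. Sketch, again assuming the checkpoint can be downloaded:

# Usage sketch: masked-token prediction with CamemBERT.
import torch

camembert = torch.hub.load("pytorch/fairseq", "camembert")
camembert.eval()

# fill_mask requires exactly one <mask> token in the input.
for filled, score, token in camembert.fill_mask("Le camembert est <mask> :)", topk=3):
    print(round(score, 3), filled)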
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.data import encoders class RobertaHubInterface(nn.Module): """A simple PyTorch Hub interface to RoBERTa. Usage: https://github.com/pytorch/fairseq/tree/main/examples/roberta """ def __init__(self, cfg, task, model): super().__init__() self.cfg = cfg self.task = task self.model = model self.bpe = encoders.build_bpe(cfg.bpe) # this is useful for determining the device self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float)) @property def device(self): return self._float_tensor.device def encode( self, sentence: str, *addl_sentences, no_separator=False ) -> torch.LongTensor: """ BPE-encode a sentence (or multiple sentences). Every sequence begins with a beginning-of-sentence (`<s>`) symbol. Every sentence ends with an end-of-sentence (`</s>`) and we use an extra end-of-sentence (`</s>`) as a separator. Example (single sentence): `<s> a b c </s>` Example (sentence pair): `<s> d e f </s> </s> 1 2 3 </s>` The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE requires leading spaces. For example:: >>> roberta.encode('Hello world').tolist() [0, 31414, 232, 2] >>> roberta.encode(' world').tolist() [0, 232, 2] >>> roberta.encode('world').tolist() [0, 8331, 2] """ bpe_sentence = "<s> " + self.bpe.encode(sentence) + " </s>" for s in addl_sentences: bpe_sentence += " </s>" if not no_separator else "" bpe_sentence += " " + self.bpe.encode(s) + " </s>" tokens = self.task.source_dictionary.encode_line( bpe_sentence, append_eos=False, add_if_not_exist=False ) return tokens.long() def decode(self, tokens: torch.LongTensor): assert tokens.dim() == 1 tokens = tokens.numpy() if tokens[0] == self.task.source_dictionary.bos(): tokens = tokens[1:] # remove <s> eos_mask = tokens == self.task.source_dictionary.eos() doc_mask = eos_mask[1:] & eos_mask[:-1] sentences = np.split(tokens, doc_mask.nonzero()[0] + 1) sentences = [ self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences ] if len(sentences) == 1: return sentences[0] return sentences def extract_features( self, tokens: torch.LongTensor, return_all_hiddens: bool = False ) -> torch.Tensor: if tokens.dim() == 1: tokens = tokens.unsqueeze(0) if tokens.size(-1) > self.model.max_positions(): raise ValueError( "tokens exceeds maximum length: {} > {}".format( tokens.size(-1), self.model.max_positions() ) ) features, extra = self.model( tokens.to(device=self.device), features_only=True, return_all_hiddens=return_all_hiddens, ) if return_all_hiddens: # convert from T x B x C -> B x T x C inner_states = extra["inner_states"] return [inner_state.transpose(0, 1) for inner_state in inner_states] else: return features # just the last layer's features def register_classification_head( self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs ): self.model.register_classification_head( name, num_classes=num_classes, embedding_size=embedding_size, **kwargs ) def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False): features = self.extract_features(tokens.to(device=self.device)) logits = self.model.classification_heads[head](features) if return_logits: return logits return F.log_softmax(logits, dim=-1) def extract_features_aligned_to_words( self, sentence: str, 
return_all_hiddens: bool = False ) -> torch.Tensor: """Extract RoBERTa features, aligned to spaCy's word-level tokenizer.""" from fairseq.models.roberta import alignment_utils from spacy.tokens import Doc nlp = alignment_utils.spacy_nlp() tokenizer = alignment_utils.spacy_tokenizer() # tokenize both with GPT-2 BPE and spaCy bpe_toks = self.encode(sentence) spacy_toks = tokenizer(sentence) spacy_toks_ws = [t.text_with_ws for t in tokenizer(sentence)] alignment = alignment_utils.align_bpe_to_words(self, bpe_toks, spacy_toks_ws) # extract features and align them features = self.extract_features( bpe_toks, return_all_hiddens=return_all_hiddens ) features = features.squeeze(0) aligned_feats = alignment_utils.align_features_to_words( self, features, alignment ) # wrap in spaCy Doc doc = Doc( nlp.vocab, words=["<s>"] + [x.text for x in spacy_toks] + ["</s>"], spaces=[True] + [x.endswith(" ") for x in spacy_toks_ws[:-1]] + [True, False], ) assert len(doc) == aligned_feats.size(0) doc.user_token_hooks["vector"] = lambda token: aligned_feats[token.i] return doc def fill_mask(self, masked_input: str, topk: int = 5): masked_token = "<mask>" assert ( masked_token in masked_input and masked_input.count(masked_token) == 1 ), "Please add one {0} token for the input, eg: 'He is a {0} guy'".format( masked_token ) text_spans = masked_input.split(masked_token) text_spans_bpe = ( (" {0} ".format(masked_token)) .join([self.bpe.encode(text_span.rstrip()) for text_span in text_spans]) .strip() ) tokens = self.task.source_dictionary.encode_line( "<s> " + text_spans_bpe + " </s>", append_eos=False, add_if_not_exist=False, ) masked_index = (tokens == self.task.mask_idx).nonzero(as_tuple=False) if tokens.dim() == 1: tokens = tokens.unsqueeze(0) with utils.model_eval(self.model): features, extra = self.model( tokens.long().to(device=self.device), features_only=False, return_all_hiddens=False, ) logits = features[0, masked_index, :].squeeze() prob = logits.softmax(dim=0) values, index = prob.topk(k=topk, dim=0) topk_predicted_token_bpe = self.task.source_dictionary.string(index) topk_filled_outputs = [] for index, predicted_token_bpe in enumerate( topk_predicted_token_bpe.split(" ") ): predicted_token = self.bpe.decode(predicted_token_bpe) # Quick hack to fix https://github.com/pytorch/fairseq/issues/1306 if predicted_token_bpe.startswith("\u2581"): predicted_token = " " + predicted_token if " {0}".format(masked_token) in masked_input: topk_filled_outputs.append( ( masked_input.replace( " {0}".format(masked_token), predicted_token ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(masked_token, predicted_token), values[index].item(), predicted_token, ) ) return topk_filled_outputs def disambiguate_pronoun(self, sentence: str) -> bool: """ Usage:: >>> disambiguate_pronoun('The _trophy_ would not fit in the brown suitcase because [it] was too big.') True >>> disambiguate_pronoun('The trophy would not fit in the brown suitcase because [it] was too big.') 'The trophy' """ assert hasattr( self.task, "disambiguate_pronoun" ), "roberta.disambiguate_pronoun() requires a model trained with the WSC task." with utils.model_eval(self.model): return self.task.disambiguate_pronoun( self.model, sentence, use_cuda=self.device.type == "cuda" )
KosmosX-API-main
kosmosX/fairseq/fairseq/models/roberta/hub_interface.py
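A minimal usage sketch for the RobertaHubInterface above. The torch.hub entry point and checkpoint name follow the fairseq RoBERTa examples and are assumptions here (network access or a local cache is required):

# Hedged usage sketch (assumes the 'pytorch/fairseq' torch.hub entry point and the
# 'roberta.base' checkpoint from the fairseq RoBERTa examples are available).
import torch

roberta = torch.hub.load("pytorch/fairseq", "roberta.base")  # -> RobertaHubInterface
roberta.eval()  # disable dropout

tokens = roberta.encode("Hello world!")        # GPT-2 BPE ids as a LongTensor
features = roberta.extract_features(tokens)    # 1 x T x C last-layer features
print(features.shape)

# top-3 completions for the single <mask> token
print(roberta.fill_mask("The capital of France is <mask>.", topk=3))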
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

# automatically import any Python files in the models/huggingface/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
    path = os.path.join(models_dir, file)
    if (
        not file.startswith("_")
        and not file.startswith(".")
        and (file.endswith(".py") or os.path.isdir(path))
    ):
        model_name = file[: file.find(".py")] if file.endswith(".py") else file
        module = importlib.import_module("fairseq.models.huggingface." + model_name)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/huggingface/__init__.py
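The __init__.py above registers models purely through import side effects: importing each submodule runs its @register_model decorators. A generic, self-contained sketch of that auto-import-plus-registry pattern follows; the package name, REGISTRY, register, and import_all_submodules are hypothetical illustrations, not fairseq APIs:

# Generic sketch of the import-time registration pattern (hypothetical names).
import importlib
import os
import pkgutil

REGISTRY = {}

def register(name):
    def wrapper(cls):
        REGISTRY[name] = cls
        return cls
    return wrapper

def import_all_submodules(package_name: str):
    pkg = importlib.import_module(package_name)
    pkg_dir = os.path.dirname(pkg.__file__)
    for info in pkgutil.iter_modules([pkg_dir]):
        # importing the module executes its @register(...) decorators
        importlib.import_module(f"{package_name}.{info.name}")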
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import Dict, List, Optional import torch from fairseq.models import ( FairseqIncrementalDecoder, FairseqLanguageModel, register_model, register_model_architecture, ) logger = logging.getLogger(__name__) DEFAULT_MAX_TARGET_POSITIONS = 1024 @register_model("hf_gpt2") class HuggingFaceGPT2LanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--embed-dim', type=int, metavar='N', help='embedding dimension') parser.add_argument('--num-attention-heads', type=int, metavar='N', help='num attention heads') parser.add_argument('--num-layers', type=int, metavar='N', help='num layers') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability for all fully connected layers ' 'in the embeddings, encoder, and pooler') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" default_architecture(args) return cls(HuggingFaceGPT2Decoder(args, task)) class HuggingFaceGPT2Decoder(FairseqIncrementalDecoder): def __init__(self, args, task): try: from transformers import GPT2Config, GPT2LMHeadModel except ImportError: raise ImportError( "\n\nPlease install huggingface/transformers with:" "\n\n pip install transformers" ) super().__init__(task.target_dictionary) config = GPT2Config( vocab_size=len(task.target_dictionary), n_positions=args.max_target_positions + 1, n_ctx=args.max_target_positions, n_embd=args.embed_dim, n_layer=args.num_layers, n_head=args.num_attention_heads, resid_pdrop=args.dropout, embd_pdrop=args.dropout, attn_pdrop=args.attention_dropout, layer_norm_epsilon=1e-6, ) self.model = GPT2LMHeadModel(config) # set zero embedding for padding symbol self.pad_idx = task.target_dictionary.pad() self.model.transformer.wte.weight.data[self.pad_idx].zero_() self.model.transformer.wpe.weight.data[0].zero_() def forward( self, prev_output_tokens, src_lengths=None, incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, encoder_out=None, ): features = self.extract_features(prev_output_tokens, incremental_state) lm_logits = self.model.lm_head(features) return (lm_logits,) def extract_features( self, prev_output_tokens, incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, ): if incremental_state: past = self.get_incremental_state("past") else: past = None # don't attend to padding symbols attention_mask = prev_output_tokens.ne(self.pad_idx).int() # set position ids to exclude padding symbols position_ids = attention_mask * ( torch.arange(1, 1 + prev_output_tokens.size(1)) .to(prev_output_tokens) .repeat(prev_output_tokens.size(0), 1) ) outputs = self.model.transformer( input_ids=prev_output_tokens, past=past, attention_mask=attention_mask, position_ids=position_ids, ) last_hidden_states = outputs[0] if incremental_state: self.set_incremental_state(incremental_state, "past", outputs[1]) return last_hidden_states def max_positions(self): return self.model.config.n_positions - 1 @register_model_architecture("hf_gpt2", "hf_gpt2") def default_architecture(args): if getattr(args, "max_target_positions", None) is None: args.max_target_positions = getattr( args, 
"tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS ) args.embed_dim = getattr(args, "embed_dim", 768) args.num_attention_heads = getattr(args, "num_attention_heads", 12) args.num_layers = getattr(args, "num_layers", 12) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.1) @register_model_architecture("hf_gpt2", "hf_gpt2_medium") def hf_gpt2_medium(args): args.embed_dim = getattr(args, "embed_dim", 1024) args.num_attention_heads = getattr(args, "num_attention_heads", 16) args.num_layers = getattr(args, "num_layers", 24) default_architecture(args) @register_model_architecture("hf_gpt2", "hf_gpt2_large") def hf_gpt2_large(args): args.embed_dim = getattr(args, "embed_dim", 1280) args.num_attention_heads = getattr(args, "num_attention_heads", 20) args.num_layers = getattr(args, "num_layers", 36) default_architecture(args) @register_model_architecture("hf_gpt2", "hf_gpt2_xl") def hf_gpt2_xl(args): args.embed_dim = getattr(args, "embed_dim", 1600) args.num_attention_heads = getattr(args, "num_attention_heads", 25) args.num_layers = getattr(args, "num_layers", 48) default_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/huggingface/hf_gpt2.py
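A small, self-contained check of the padding-aware position ids computed in HuggingFaceGPT2Decoder.extract_features above; the pad index and shapes are illustrative:

# Sketch: position ids that skip padding, mirroring extract_features above.
import torch

pad_idx = 1  # illustrative padding index
prev_output_tokens = torch.tensor([[5, 6, 7, pad_idx, pad_idx],
                                   [8, 9, pad_idx, pad_idx, pad_idx]])

attention_mask = prev_output_tokens.ne(pad_idx).int()
position_ids = attention_mask * (
    torch.arange(1, 1 + prev_output_tokens.size(1))
    .to(prev_output_tokens)
    .repeat(prev_output_tokens.size(0), 1)
)
print(attention_mask)  # 1 for real tokens, 0 for padding
print(position_ids)    # positions 1..T for real tokens, 0 at padded slots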
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import json from typing import Dict import numpy as np import torch from torch import nn import torch.nn.functional as F from fairseq.data.audio.audio_utils import ( get_window, get_fourier_basis, get_mel_filters, TTSSpectrogram, ) from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig from fairseq.models.text_to_speech.codehifigan import CodeGenerator as CodeHiFiGANModel from fairseq.models.text_to_speech.hifigan import Generator as HiFiGANModel logger = logging.getLogger(__name__) class PseudoInverseMelScale(torch.nn.Module): def __init__(self, n_stft, n_mels, sample_rate, f_min, f_max) -> None: super(PseudoInverseMelScale, self).__init__() self.n_mels = n_mels basis = get_mel_filters(sample_rate, (n_stft - 1) * 2, n_mels, f_min, f_max) basis = torch.pinverse(basis) # F x F_mel self.register_buffer("basis", basis) def forward(self, melspec: torch.Tensor) -> torch.Tensor: # pack batch shape = melspec.shape # B_1 x ... x B_K x F_mel x T n_mels, time = shape[-2], shape[-1] melspec = melspec.view(-1, n_mels, time) freq, _ = self.basis.size() # F x F_mel assert self.n_mels == n_mels, (self.n_mels, n_mels) specgram = self.basis.matmul(melspec).clamp(min=0) # unpack batch specgram = specgram.view(shape[:-2] + (freq, time)) return specgram class GriffinLim(torch.nn.Module): def __init__( self, n_fft: int, win_length: int, hop_length: int, n_iter: int, window_fn=torch.hann_window, ): super(GriffinLim, self).__init__() self.transform = TTSSpectrogram( n_fft, win_length, hop_length, return_phase=True ) basis = get_fourier_basis(n_fft) basis = torch.pinverse(n_fft / hop_length * basis).T[:, None, :] basis *= get_window(window_fn, n_fft, win_length) self.register_buffer("basis", basis) self.n_fft = n_fft self.win_length = win_length self.hop_length = hop_length self.n_iter = n_iter self.tiny = 1.1754944e-38 @classmethod def get_window_sum_square( cls, n_frames, hop_length, win_length, n_fft, window_fn=torch.hann_window ) -> torch.Tensor: w_sq = get_window(window_fn, n_fft, win_length) ** 2 n = n_fft + hop_length * (n_frames - 1) x = torch.zeros(n, dtype=torch.float32) for i in range(n_frames): ofst = i * hop_length x[ofst : min(n, ofst + n_fft)] += w_sq[: max(0, min(n_fft, n - ofst))] return x def inverse(self, magnitude: torch.Tensor, phase) -> torch.Tensor: x = torch.cat( [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1 ) x = F.conv_transpose1d(x, self.basis, stride=self.hop_length) win_sum_sq = self.get_window_sum_square( magnitude.shape[-1], hop_length=self.hop_length, win_length=self.win_length, n_fft=self.n_fft, ).to(magnitude.device) # remove modulation effects approx_nonzero_indices = win_sum_sq > self.tiny x[:, :, approx_nonzero_indices] /= win_sum_sq[approx_nonzero_indices] x *= self.n_fft / self.hop_length x = x[:, :, self.n_fft // 2 :] x = x[:, :, : -self.n_fft // 2 :] return x def forward(self, specgram: torch.Tensor) -> torch.Tensor: angles = np.angle(np.exp(2j * np.pi * np.random.rand(*specgram.shape))) angles = torch.from_numpy(angles).to(specgram) _specgram = specgram.view(-1, specgram.shape[-2], specgram.shape[-1]) waveform = self.inverse(_specgram, angles).squeeze(1) for _ in range(self.n_iter): _, angles = self.transform(waveform) waveform = self.inverse(_specgram, angles).squeeze(1) return waveform.squeeze(0) class GriffinLimVocoder(nn.Module): def __init__( 
self, sample_rate, win_size, hop_size, n_fft, n_mels, f_min, f_max, window_fn, spec_bwd_max_iter=32, fp16=False, ): super().__init__() self.inv_mel_transform = PseudoInverseMelScale( n_stft=n_fft // 2 + 1, n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max, ) self.gl_transform = GriffinLim( n_fft=n_fft, win_length=win_size, hop_length=hop_size, window_fn=window_fn, n_iter=spec_bwd_max_iter, ) if fp16: self.half() self.inv_mel_transform.half() self.gl_transform.half() else: self.float() self.inv_mel_transform.float() self.gl_transform.float() def forward(self, x): # x: (B x) T x D -> (B x) 1 x T # NOTE: batched forward produces noisier waveform. recommend running # one utterance at a time self.eval() x = x.exp().transpose(-1, -2) x = self.inv_mel_transform(x) x = self.gl_transform(x) return x @classmethod def from_data_cfg(cls, args, data_cfg: S2TDataConfig): feat_cfg = data_cfg.config["features"] window_fn = getattr(torch, feat_cfg["window_fn"] + "_window") return cls( sample_rate=feat_cfg["sample_rate"], win_size=int(feat_cfg["win_len_t"] * feat_cfg["sample_rate"]), hop_size=int(feat_cfg["hop_len_t"] * feat_cfg["sample_rate"]), n_fft=feat_cfg["n_fft"], n_mels=feat_cfg["n_mels"], f_min=feat_cfg["f_min"], f_max=feat_cfg["f_max"], window_fn=window_fn, spec_bwd_max_iter=args.spec_bwd_max_iter, fp16=args.fp16, ) class HiFiGANVocoder(nn.Module): def __init__( self, checkpoint_path: str, model_cfg: Dict[str, str], fp16: bool = False ) -> None: super().__init__() self.model = HiFiGANModel(model_cfg) state_dict = torch.load(checkpoint_path) self.model.load_state_dict(state_dict["generator"]) if fp16: self.model.half() logger.info(f"loaded HiFiGAN checkpoint from {checkpoint_path}") def forward(self, x: torch.Tensor) -> torch.Tensor: # (B x) T x D -> (B x) 1 x T model = self.model.eval() if len(x.shape) == 2: return model(x.unsqueeze(0).transpose(1, 2)).detach().squeeze(0) else: return model(x.transpose(-1, -2)).detach() @classmethod def from_data_cfg(cls, args, data_cfg: S2TDataConfig): vocoder_cfg = data_cfg.vocoder assert vocoder_cfg.get("type", "griffin_lim") == "hifigan" with open(vocoder_cfg["config"]) as f: model_cfg = json.load(f) return cls(vocoder_cfg["checkpoint"], model_cfg, fp16=args.fp16) class CodeHiFiGANVocoder(nn.Module): def __init__( self, checkpoint_path: str, model_cfg: Dict[str, str], fp16: bool = False ) -> None: super().__init__() self.model = CodeHiFiGANModel(model_cfg) state_dict = torch.load(checkpoint_path) self.model.load_state_dict(state_dict["generator"]) self.model.eval() if fp16: self.model.half() self.model.remove_weight_norm() logger.info(f"loaded CodeHiFiGAN checkpoint from {checkpoint_path}") def forward(self, x: Dict[str, torch.Tensor], dur_prediction=False) -> torch.Tensor: assert "code" in x x["dur_prediction"] = dur_prediction mask = x["code"] >= 0 # remove invalid code x["code"] = x["code"][mask].unsqueeze(dim=0) return self.model(**x).detach().squeeze() @classmethod def from_data_cfg(cls, args, data_cfg): vocoder_cfg = data_cfg.vocoder assert vocoder_cfg is not None, "vocoder not specified in the data config" with open(vocoder_cfg["config"]) as f: model_cfg = json.load(f) return cls(vocoder_cfg["checkpoint"], model_cfg, fp16=args.fp16) def get_vocoder(args, data_cfg: S2TDataConfig): if args.vocoder == "griffin_lim": return GriffinLimVocoder.from_data_cfg(args, data_cfg) elif args.vocoder == "hifigan": return HiFiGANVocoder.from_data_cfg(args, data_cfg) elif args.vocoder == "code_hifigan": return CodeHiFiGANVocoder.from_data_cfg(args, 
data_cfg) else: raise ValueError("Unknown vocoder")
KosmosX-API-main
kosmosX/fairseq/fairseq/models/text_to_speech/vocoder.py
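A compact Griffin-Lim sketch built on torch.stft/torch.istft, to illustrate the phase-recovery loop that the GriffinLim module above implements with conv-transpose operations. This is not the fairseq implementation; n_fft, hop, and win values are illustrative:

# Sketch: Griffin-Lim phase recovery with torch.stft/istft (illustrative parameters).
import torch

def griffin_lim(magnitude, n_fft=1024, hop=256, win=1024, n_iter=32):
    # magnitude: (n_fft // 2 + 1, frames) linear-frequency magnitude spectrogram
    window = torch.hann_window(win)
    angles = torch.rand_like(magnitude) * 2 * torch.pi
    spec = magnitude * torch.exp(1j * angles)
    for _ in range(n_iter):
        wav = torch.istft(spec, n_fft, hop_length=hop, win_length=win, window=window)
        rebuilt = torch.stft(wav, n_fft, hop_length=hop, win_length=win,
                             window=window, return_complex=True)
        # keep the target magnitude, adopt the re-estimated phase
        spec = magnitude * torch.exp(1j * rebuilt.angle())
    return torch.istft(spec, n_fft, hop_length=hop, win_length=win, window=window)

wav = griffin_lim(torch.rand(513, 100))
print(wav.shape)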
from argparse import Namespace

import torch
import torch.nn as nn

from fairseq.models.text_to_speech.fastspeech2 import VariancePredictor
from fairseq.models.text_to_speech.hifigan import Generator


class CodeGenerator(Generator):
    def __init__(self, cfg):
        super().__init__(cfg)
        self.dict = nn.Embedding(cfg["num_embeddings"], cfg["embedding_dim"])
        self.multispkr = cfg.get("multispkr", None)
        self.embedder = cfg.get("embedder_params", None)

        if self.multispkr and not self.embedder:
            self.spkr = nn.Embedding(cfg.get("num_speakers", 200), cfg["embedding_dim"])
        elif self.embedder:
            self.spkr = nn.Linear(cfg.get("embedder_dim", 256), cfg["embedding_dim"])

        self.dur_predictor = None
        if cfg.get("dur_predictor_params", None):
            self.dur_predictor = VariancePredictor(
                Namespace(**cfg["dur_predictor_params"])
            )

    @staticmethod
    def _upsample(signal, max_frames):
        if signal.dim() == 3:
            bsz, channels, cond_length = signal.size()
        elif signal.dim() == 2:
            signal = signal.unsqueeze(2)
            bsz, channels, cond_length = signal.size()
        else:
            signal = signal.view(-1, 1, 1)
            bsz, channels, cond_length = signal.size()

        signal = signal.unsqueeze(3).repeat(1, 1, 1, max_frames // cond_length)

        # pad zeros as needed (if signal's shape does not divide completely with max_frames)
        reminder = (max_frames - signal.shape[2] * signal.shape[3]) // signal.shape[3]
        if reminder > 0:
            raise NotImplementedError(
                "Padding condition signal - misalignment between condition features."
            )

        signal = signal.view(bsz, channels, max_frames)
        return signal

    def forward(self, **kwargs):
        x = self.dict(kwargs["code"]).transpose(1, 2)

        if self.dur_predictor and kwargs.get("dur_prediction", False):
            assert x.size(0) == 1, "only support single sample"
            log_dur_pred = self.dur_predictor(x.transpose(1, 2))
            dur_out = torch.clamp(
                torch.round((torch.exp(log_dur_pred) - 1)).long(), min=1
            )
            # B x C x T
            x = torch.repeat_interleave(x, dur_out.view(-1), dim=2)

        if self.multispkr:
            assert (
                "spkr" in kwargs
            ), 'require "spkr" input for multispeaker CodeHiFiGAN vocoder'
            spkr = self.spkr(kwargs["spkr"]).transpose(1, 2)
            spkr = self._upsample(spkr, x.shape[-1])
            x = torch.cat([x, spkr], dim=1)

        for k, feat in kwargs.items():
            if k in ["spkr", "code", "dur_prediction"]:
                continue

            feat = self._upsample(feat, x.shape[-1])
            x = torch.cat([x, feat], dim=1)

        return super().forward(x)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/text_to_speech/codehifigan.py
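A tiny demo of the duration expansion used in CodeGenerator.forward above, where each code frame is repeated by its predicted duration; values are illustrative:

# Sketch: expanding B x C x T features by per-step durations via repeat_interleave.
import torch

x = torch.arange(1, 4).view(1, 1, 3).float()   # B x C x T = 1 x 1 x 3
dur = torch.tensor([[2, 1, 3]])                # one duration per input step
expanded = torch.repeat_interleave(x, dur.view(-1), dim=2)
print(expanded)  # tensor([[[1., 1., 2., 3., 3., 3.]]]) -> length equals dur.sum()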
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch from torch import nn from torch.nn import functional as F from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.modules import LSTMCellWithZoneOut, LocationAttention logger = logging.getLogger(__name__) def encoder_init(m): if isinstance(m, nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu")) class Tacotron2Encoder(FairseqEncoder): def __init__(self, args, src_dict, embed_speaker): super().__init__(src_dict) self.padding_idx = src_dict.pad() self.embed_speaker = embed_speaker self.spk_emb_proj = None if embed_speaker is not None: self.spk_emb_proj = nn.Linear( args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim ) self.embed_tokens = nn.Embedding( len(src_dict), args.encoder_embed_dim, padding_idx=self.padding_idx ) assert args.encoder_conv_kernel_size % 2 == 1 self.convolutions = nn.ModuleList( nn.Sequential( nn.Conv1d( args.encoder_embed_dim, args.encoder_embed_dim, kernel_size=args.encoder_conv_kernel_size, padding=((args.encoder_conv_kernel_size - 1) // 2), ), nn.BatchNorm1d(args.encoder_embed_dim), nn.ReLU(), nn.Dropout(args.encoder_dropout), ) for _ in range(args.encoder_conv_layers) ) self.lstm = nn.LSTM( args.encoder_embed_dim, args.encoder_embed_dim // 2, num_layers=args.encoder_lstm_layers, batch_first=True, bidirectional=True, ) self.apply(encoder_init) def forward(self, src_tokens, src_lengths=None, speaker=None, **kwargs): x = self.embed_tokens(src_tokens) x = x.transpose(1, 2).contiguous() # B x T x C -> B x C x T for conv in self.convolutions: x = conv(x) x = x.transpose(1, 2).contiguous() # B x C x T -> B x T x C src_lengths = src_lengths.cpu().long() x = nn.utils.rnn.pack_padded_sequence(x, src_lengths, batch_first=True) x = self.lstm(x)[0] x = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)[0] encoder_padding_mask = src_tokens.eq(self.padding_idx) if self.embed_speaker is not None: seq_len, bsz, _ = x.size() emb = self.embed_speaker(speaker).expand(seq_len, bsz, -1) x = self.spk_emb_proj(torch.cat([x, emb], dim=2)) return { "encoder_out": [x], # B x T x C "encoder_padding_mask": encoder_padding_mask, # B x T } class Prenet(nn.Module): def __init__(self, in_dim, n_layers, n_units, dropout): super().__init__() self.layers = nn.ModuleList( nn.Sequential(nn.Linear(in_dim if i == 0 else n_units, n_units), nn.ReLU()) for i in range(n_layers) ) self.dropout = dropout def forward(self, x): for layer in self.layers: x = F.dropout(layer(x), p=self.dropout) # always applies dropout return x class Postnet(nn.Module): def __init__(self, in_dim, n_channels, kernel_size, n_layers, dropout): super(Postnet, self).__init__() self.convolutions = nn.ModuleList() assert kernel_size % 2 == 1 for i in range(n_layers): cur_layers = ( [ nn.Conv1d( in_dim if i == 0 else n_channels, n_channels if i < n_layers - 1 else in_dim, kernel_size=kernel_size, padding=((kernel_size - 1) // 2), ), nn.BatchNorm1d(n_channels if i < n_layers - 1 else in_dim), ] + ([nn.Tanh()] if i < n_layers - 1 else []) + [nn.Dropout(dropout)] ) nn.init.xavier_uniform_( cur_layers[0].weight, torch.nn.init.calculate_gain("tanh" if i < n_layers - 1 else "linear"), ) self.convolutions.append(nn.Sequential(*cur_layers)) def forward(self, x): x = x.transpose(1, 2) # B x T 
x C -> B x C x T for conv in self.convolutions: x = conv(x) return x.transpose(1, 2) def decoder_init(m): if isinstance(m, torch.nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("tanh")) class Tacotron2Decoder(FairseqIncrementalDecoder): def __init__(self, args, src_dict): super().__init__(None) self.args = args self.n_frames_per_step = args.n_frames_per_step self.out_dim = args.output_frame_dim * args.n_frames_per_step self.prenet = Prenet( self.out_dim, args.prenet_layers, args.prenet_dim, args.prenet_dropout ) # take prev_context, prev_frame, (speaker embedding) as input self.attention_lstm = LSTMCellWithZoneOut( args.zoneout, args.prenet_dim + args.encoder_embed_dim, args.decoder_lstm_dim, ) # take attention_lstm output, attention_state, encoder_out as input self.attention = LocationAttention( args.attention_dim, args.encoder_embed_dim, args.decoder_lstm_dim, (1 + int(args.attention_use_cumprob)), args.attention_conv_dim, args.attention_conv_kernel_size, ) # take attention_lstm output, context, (gated_latent) as input self.lstm = nn.ModuleList( LSTMCellWithZoneOut( args.zoneout, args.encoder_embed_dim + args.decoder_lstm_dim, args.decoder_lstm_dim, ) for i in range(args.decoder_lstm_layers) ) proj_in_dim = args.encoder_embed_dim + args.decoder_lstm_dim self.feat_proj = nn.Linear(proj_in_dim, self.out_dim) self.eos_proj = nn.Linear(proj_in_dim, 1) self.postnet = Postnet( self.out_dim, args.postnet_conv_dim, args.postnet_conv_kernel_size, args.postnet_layers, args.postnet_dropout, ) self.ctc_proj = None if getattr(args, "ctc_weight", 0.0) > 0.0: self.ctc_proj = nn.Linear(self.out_dim, len(src_dict)) self.apply(decoder_init) def _get_states(self, incremental_state, enc_out): bsz, in_len, _ = enc_out.size() alstm_h = self.get_incremental_state(incremental_state, "alstm_h") if alstm_h is None: alstm_h = enc_out.new_zeros(bsz, self.args.decoder_lstm_dim) alstm_c = self.get_incremental_state(incremental_state, "alstm_c") if alstm_c is None: alstm_c = enc_out.new_zeros(bsz, self.args.decoder_lstm_dim) lstm_h = self.get_incremental_state(incremental_state, "lstm_h") if lstm_h is None: lstm_h = [ enc_out.new_zeros(bsz, self.args.decoder_lstm_dim) for _ in range(self.args.decoder_lstm_layers) ] lstm_c = self.get_incremental_state(incremental_state, "lstm_c") if lstm_c is None: lstm_c = [ enc_out.new_zeros(bsz, self.args.decoder_lstm_dim) for _ in range(self.args.decoder_lstm_layers) ] attn_w = self.get_incremental_state(incremental_state, "attn_w") if attn_w is None: attn_w = enc_out.new_zeros(bsz, in_len) attn_w_cum = self.get_incremental_state(incremental_state, "attn_w_cum") if attn_w_cum is None: attn_w_cum = enc_out.new_zeros(bsz, in_len) return alstm_h, alstm_c, lstm_h, lstm_c, attn_w, attn_w_cum def _get_init_attn_c(self, enc_out, enc_mask): bsz = enc_out.size(0) if self.args.init_attn_c == "zero": return enc_out.new_zeros(bsz, self.args.encoder_embed_dim) elif self.args.init_attn_c == "avg": enc_w = (~enc_mask).type(enc_out.type()) enc_w = enc_w / enc_w.sum(dim=1, keepdim=True) return torch.sum(enc_out * enc_w.unsqueeze(2), dim=1) else: raise ValueError(f"{self.args.init_attn_c} not supported") def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, target_lengths=None, **kwargs, ): enc_mask = encoder_out["encoder_padding_mask"] enc_out = encoder_out["encoder_out"][0] in_len = enc_out.size(1) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:, :] bsz, out_len, _ = prev_output_tokens.size() prenet_out = 
self.prenet(prev_output_tokens) (alstm_h, alstm_c, lstm_h, lstm_c, attn_w, attn_w_cum) = self._get_states( incremental_state, enc_out ) attn_ctx = self._get_init_attn_c(enc_out, enc_mask) attn_out = enc_out.new_zeros(bsz, in_len, out_len) feat_out = enc_out.new_zeros(bsz, out_len, self.out_dim) eos_out = enc_out.new_zeros(bsz, out_len) for t in range(out_len): alstm_in = torch.cat((attn_ctx, prenet_out[:, t, :]), dim=1) alstm_h, alstm_c = self.attention_lstm(alstm_in, (alstm_h, alstm_c)) attn_state = attn_w.unsqueeze(1) if self.args.attention_use_cumprob: attn_state = torch.stack((attn_w, attn_w_cum), dim=1) attn_ctx, attn_w = self.attention(enc_out, enc_mask, alstm_h, attn_state) attn_w_cum = attn_w_cum + attn_w attn_out[:, :, t] = attn_w for i, cur_lstm in enumerate(self.lstm): if i == 0: lstm_in = torch.cat((attn_ctx, alstm_h), dim=1) else: lstm_in = torch.cat((attn_ctx, lstm_h[i - 1]), dim=1) lstm_h[i], lstm_c[i] = cur_lstm(lstm_in, (lstm_h[i], lstm_c[i])) proj_in = torch.cat((attn_ctx, lstm_h[-1]), dim=1) feat_out[:, t, :] = self.feat_proj(proj_in) eos_out[:, t] = self.eos_proj(proj_in).squeeze(1) self.attention.clear_cache() self.set_incremental_state(incremental_state, "alstm_h", alstm_h) self.set_incremental_state(incremental_state, "alstm_c", alstm_c) self.set_incremental_state(incremental_state, "lstm_h", lstm_h) self.set_incremental_state(incremental_state, "lstm_c", lstm_c) self.set_incremental_state(incremental_state, "attn_w", attn_w) self.set_incremental_state(incremental_state, "attn_w_cum", attn_w_cum) post_feat_out = feat_out + self.postnet(feat_out) eos_out = eos_out.view(bsz, out_len, 1) return post_feat_out, eos_out, {"attn": attn_out, "feature_out": feat_out} @register_model("tacotron_2") class Tacotron2Model(FairseqEncoderDecoderModel): """ Implementation for https://arxiv.org/pdf/1712.05884.pdf """ @staticmethod def add_args(parser): # encoder parser.add_argument("--encoder-dropout", type=float) parser.add_argument("--encoder-embed-dim", type=int) parser.add_argument("--encoder-conv-layers", type=int) parser.add_argument("--encoder-conv-kernel-size", type=int) parser.add_argument("--encoder-lstm-layers", type=int) # decoder parser.add_argument("--attention-dim", type=int) parser.add_argument("--attention-conv-dim", type=int) parser.add_argument("--attention-conv-kernel-size", type=int) parser.add_argument("--prenet-dropout", type=float) parser.add_argument("--prenet-layers", type=int) parser.add_argument("--prenet-dim", type=int) parser.add_argument("--postnet-dropout", type=float) parser.add_argument("--postnet-layers", type=int) parser.add_argument("--postnet-conv-dim", type=int) parser.add_argument("--postnet-conv-kernel-size", type=int) parser.add_argument("--init-attn-c", type=str) parser.add_argument("--attention-use-cumprob", action="store_true") parser.add_argument("--zoneout", type=float) parser.add_argument("--decoder-lstm-layers", type=int) parser.add_argument("--decoder-lstm-dim", type=int) parser.add_argument("--output-frame-dim", type=int) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._num_updates = 0 @classmethod def build_model(cls, args, task): embed_speaker = task.get_speaker_embeddings(args) encoder = Tacotron2Encoder(args, task.src_dict, embed_speaker) decoder = Tacotron2Decoder(args, task.src_dict) return cls(encoder, decoder) def forward_encoder(self, src_tokens, src_lengths, **kwargs): return self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) def set_num_updates(self, num_updates): 
super().set_num_updates(num_updates) self._num_updates = num_updates @register_model_architecture("tacotron_2", "tacotron_2") def base_architecture(args): # encoder args.encoder_dropout = getattr(args, "encoder_dropout", 0.5) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_conv_layers = getattr(args, "encoder_conv_layers", 3) args.encoder_conv_kernel_size = getattr(args, "encoder_conv_kernel_size", 5) args.encoder_lstm_layers = getattr(args, "encoder_lstm_layers", 1) # decoder args.attention_dim = getattr(args, "attention_dim", 128) args.attention_conv_dim = getattr(args, "attention_conv_dim", 32) args.attention_conv_kernel_size = getattr(args, "attention_conv_kernel_size", 15) args.prenet_dropout = getattr(args, "prenet_dropout", 0.5) args.prenet_layers = getattr(args, "prenet_layers", 2) args.prenet_dim = getattr(args, "prenet_dim", 256) args.postnet_dropout = getattr(args, "postnet_dropout", 0.5) args.postnet_layers = getattr(args, "postnet_layers", 5) args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512) args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5) args.init_attn_c = getattr(args, "init_attn_c", "zero") args.attention_use_cumprob = getattr(args, "attention_use_cumprob", True) args.zoneout = getattr(args, "zoneout", 0.1) args.decoder_lstm_layers = getattr(args, "decoder_lstm_layers", 2) args.decoder_lstm_dim = getattr(args, "decoder_lstm_dim", 1024) args.output_frame_dim = getattr(args, "output_frame_dim", 80)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/text_to_speech/tacotron2.py
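The Tacotron2Encoder above packs padded batches before its bidirectional LSTM so that padding does not contaminate the recurrent states. A minimal, self-contained demo of that packing round trip (dimensions are illustrative):

# Sketch: pack_padded_sequence / pad_packed_sequence around an LSTM, as in the encoder.
import torch
import torch.nn as nn

x = torch.randn(2, 5, 8)              # B x T x C, second sequence is shorter
src_lengths = torch.tensor([5, 3])    # true lengths, sorted in decreasing order
lstm = nn.LSTM(8, 4, batch_first=True, bidirectional=True)

packed = nn.utils.rnn.pack_padded_sequence(x, src_lengths, batch_first=True)
out, _ = lstm(packed)
out, out_lens = nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
print(out.shape, out_lens)            # (2, 5, 8): forward+backward hidden states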
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .tacotron2 import *  # noqa
from .tts_transformer import *  # noqa
from .fastspeech2 import *  # noqa
KosmosX-API-main
kosmosX/fairseq/fairseq/models/text_to_speech/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch from torch import nn from fairseq import utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture, ) from fairseq.models.text_to_speech.hub_interface import TTSHubInterface from fairseq.models.text_to_speech.tacotron2 import Postnet from fairseq.modules import ( FairseqDropout, LayerNorm, MultiheadAttention, PositionalEmbedding, ) logger = logging.getLogger(__name__) def model_init(m): if isinstance(m, nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu")) def Embedding(num_embeddings, embedding_dim, padding_idx=None): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) return m class PositionwiseFeedForward(nn.Module): def __init__(self, in_dim, hidden_dim, kernel_size, dropout): super().__init__() self.ffn = nn.Sequential( nn.Conv1d( in_dim, hidden_dim, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, ), nn.ReLU(), nn.Conv1d( hidden_dim, in_dim, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, ), ) self.layer_norm = LayerNorm(in_dim) self.dropout = self.dropout_module = FairseqDropout( p=dropout, module_name=self.__class__.__name__ ) def forward(self, x): # B x T x C residual = x x = self.ffn(x.transpose(1, 2)).transpose(1, 2) x = self.dropout(x) return self.layer_norm(x + residual) class FFTLayer(torch.nn.Module): def __init__( self, embed_dim, n_heads, hidden_dim, kernel_size, dropout, attention_dropout ): super().__init__() self.self_attn = MultiheadAttention( embed_dim, n_heads, dropout=attention_dropout, self_attention=True ) self.layer_norm = LayerNorm(embed_dim) self.ffn = PositionwiseFeedForward( embed_dim, hidden_dim, kernel_size, dropout=dropout ) def forward(self, x, padding_mask=None): # B x T x C residual = x x = x.transpose(0, 1) x, _ = self.self_attn( query=x, key=x, value=x, key_padding_mask=padding_mask, need_weights=False ) x = x.transpose(0, 1) x = self.layer_norm(x + residual) return self.ffn(x) class LengthRegulator(nn.Module): def forward(self, x, durations): # x: B x T x C out_lens = durations.sum(dim=1) max_len = out_lens.max() bsz, seq_len, dim = x.size() out = x.new_zeros((bsz, max_len, dim)) for b in range(bsz): indices = [] for t in range(seq_len): indices.extend([t] * utils.item(durations[b, t])) indices = torch.tensor(indices, dtype=torch.long).to(x.device) out_len = utils.item(out_lens[b]) out[b, :out_len] = x[b].index_select(0, indices) return out, out_lens class VariancePredictor(nn.Module): def __init__(self, args): super().__init__() self.conv1 = nn.Sequential( nn.Conv1d( args.encoder_embed_dim, args.var_pred_hidden_dim, kernel_size=args.var_pred_kernel_size, padding=(args.var_pred_kernel_size - 1) // 2, ), nn.ReLU(), ) self.ln1 = nn.LayerNorm(args.var_pred_hidden_dim) self.dropout_module = FairseqDropout( p=args.var_pred_dropout, module_name=self.__class__.__name__ ) self.conv2 = nn.Sequential( nn.Conv1d( args.var_pred_hidden_dim, args.var_pred_hidden_dim, kernel_size=args.var_pred_kernel_size, padding=1, ), nn.ReLU(), ) self.ln2 = nn.LayerNorm(args.var_pred_hidden_dim) self.proj = nn.Linear(args.var_pred_hidden_dim, 1) def forward(self, x): # Input: B x T x C; Output: B x T x = self.conv1(x.transpose(1, 
2)).transpose(1, 2) x = self.dropout_module(self.ln1(x)) x = self.conv2(x.transpose(1, 2)).transpose(1, 2) x = self.dropout_module(self.ln2(x)) return self.proj(x).squeeze(dim=2) class VarianceAdaptor(nn.Module): def __init__(self, args): super().__init__() self.args = args self.length_regulator = LengthRegulator() self.duration_predictor = VariancePredictor(args) self.pitch_predictor = VariancePredictor(args) self.energy_predictor = VariancePredictor(args) n_bins, steps = self.args.var_pred_n_bins, self.args.var_pred_n_bins - 1 self.pitch_bins = torch.linspace(args.pitch_min, args.pitch_max, steps) self.embed_pitch = Embedding(n_bins, args.encoder_embed_dim) self.energy_bins = torch.linspace(args.energy_min, args.energy_max, steps) self.embed_energy = Embedding(n_bins, args.encoder_embed_dim) def get_pitch_emb(self, x, tgt=None, factor=1.0): out = self.pitch_predictor(x) bins = self.pitch_bins.to(x.device) if tgt is None: out = out * factor emb = self.embed_pitch(torch.bucketize(out, bins)) else: emb = self.embed_pitch(torch.bucketize(tgt, bins)) return out, emb def get_energy_emb(self, x, tgt=None, factor=1.0): out = self.energy_predictor(x) bins = self.energy_bins.to(x.device) if tgt is None: out = out * factor emb = self.embed_energy(torch.bucketize(out, bins)) else: emb = self.embed_energy(torch.bucketize(tgt, bins)) return out, emb def forward( self, x, padding_mask, durations=None, pitches=None, energies=None, d_factor=1.0, p_factor=1.0, e_factor=1.0, ): # x: B x T x C log_dur_out = self.duration_predictor(x) dur_out = torch.clamp( torch.round((torch.exp(log_dur_out) - 1) * d_factor).long(), min=0 ) dur_out.masked_fill_(padding_mask, 0) pitch_out, pitch_emb = self.get_pitch_emb(x, pitches, p_factor) x = x + pitch_emb energy_out, energy_emb = self.get_energy_emb(x, energies, e_factor) x = x + energy_emb x, out_lens = self.length_regulator( x, dur_out if durations is None else durations ) return x, out_lens, log_dur_out, pitch_out, energy_out class FastSpeech2Encoder(FairseqEncoder): def __init__(self, args, src_dict, embed_speaker): super().__init__(src_dict) self.args = args self.padding_idx = src_dict.pad() self.n_frames_per_step = args.n_frames_per_step self.out_dim = args.output_frame_dim * args.n_frames_per_step self.embed_speaker = embed_speaker self.spk_emb_proj = None if embed_speaker is not None: self.spk_emb_proj = nn.Linear( args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim ) self.dropout_module = FairseqDropout( p=args.dropout, module_name=self.__class__.__name__ ) self.embed_tokens = Embedding( len(src_dict), args.encoder_embed_dim, padding_idx=self.padding_idx ) self.embed_positions = PositionalEmbedding( args.max_source_positions, args.encoder_embed_dim, self.padding_idx ) self.pos_emb_alpha = nn.Parameter(torch.ones(1)) self.dec_pos_emb_alpha = nn.Parameter(torch.ones(1)) self.encoder_fft_layers = nn.ModuleList( FFTLayer( args.encoder_embed_dim, args.encoder_attention_heads, args.fft_hidden_dim, args.fft_kernel_size, dropout=args.dropout, attention_dropout=args.attention_dropout, ) for _ in range(args.encoder_layers) ) self.var_adaptor = VarianceAdaptor(args) self.decoder_fft_layers = nn.ModuleList( FFTLayer( args.decoder_embed_dim, args.decoder_attention_heads, args.fft_hidden_dim, args.fft_kernel_size, dropout=args.dropout, attention_dropout=args.attention_dropout, ) for _ in range(args.decoder_layers) ) self.out_proj = nn.Linear(args.decoder_embed_dim, self.out_dim) self.postnet = None if args.add_postnet: self.postnet = Postnet( self.out_dim, 
args.postnet_conv_dim, args.postnet_conv_kernel_size, args.postnet_layers, args.postnet_dropout, ) self.apply(model_init) def forward( self, src_tokens, src_lengths=None, speaker=None, durations=None, pitches=None, energies=None, **kwargs, ): x = self.embed_tokens(src_tokens) enc_padding_mask = src_tokens.eq(self.padding_idx) x += self.pos_emb_alpha * self.embed_positions(enc_padding_mask) x = self.dropout_module(x) for layer in self.encoder_fft_layers: x = layer(x, enc_padding_mask) if self.embed_speaker is not None: bsz, seq_len, _ = x.size() emb = self.embed_speaker(speaker).expand(bsz, seq_len, -1) x = self.spk_emb_proj(torch.cat([x, emb], dim=2)) x, out_lens, log_dur_out, pitch_out, energy_out = self.var_adaptor( x, enc_padding_mask, durations, pitches, energies ) dec_padding_mask = lengths_to_padding_mask(out_lens) x += self.dec_pos_emb_alpha * self.embed_positions(dec_padding_mask) for layer in self.decoder_fft_layers: x = layer(x, dec_padding_mask) x = self.out_proj(x) x_post = None if self.postnet is not None: x_post = x + self.postnet(x) return x, x_post, out_lens, log_dur_out, pitch_out, energy_out @register_model("fastspeech2") class FastSpeech2Model(FairseqEncoderModel): """ Implementation for https://arxiv.org/abs/2006.04558 """ NON_AUTOREGRESSIVE = True @classmethod def hub_models(cls): base_url = "http://dl.fbaipublicfiles.com/fairseq/s2" model_ids = [ "fastspeech2-en-ljspeech", "fastspeech2-en-200_speaker-cv4", ] return {i: f"{base_url}/{i}.tar.gz" for i in model_ids} @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", config_yaml="config.yaml", vocoder: str = "griffin_lim", fp16: bool = False, **kwargs, ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), config_yaml=config_yaml, vocoder=vocoder, fp16=fp16, **kwargs, ) return TTSHubInterface(x["args"], x["task"], x["models"][0]) @staticmethod def add_args(parser): parser.add_argument("--dropout", type=float) parser.add_argument("--output-frame-dim", type=int) parser.add_argument("--speaker-embed-dim", type=int) # FFT blocks parser.add_argument("--fft-hidden-dim", type=int) parser.add_argument("--fft-kernel-size", type=int) parser.add_argument("--attention-dropout", type=float) parser.add_argument("--encoder-layers", type=int) parser.add_argument("--encoder-embed-dim", type=int) parser.add_argument("--encoder-attention-heads", type=int) parser.add_argument("--decoder-layers", type=int) parser.add_argument("--decoder-embed-dim", type=int) parser.add_argument("--decoder-attention-heads", type=int) # variance predictor parser.add_argument("--var-pred-n-bins", type=int) parser.add_argument("--var-pred-hidden-dim", type=int) parser.add_argument("--var-pred-kernel-size", type=int) parser.add_argument("--var-pred-dropout", type=float) # postnet parser.add_argument("--add-postnet", action="store_true") parser.add_argument("--postnet-dropout", type=float) parser.add_argument("--postnet-layers", type=int) parser.add_argument("--postnet-conv-dim", type=int) parser.add_argument("--postnet-conv-kernel-size", type=int) def __init__(self, encoder, args, src_dict): super().__init__(encoder) self._num_updates = 0 out_dim = args.output_frame_dim * args.n_frames_per_step self.ctc_proj = None if getattr(args, "ctc_weight", 0.0) > 0.0: self.ctc_proj = nn.Linear(out_dim, len(src_dict)) @classmethod def build_model(cls, args, task): embed_speaker = task.get_speaker_embeddings(args) encoder = 
FastSpeech2Encoder(args, task.src_dict, embed_speaker) return cls(encoder, args, task.src_dict) def set_num_updates(self, num_updates): super().set_num_updates(num_updates) self._num_updates = num_updates def get_normalized_probs(self, net_output, log_probs, sample=None): logits = self.ctc_proj(net_output[0]) if log_probs: return utils.log_softmax(logits.float(), dim=-1) else: return utils.softmax(logits.float(), dim=-1) @register_model_architecture("fastspeech2", "fastspeech2") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.2) args.output_frame_dim = getattr(args, "output_frame_dim", 80) args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 64) # FFT blocks args.fft_hidden_dim = getattr(args, "fft_hidden_dim", 1024) args.fft_kernel_size = getattr(args, "fft_kernel_size", 9) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.encoder_layers = getattr(args, "encoder_layers", 4) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2) args.decoder_layers = getattr(args, "decoder_layers", 4) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2) # variance predictor args.var_pred_n_bins = getattr(args, "var_pred_n_bins", 256) args.var_pred_hidden_dim = getattr(args, "var_pred_hidden_dim", 256) args.var_pred_kernel_size = getattr(args, "var_pred_kernel_size", 3) args.var_pred_dropout = getattr(args, "var_pred_dropout", 0.5) # postnet args.add_postnet = getattr(args, "add_postnet", False) args.postnet_dropout = getattr(args, "postnet_dropout", 0.5) args.postnet_layers = getattr(args, "postnet_layers", 5) args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512) args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/text_to_speech/fastspeech2.py
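The VarianceAdaptor above quantizes predicted pitch/energy with torch.bucketize and looks up an embedding for each bin before adding it to the encoder output. A small demo with illustrative ranges and sizes:

# Sketch: continuous variance -> bin index -> embedding, as in get_pitch_emb.
import torch
import torch.nn as nn

n_bins = 8
bins = torch.linspace(0.0, 1.0, n_bins - 1)     # n_bins - 1 boundaries
embed = nn.Embedding(n_bins, 4)

pitch_pred = torch.tensor([[0.05, 0.4, 0.93]])  # B x T predicted values
indices = torch.bucketize(pitch_pred, bins)     # indices in [0, n_bins - 1]
emb = embed(indices)                            # B x T x 4, added to encoder features
print(indices, emb.shape)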
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Conv1d, ConvTranspose1d from torch.nn.utils import weight_norm, remove_weight_norm LRELU_SLOPE = 0.1 def init_weights(m, mean=0.0, std=0.01): classname = m.__class__.__name__ if classname.find("Conv") != -1: m.weight.data.normal_(mean, std) def get_padding(kernel_size, dilation=1): return (kernel_size * dilation - dilation) // 2 class ResBlock(torch.nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): super(ResBlock, self).__init__() self.convs1 = nn.ModuleList( [ weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]), ) ), weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1]), ) ), weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=dilation[2], padding=get_padding(kernel_size, dilation[2]), ) ), ] ) self.convs1.apply(init_weights) self.convs2 = nn.ModuleList( [ weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1), ) ), weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1), ) ), weight_norm( Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1), ) ), ] ) self.convs2.apply(init_weights) def forward(self, x): for c1, c2 in zip(self.convs1, self.convs2): xt = F.leaky_relu(x, LRELU_SLOPE) xt = c1(xt) xt = F.leaky_relu(xt, LRELU_SLOPE) xt = c2(xt) x = xt + x return x def remove_weight_norm(self): for layer in self.convs1: remove_weight_norm(layer) for layer in self.convs2: remove_weight_norm(layer) class Generator(torch.nn.Module): def __init__(self, cfg): super(Generator, self).__init__() self.num_kernels = len(cfg["resblock_kernel_sizes"]) self.num_upsamples = len(cfg["upsample_rates"]) self.conv_pre = weight_norm( Conv1d( cfg.get("model_in_dim", 80), cfg["upsample_initial_channel"], 7, 1, padding=3, ) ) self.ups = nn.ModuleList() for i, (u, k) in enumerate( zip(cfg["upsample_rates"], cfg["upsample_kernel_sizes"]) ): self.ups.append( weight_norm( ConvTranspose1d( cfg["upsample_initial_channel"] // (2 ** i), cfg["upsample_initial_channel"] // (2 ** (i + 1)), k, u, padding=(k - u) // 2, ) ) ) self.resblocks = nn.ModuleList() for i in range(len(self.ups)): ch = cfg["upsample_initial_channel"] // (2 ** (i + 1)) for k, d in zip( cfg["resblock_kernel_sizes"], cfg["resblock_dilation_sizes"] ): self.resblocks.append(ResBlock(ch, k, d)) self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) self.ups.apply(init_weights) self.conv_post.apply(init_weights) def forward(self, x): x = self.conv_pre(x) for i in range(self.num_upsamples): x = F.leaky_relu(x, LRELU_SLOPE) x = self.ups[i](x) xs = None for j in range(self.num_kernels): if xs is None: xs = self.resblocks[i * self.num_kernels + j](x) else: xs += self.resblocks[i * self.num_kernels + j](x) x = xs / self.num_kernels x = F.leaky_relu(x) x = self.conv_post(x) x = torch.tanh(x) return x def remove_weight_norm(self): print("Removing weight norm...") for layer in self.ups: remove_weight_norm(layer) for layer in self.resblocks: layer.remove_weight_norm() remove_weight_norm(self.conv_pre) remove_weight_norm(self.conv_post)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/text_to_speech/hifigan.py
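The Generator above upsamples each mel frame by the product of upsample_rates, so that product should equal the mel hop size of the feature extractor. A quick arithmetic check; the rates and hop size below are typical HiFiGAN config values used only as an example:

# Sketch: total upsampling factor of the HiFiGAN generator (illustrative config values).
import math

upsample_rates = [8, 8, 2, 2]
hop_size = 256
total_upsampling = math.prod(upsample_rates)
assert total_upsampling == hop_size  # one mel frame -> hop_size waveform samples

mel_frames = 100
print("waveform samples:", mel_frames * total_upsampling)  # 25600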
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import List, Optional import torch from torch import nn from fairseq import utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.models.text_to_speech.hub_interface import TTSHubInterface from fairseq.models.text_to_speech.tacotron2 import Postnet, Prenet from fairseq.modules import ( FairseqDropout, LayerNorm, PositionalEmbedding, TransformerDecoderLayer, TransformerEncoderLayer, ) logger = logging.getLogger(__name__) def encoder_init(m): if isinstance(m, nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu")) def Embedding(num_embeddings, embedding_dim): m = nn.Embedding(num_embeddings, embedding_dim) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) return m class TTSTransformerEncoder(FairseqEncoder): def __init__(self, args, src_dict, embed_speaker): super().__init__(src_dict) self.padding_idx = src_dict.pad() self.embed_speaker = embed_speaker self.spk_emb_proj = None if embed_speaker is not None: self.spk_emb_proj = nn.Linear( args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim ) self.dropout_module = FairseqDropout( p=args.dropout, module_name=self.__class__.__name__ ) self.embed_tokens = nn.Embedding( len(src_dict), args.encoder_embed_dim, padding_idx=self.padding_idx ) assert args.encoder_conv_kernel_size % 2 == 1 self.prenet = nn.ModuleList( nn.Sequential( nn.Conv1d( args.encoder_embed_dim, args.encoder_embed_dim, kernel_size=args.encoder_conv_kernel_size, padding=((args.encoder_conv_kernel_size - 1) // 2), ), nn.BatchNorm1d(args.encoder_embed_dim), nn.ReLU(), nn.Dropout(args.encoder_dropout), ) for _ in range(args.encoder_conv_layers) ) self.prenet_proj = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, args.encoder_embed_dim, self.padding_idx ) self.pos_emb_alpha = nn.Parameter(torch.ones(1)) self.transformer_layers = nn.ModuleList( TransformerEncoderLayer(args) for _ in range(args.encoder_transformer_layers) ) if args.encoder_normalize_before: self.layer_norm = LayerNorm(args.encoder_embed_dim) else: self.layer_norm = None self.apply(encoder_init) def forward(self, src_tokens, src_lengths=None, speaker=None, **kwargs): x = self.embed_tokens(src_tokens) x = x.transpose(1, 2).contiguous() # B x T x C -> B x C x T for conv in self.prenet: x = conv(x) x = x.transpose(1, 2).contiguous() # B x C x T -> B x T x C x = self.prenet_proj(x) padding_mask = src_tokens.eq(self.padding_idx) positions = self.embed_positions(padding_mask) x += self.pos_emb_alpha * positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) for layer in self.transformer_layers: x = layer(x, padding_mask) if self.layer_norm is not None: x = self.layer_norm(x) if self.embed_speaker is not None: seq_len, bsz, _ = x.size() emb = self.embed_speaker(speaker).transpose(0, 1) emb = emb.expand(seq_len, bsz, -1) x = self.spk_emb_proj(torch.cat([x, emb], dim=2)) return { "encoder_out": [x], # T x B x C "encoder_padding_mask": [padding_mask] if padding_mask.any() else [], # B x T "encoder_embedding": [], # B x T x C "encoder_states": [], # List[T x B x C] "src_tokens": [], "src_lengths": [], 
} def decoder_init(m): if isinstance(m, torch.nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("tanh")) class TTSTransformerDecoder(FairseqIncrementalDecoder): def __init__(self, args, src_dict, padding_idx=1): super().__init__(None) self._future_mask = torch.empty(0) self.args = args self.padding_idx = src_dict.pad() if src_dict else padding_idx self.n_frames_per_step = args.n_frames_per_step self.out_dim = args.output_frame_dim * args.n_frames_per_step self.dropout_module = FairseqDropout( args.dropout, module_name=self.__class__.__name__ ) self.embed_positions = PositionalEmbedding( args.max_target_positions, args.decoder_embed_dim, self.padding_idx ) self.pos_emb_alpha = nn.Parameter(torch.ones(1)) self.prenet = nn.Sequential( Prenet( self.out_dim, args.prenet_layers, args.prenet_dim, args.prenet_dropout ), nn.Linear(args.prenet_dim, args.decoder_embed_dim), ) self.n_transformer_layers = args.decoder_transformer_layers self.transformer_layers = nn.ModuleList( TransformerDecoderLayer(args) for _ in range(self.n_transformer_layers) ) if args.decoder_normalize_before: self.layer_norm = LayerNorm(args.decoder_embed_dim) else: self.layer_norm = None self.feat_proj = nn.Linear(args.decoder_embed_dim, self.out_dim) self.eos_proj = nn.Linear(args.decoder_embed_dim, 1) self.postnet = Postnet( self.out_dim, args.postnet_conv_dim, args.postnet_conv_kernel_size, args.postnet_layers, args.postnet_dropout, ) self.ctc_proj = None if getattr(args, "ctc_weight", 0.0) > 0.0: self.ctc_proj = nn.Linear(self.out_dim, len(src_dict)) self.apply(decoder_init) def extract_features( self, prev_outputs, encoder_out=None, incremental_state=None, target_lengths=None, speaker=None, **kwargs, ): alignment_layer = self.n_transformer_layers - 1 self_attn_padding_mask = lengths_to_padding_mask(target_lengths) positions = self.embed_positions( self_attn_padding_mask, incremental_state=incremental_state ) if incremental_state is not None: prev_outputs = prev_outputs[:, -1:, :] self_attn_padding_mask = self_attn_padding_mask[:, -1:] if positions is not None: positions = positions[:, -1:] x = self.prenet(prev_outputs) x += self.pos_emb_alpha * positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) if not self_attn_padding_mask.any(): self_attn_padding_mask = None attn: Optional[torch.Tensor] = None inner_states: List[Optional[torch.Tensor]] = [x] for idx, transformer_layer in enumerate(self.transformer_layers): if incremental_state is None: self_attn_mask = self.buffered_future_mask(x) else: self_attn_mask = None x, layer_attn, _ = transformer_layer( x, encoder_out["encoder_out"][0] if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0) else None, encoder_out["encoder_padding_mask"][0] if ( encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0 ) else None, incremental_state, self_attn_mask=self_attn_mask, self_attn_padding_mask=self_attn_padding_mask, need_attn=bool((idx == alignment_layer)), need_head_weights=bool((idx == alignment_layer)), ) inner_states.append(x) if layer_attn is not None and idx == alignment_layer: attn = layer_attn.float().to(x) if attn is not None: # average probabilities over heads, transpose to # (B, src_len, tgt_len) attn = attn.mean(dim=0).transpose(2, 1) if self.layer_norm is not None: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) return x, {"attn": attn, "inner_states": inner_states} def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, target_lengths=None, 
speaker=None, **kwargs, ): x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, target_lengths=target_lengths, speaker=speaker, **kwargs, ) attn = extra["attn"] feat_out = self.feat_proj(x) bsz, seq_len, _ = x.size() eos_out = self.eos_proj(x) post_feat_out = feat_out + self.postnet(feat_out) return ( post_feat_out, eos_out, { "attn": attn, "feature_out": feat_out, "inner_states": extra["inner_states"], }, ) def get_normalized_probs(self, net_output, log_probs, sample): logits = self.ctc_proj(net_output[2]["feature_out"]) if log_probs: return utils.log_softmax(logits.float(), dim=-1) else: return utils.softmax(logits.float(), dim=-1) def buffered_future_mask(self, tensor): dim = tensor.size(0) # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround. if ( self._future_mask.size(0) == 0 or (not self._future_mask.device == tensor.device) or self._future_mask.size(0) < dim ): self._future_mask = torch.triu( utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1 ) self._future_mask = self._future_mask.to(tensor) return self._future_mask[:dim, :dim] @register_model("tts_transformer") class TTSTransformerModel(FairseqEncoderDecoderModel): """ Implementation for https://arxiv.org/pdf/1809.08895.pdf """ @classmethod def hub_models(cls): base_url = "http://dl.fbaipublicfiles.com/fairseq/s2" model_ids = [ "tts_transformer-en-ljspeech", "tts_transformer-en-200_speaker-cv4", "tts_transformer-es-css10", "tts_transformer-fr-cv7_css10", "tts_transformer-ru-cv7_css10", "tts_transformer-zh-cv7_css10", "tts_transformer-ar-cv7_css10", "tts_transformer-tr-cv7_css10", "tts_transformer-vi-cv7", ] return {i: f"{base_url}/{i}.tar.gz" for i in model_ids} @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", config_yaml="config.yaml", vocoder: str = "griffin_lim", fp16: bool = False, **kwargs, ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), config_yaml=config_yaml, vocoder=vocoder, fp16=fp16, **kwargs, ) return TTSHubInterface(x["args"], x["task"], x["models"][0]) @staticmethod def add_args(parser): parser.add_argument("--dropout", type=float) parser.add_argument("--output-frame-dim", type=int) parser.add_argument("--speaker-embed-dim", type=int) # encoder prenet parser.add_argument("--encoder-dropout", type=float) parser.add_argument("--encoder-conv-layers", type=int) parser.add_argument("--encoder-conv-kernel-size", type=int) # encoder transformer layers parser.add_argument("--encoder-transformer-layers", type=int) parser.add_argument("--encoder-embed-dim", type=int) parser.add_argument("--encoder-ffn-embed-dim", type=int) parser.add_argument("--encoder-normalize-before", action="store_true") parser.add_argument("--encoder-attention-heads", type=int) parser.add_argument("--attention-dropout", type=float) parser.add_argument("--activation-dropout", "--relu-dropout", type=float) parser.add_argument("--activation-fn", type=str, default="relu") # decoder prenet parser.add_argument("--prenet-dropout", type=float) parser.add_argument("--prenet-layers", type=int) parser.add_argument("--prenet-dim", type=int) # decoder postnet parser.add_argument("--postnet-dropout", type=float) parser.add_argument("--postnet-layers", type=int) parser.add_argument("--postnet-conv-dim", type=int) parser.add_argument("--postnet-conv-kernel-size", type=int) # decoder transformer layers 
parser.add_argument("--decoder-transformer-layers", type=int) parser.add_argument("--decoder-embed-dim", type=int) parser.add_argument("--decoder-ffn-embed-dim", type=int) parser.add_argument("--decoder-normalize-before", action="store_true") parser.add_argument("--decoder-attention-heads", type=int) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._num_updates = 0 @classmethod def build_model(cls, args, task): embed_speaker = task.get_speaker_embeddings(args) encoder = TTSTransformerEncoder(args, task.src_dict, embed_speaker) decoder = TTSTransformerDecoder(args, task.src_dict) return cls(encoder, decoder) def forward_encoder(self, src_tokens, src_lengths, speaker=None, **kwargs): return self.encoder( src_tokens, src_lengths=src_lengths, speaker=speaker, **kwargs ) def set_num_updates(self, num_updates): super().set_num_updates(num_updates) self._num_updates = num_updates @register_model_architecture("tts_transformer", "tts_transformer") def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.output_frame_dim = getattr(args, "output_frame_dim", 80) args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 64) # encoder prenet args.encoder_dropout = getattr(args, "encoder_dropout", 0.5) args.encoder_conv_layers = getattr(args, "encoder_conv_layers", 3) args.encoder_conv_kernel_size = getattr(args, "encoder_conv_kernel_size", 5) # encoder transformer layers args.encoder_transformer_layers = getattr(args, "encoder_transformer_layers", 6) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr( args, "encoder_ffn_embed_dim", 4 * args.encoder_embed_dim ) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") # decoder prenet args.prenet_dropout = getattr(args, "prenet_dropout", 0.5) args.prenet_layers = getattr(args, "prenet_layers", 2) args.prenet_dim = getattr(args, "prenet_dim", 256) # decoder postnet args.postnet_dropout = getattr(args, "postnet_dropout", 0.5) args.postnet_layers = getattr(args, "postnet_layers", 5) args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512) args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5) # decoder transformer layers args.decoder_transformer_layers = getattr(args, "decoder_transformer_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", 4 * args.decoder_embed_dim ) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/text_to_speech/tts_transformer.py
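A minimal usage sketch for the TTSTransformerModel.from_pretrained path above, which returns the TTSHubInterface wrapper. The model id comes from hub_models(); the output filename and the torchaudio dependency are assumptions for illustration, and the pretrained archive would be downloaded on first use.

# Hedged sketch: load a pretrained TTS transformer through from_pretrained()
# (which wraps hub_utils.from_pretrained and returns a TTSHubInterface), then
# synthesize a waveform with predict(). Not the only supported workflow.
import torchaudio

from fairseq.models.text_to_speech.tts_transformer import TTSTransformerModel

tts = TTSTransformerModel.from_pretrained(
    "tts_transformer-en-ljspeech",   # one of the ids listed in hub_models()
    vocoder="griffin_lim",           # default vocoder argument in from_pretrained()
    fp16=False,
)
wav, sample_rate = tts.predict("Hello world")  # Tuple[torch.Tensor, int]

# torchaudio.save expects (channels, frames); the waveform is assumed 1-D here.
if wav.dim() == 1:
    wav = wav.unsqueeze(0)
torchaudio.save("example.wav", wav.detach().cpu(), sample_rate)  # illustrative path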
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from pathlib import Path from typing import Optional, Dict, Tuple import random import torch import torch.nn as nn logger = logging.getLogger(__name__) class TTSHubInterface(nn.Module): def __init__(self, cfg, task, model): super().__init__() self.cfg = cfg self.task = task self.model = model self.model.eval() self.update_cfg_with_data_cfg(self.cfg, self.task.data_cfg) self.generator = self.task.build_generator([self.model], self.cfg) @classmethod def phonemize( cls, text: str, lang: Optional[str], phonemizer: Optional[str] = None, preserve_punct: bool = False, to_simplified_zh: bool = False, ): if to_simplified_zh: import hanziconv text = hanziconv.HanziConv.toSimplified(text) if phonemizer == "g2p": import g2p_en g2p = g2p_en.G2p() if preserve_punct: return " ".join("|" if p == " " else p for p in g2p(text)) else: res = [{",": "sp", ";": "sp"}.get(p, p) for p in g2p(text)] return " ".join(p for p in res if p.isalnum()) if phonemizer == "g2pc": import g2pc g2p = g2pc.G2pC() return " ".join([w[3] for w in g2p(text)]) elif phonemizer == "ipa": assert lang is not None import phonemizer from phonemizer.separator import Separator lang_map = {"en": "en-us", "fr": "fr-fr"} return phonemizer.phonemize( text, backend="espeak", language=lang_map.get(lang, lang), separator=Separator(word="| ", phone=" "), ) else: return text @classmethod def tokenize(cls, text: str, tkn_cfg: Dict[str, str]): sentencepiece_model = tkn_cfg.get("sentencepiece_model", None) if sentencepiece_model is not None: assert Path(sentencepiece_model).exists() import sentencepiece as sp spm = sp.SentencePieceProcessor() spm.Load(sentencepiece_model) return " ".join(spm.Encode(text, out_type=str)) else: return text @classmethod def update_cfg_with_data_cfg(cls, cfg, data_cfg): cfg["task"].vocoder = data_cfg.vocoder.get("type", "griffin_lim") @classmethod def get_model_input( cls, task, text: str, speaker: Optional[int] = None, verbose: bool = False ): phonemized = cls.phonemize( text, task.data_cfg.hub.get("lang", None), task.data_cfg.hub.get("phonemizer", None), task.data_cfg.hub.get("preserve_punct", False), task.data_cfg.hub.get("to_simplified_zh", False), ) tkn_cfg = task.data_cfg.bpe_tokenizer tokenized = cls.tokenize(phonemized, tkn_cfg) if verbose: logger.info(f"text: {text}") logger.info(f"phonemized: {phonemized}") logger.info(f"tokenized: {tokenized}") spk = task.data_cfg.hub.get("speaker", speaker) n_speakers = len(task.speaker_to_id or {}) if spk is None and n_speakers > 0: spk = random.randint(0, n_speakers - 1) if spk is not None: spk = max(0, min(spk, n_speakers - 1)) if verbose: logger.info(f"speaker: {spk}") spk = None if spk is None else torch.Tensor([[spk]]).long() src_tokens = task.src_dict.encode_line(tokenized).view(1, -1) src_lengths = torch.Tensor([len(tokenized.split())]).long() return { "net_input": { "src_tokens": src_tokens, "src_lengths": src_lengths, "prev_output_tokens": None, }, "target_lengths": None, "speaker": spk, } @classmethod def get_prediction(cls, task, model, generator, sample) -> Tuple[torch.Tensor, int]: prediction = generator.generate(model, sample) return prediction[0]["waveform"], task.sr def predict( self, text: str, speaker: Optional[int] = None, verbose: bool = False ) -> Tuple[torch.Tensor, int]: sample = self.get_model_input(self.task, text, speaker, verbose=verbose) return 
self.get_prediction(self.task, self.model, self.generator, sample)
KosmosX-API-main
kosmosX/fairseq/fairseq/models/text_to_speech/hub_interface.py
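A small sketch of the text front-end that TTSHubInterface.get_model_input relies on: phonemize() followed by tokenize(). It assumes the optional g2p_en package is installed, and the phoneme string shown in the comment is approximate; an empty tokenizer config is passed so tokenize() returns the text unchanged, whereas a real config would carry a "sentencepiece_model" path.

# Hedged sketch of the preprocessing classmethods defined above.
from fairseq.models.text_to_speech.hub_interface import TTSHubInterface

phonemized = TTSHubInterface.phonemize(
    "Hello, world.", lang="en", phonemizer="g2p", preserve_punct=False
)
# roughly "HH AH0 L OW1 sp W ER1 L D": commas map to "sp", punctuation is dropped

tokenized = TTSHubInterface.tokenize(phonemized, {})  # empty cfg: text passes through
print(phonemized)
print(tokenized)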
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Train a network across multiple GPUs. """ from fairseq.dataclass.configs import FairseqConfig from fairseq.distributed import utils as distributed_utils from fairseq.trainer import Trainer try: from fairseq.model_parallel.megatron.mpu import ( get_data_parallel_rank, get_data_parallel_world_size, get_model_parallel_src_rank, get_cuda_rng_tracker, ) has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False class MegatronTrainer(Trainer): """Main class for model parallel with data parallel training.""" def __init__(self, cfg: FairseqConfig, task, model, criterion, **kwargs): if not has_megatron_submodule: raise ImportError( "\n\nPlease install the megatron submodule:" "\n\n git submodule update --init " "fairseq/model_parallel/megatron" ) super().__init__(cfg, task, model, criterion, **kwargs) def clip_grad_norm(self, clip_norm): def _aggregate_model_parallel_grad_norm(total_norm): total_norm = total_norm ** 2 distributed_utils.all_reduce( total_norm, group=distributed_utils.get_model_parallel_group() ) total_norm = total_norm ** 0.5 return total_norm return self.optimizer.clip_grad_norm( clip_norm, aggregate_norm_fn=_aggregate_model_parallel_grad_norm, ) def save_checkpoint(self, filename, extra_state): """Save all training state in a checkpoint file.""" extra_state["rng_tracker_states"] = get_cuda_rng_tracker().get_states() super().save_checkpoint(filename, extra_state) def load_checkpoint( self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None, reset_meters=False, ): extra_state = super().load_checkpoint( filename, reset_optimizer=reset_optimizer, reset_lr_scheduler=reset_lr_scheduler, optimizer_overrides=optimizer_overrides, reset_meters=reset_meters, ) if extra_state is not None and "rng_tracker_states" in extra_state: get_cuda_rng_tracker().set_states(extra_state["rng_tracker_states"]) return extra_state
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/megatron_trainer.py
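A sketch of the norm aggregation inside MegatronTrainer.clip_grad_norm above: each model-parallel rank computes an L2 norm over its parameter shard, the squared norms are summed across ranks (an all_reduce over the model-parallel group in the real code), and the square root of that sum is the global norm. Plain tensors stand in for the distributed all_reduce here, and the per-rank values are illustrative.

# Hedged, single-process illustration of _aggregate_model_parallel_grad_norm.
import torch

local_norms = [torch.tensor(3.0), torch.tensor(4.0)]  # per-rank shard norms (illustrative)
total_sq = sum(n ** 2 for n in local_norms)           # what all_reduce(SUM) would produce
global_norm = total_sq ** 0.5
print(global_norm)                                    # tensor(5.)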
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import criterions, models, modules # noqa
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import importlib import os # automatically import any Python files in the models/ directory models_dir = os.path.dirname(__file__) for file in os.listdir(models_dir): path = os.path.join(models_dir, file) if ( not file.startswith("_") and not file.startswith(".") and (file.endswith(".py") or os.path.isdir(path)) ): model_name = file[: file.find(".py")] if file.endswith(".py") else file module = importlib.import_module("fairseq.model_parallel.models." + model_name)
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/models/__init__.py
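A sketch of why the package __init__ above imports every module in the directory: importing a module runs its decorators, so registries such as the one behind @register_model get populated as a side effect. The registry dict and decorator below are simplified stand-ins for illustration, not the fairseq implementation.

# Hedged sketch of decorator-based registration driven by module imports.
MODEL_REGISTRY = {}

def register_model(name):
    def wrapper(cls):
        MODEL_REGISTRY[name] = cls  # registration happens at import time
        return cls
    return wrapper

@register_model("toy_model")
class ToyModel:
    pass

print(MODEL_REGISTRY)  # {'toy_model': <class '...ToyModel'>}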
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch.nn as nn from fairseq.model_parallel.modules import ( ModelParallelTransformerDecoderLayer, ModelParallelTransformerEncoderLayer, ) from fairseq.models import register_model from fairseq.models.transformer import ( TransformerDecoder, TransformerEncoder, TransformerModel, ) try: from fairseq.model_parallel.megatron.mpu import ( copy_to_model_parallel_region, gather_from_model_parallel_region, VocabParallelEmbedding, ) has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False logger = logging.getLogger(__name__) @register_model("model_parallel_transformer") class ModelParallelTransformerModel(TransformerModel): """ Model parallel Transformer model. """ @classmethod def build_embedding(cls, args, dictionary, embed_dim, path=None): if not has_megatron_submodule: raise ImportError( "\n\nPlease install the megatron submodule:" "\n\n git submodule update --init " "fairseq/model_parallel/megatron" ) dictionary.pad_to_multiple_(args.model_parallel_size * 8) num_embeddings = len(dictionary) padding_idx = dictionary.pad() def _vocab_init(tensor, **kwargs): nn.init.normal_(tensor, mean=0, std=num_embeddings ** -0.5) nn.init.constant_(tensor[1], 0) emb = VocabParallelEmbedding( num_embeddings, embed_dim, padding_idx, init_method=_vocab_init ) # if provided, load from preloaded dictionaries if path: raise NotImplementedError( "Loading of embedding from path is not supported for model parallel" ) return emb @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return ModelParallelTransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return ModelParallelTransformerDecoder( args, tgt_dict, embed_tokens, no_encoder_attn=getattr(args, "no_cross_attention", False), ) class ModelParallelTransformerEncoder(TransformerEncoder): """ Model parallel Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`ModelParallelTransformerEncoderLayer`. """ def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) if args.no_final_layer_norm: self.layer_norm = None def build_encoder_layer(self, args): return ModelParallelTransformerEncoderLayer(args) class ModelParallelTransformerDecoder(TransformerDecoder): """ Model Parallel Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`ModelParallelTransformerDecoderLayer`. """ def build_decoder_layer(self, args, no_encoder_attn=False): return ModelParallelTransformerDecoderLayer(args, no_encoder_attn) def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if not self.share_input_output_embed: raise NotImplementedError( "Model parallel training currently requires --share-decoder-input-output-embed" ) features = copy_to_model_parallel_region(features) # project back to size of vocabulary x = self.output_projection(features) if getattr(self.args, "criterion") != "vocab_parallel_cross_entropy": x = gather_from_model_parallel_region(x).contiguous() return x
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/models/transformer.py
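A sketch of why build_embedding above calls dictionary.pad_to_multiple_(args.model_parallel_size * 8): the vocabulary has to split evenly across model-parallel ranks for VocabParallelEmbedding. The helper and the numbers below are illustrative, not the fairseq implementation.

# Hedged sketch of padding a vocabulary size to a model-parallel-friendly multiple.
def padded_vocab_size(vocab_size: int, model_parallel_size: int, multiple: int = 8) -> int:
    divisor = model_parallel_size * multiple
    remainder = vocab_size % divisor
    return vocab_size if remainder == 0 else vocab_size + (divisor - remainder)

print(padded_vocab_size(50_265, model_parallel_size=4))  # 50272, divisible by 32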
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from fairseq.model_parallel.models.transformer import ModelParallelTransformerDecoder from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer_lm import TransformerLanguageModel try: from fairseq.model_parallel.megatron.mpu import VocabParallelEmbedding has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False DEFAULT_MAX_TARGET_POSITIONS = 1024 @register_model("model_parallel_transformer_lm") class ModelParallelTransformerLanguageModel(TransformerLanguageModel): @staticmethod def add_args(parser): TransformerLanguageModel.add_args(parser) @classmethod def build_model(cls, args, task): """Build a new model instance.""" if not has_megatron_submodule: raise ImportError( "\n\nPlease install the megatron submodule:" "\n\n git submodule update --init " "fairseq/model_parallel/megatron" ) # make sure all arguments are present in older models base_lm_architecture(args) task.source_dictionary.pad_to_multiple_(args.model_parallel_size * 8) task.target_dictionary.pad_to_multiple_(args.model_parallel_size * 8) if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if getattr(args, "max_target_positions", None) is None: args.max_target_positions = getattr( args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS ) if args.character_embeddings: raise NotImplementedError( "Character embeddings is not supported for model parallel" ) elif args.adaptive_input: raise NotImplementedError( "Adaptive input is not supported for model parallel" ) else: embed_tokens = cls.build_embedding( args, task.source_dictionary, args.decoder_input_dim ) decoder = ModelParallelTransformerDecoder( args, task.target_dictionary, embed_tokens, no_encoder_attn=True, ) return cls(decoder) @classmethod def build_embedding(cls, args, dictionary, embed_dim, path=None): def _vocab_init(tensor, **kwargs): nn.init.normal_(tensor, mean=0, std=embed_dim ** -0.5) nn.init.constant_(tensor[1], 0) embed_tokens = VocabParallelEmbedding( len(dictionary), embed_dim, dictionary.pad(), init_method=_vocab_init ) return embed_tokens def base_lm_architecture(args): # backward compatibility for older model checkpoints if hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.relu_dropout = getattr(args, "relu_dropout", 0.0) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, 
"decoder_attention_heads", 8) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = getattr(args, "character_embeddings", False) args.character_filters = getattr( args, "character_filters", "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", ) args.character_embedding_dim = getattr(args, "character_embedding_dim", 4) args.char_embedder_highway_layers = getattr(args, "char_embedder_highway_layers", 2) args.adaptive_input = getattr(args, "adaptive_input", False) args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0.0) args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0.0) args.add_bos_token = getattr(args, "add_bos_token", False) @register_model_architecture("model_parallel_transformer_lm", "transformer_lm_megatron") def transformer_lm_megatron(args): args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 3072) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 3072 * 4) args.decoder_layers = getattr(args, "decoder_layers", 72) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 32) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_fn = getattr(args, "activation_fn", "gelu") base_lm_architecture(args) @register_model_architecture( "model_parallel_transformer_lm", "transformer_lm_megatron_11b" ) def transformer_lm_megatron_11b(args): args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 3072) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 3072 * 6) args.decoder_layers = getattr(args, "decoder_layers", 72) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 32) args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_fn = getattr(args, "activation_fn", "gelu") base_lm_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/models/transformer_lm.py
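A sketch of the getattr-based defaulting pattern used by base_lm_architecture and the transformer_lm_megatron architectures above: values already present on the args namespace win, and getattr only fills in defaults for attributes that are missing. The namespace contents below are illustrative.

# Hedged illustration of the architecture-default pattern.
from argparse import Namespace

args = Namespace(decoder_layers=24)                               # user override
args.decoder_layers = getattr(args, "decoder_layers", 72)         # keeps 24
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 3072)  # fills default
print(args.decoder_layers, args.decoder_embed_dim)                # 24 3072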
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .model import * # noqa
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.model_parallel.models.pipeline_parallel_transformer.layers import ( Embedding, TransformerDecoderEmbedding, TransformerDecoderLayer, TransformerDecoderOutputLayer, TransformerEncoderEmbedding, TransformerEncoderLayer, TransformerEncoderLayerNorm, ) from fairseq.models import ( BaseFairseqModel, FairseqDecoder, FairseqEncoder, register_model, register_model_architecture, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.models.transformer import ( base_architecture, transformer_iwslt_de_en, transformer_wmt_en_de_big, ) from fairseq.modules import SinusoidalPositionalEmbedding logger = logging.getLogger(__name__) DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 TORCH_PIPE = False RPC_INIT = False def import_pipe(): global TORCH_PIPE global RPC_INIT try: from torch.distributed.pipeline.sync import Pipe # noqa global Pipe from torch.distributed.pipeline.sync.utils import partition_model global partition_model from torch.distributed import rpc import tempfile TORCH_PIPE = True # Initialize single process RPC agent since TORCH_PIPE requires # RRef. RRef depends on RPC being initialized and as a result we initialize # RPC with a single node. tmpfile = tempfile.NamedTemporaryFile() if not RPC_INIT: rpc.init_rpc( name="worker", rank=0, world_size=1, rpc_backend_options=rpc.TensorPipeRpcBackendOptions( init_method="file://{}".format(tmpfile.name), ), ) RPC_INIT = True logger.info("Using torch pipe") except ImportError: try: from fairscale.nn import Pipe # noqa logger.info("Using fairscale pipe") except ImportError: raise ImportError("Please install fairscale with: pip install fairscale") @register_model("pipeline_parallel_transformer") class PipelineParallelTransformerModel(BaseFairseqModel): def __init__(self, encoder, decoder, balance, devices, chunks, checkpoint): import_pipe() super().__init__() assert isinstance(encoder, FairseqEncoder) assert isinstance(decoder, FairseqDecoder) encoder_module_list = ( [encoder.embedding_layer] + list(encoder.encoder_layers) + [encoder.final_layer_norm] ) self.num_encoder_modules = len(encoder_module_list) decoder_module_list = ( [decoder.embedding_layer] + list(decoder.decoder_layers) + [decoder.decoder_output_layer] ) self.num_decoder_modules = len(decoder_module_list) module_list = encoder_module_list + decoder_module_list self.devices = devices if TORCH_PIPE: self.model = Pipe( partition_model(nn.Sequential(*module_list), balance, devices), chunks=chunks, checkpoint=checkpoint, ) else: self.model = Pipe( nn.Sequential(*module_list), balance=balance, devices=devices, chunks=chunks, checkpoint=checkpoint, ) self.encoder_max_positions = self.max_positions_helper( encoder.embedding_layer, "max_source_positions" ) self.decoder_max_positions = self.max_positions_helper( decoder.embedding_layer, "max_target_positions" ) self.adaptive_softmax = getattr(decoder, "adaptive_softmax", None) # Note: To be populated during inference self.encoder = None self.decoder = None def forward(self, src_tokens, src_lengths, prev_output_tokens): if self.training: input_lst = [src_tokens, src_lengths, prev_output_tokens] input = tuple(i.to(self.devices[0], non_blocking=True) for i in input_lst) if TORCH_PIPE: return self.model(input).local_value() 
else: return self.model(input) else: assert self.encoder is not None and self.decoder is not None, ( "encoder and decoder need to be initialized by " + "calling the `prepare_for_inference_()` method" ) encoder_output_tuple = self.encoder(input) return self.decoder(encoder_output_tuple) def prepare_for_inference_(self, cfg): if self.encoder is not None and self.decoder is not None: logger.info("Encoder and Decoder already initialized") return encoder_module_list = [] decoder_module_list = [] module_count = 0 for partition in self.model.partitions: for module in partition: if module_count < self.num_encoder_modules: encoder_module_list.append(module) else: decoder_module_list.append(module) module_count += 1 self.model = None self.encoder = TransformerEncoder( cfg.distributed_training, None, None, encoder_module_list ) self.decoder = TransformerDecoder( cfg.distributed_training, None, None, decoder_module_list=decoder_module_list, ) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') 
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') parser.add_argument('--num-embedding-chunks', type=int, metavar='N', default=1, help='Number of embedding layer chunks (enables more even distribution' 'of optimizer states across data parallel nodes' 'when using optimizer state sharding and' 'a big embedding vocabulary)') # fmt: on @classmethod def build_model_base(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, "max_source_positions"): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, "max_target_positions"): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary def build_embedding(dictionary, embed_dim, path=None, num_embed_chunks=1): assert embed_dim % num_embed_chunks == 0, ( f"Number of embedding chunks = {num_embed_chunks} should be " + f"divisible by the embedding dimension = {embed_dim}" ) assert path is None or num_embed_chunks == 1, ( "Loading embedding from a path with number of embedding chunks > 1" + " is not yet supported" ) num_embeddings = len(dictionary) padding_idx = dictionary.pad() # if provided, load from preloaded dictionaries if path: emb = Embedding(num_embeddings, embed_dim, padding_idx) embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) else: embed_chunk_dim = embed_dim // num_embed_chunks emb = nn.ModuleList() for i in range(num_embed_chunks): emb.append(Embedding(num_embeddings, embed_chunk_dim, padding_idx)) return emb num_embed_chunks = args.num_embedding_chunks if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError("--share-all-embeddings requires a joined dictionary") if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise ValueError( "--share-all-embeddings not compatible with --decoder-embed-path" ) encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path, num_embed_chunks, ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: assert args.share_decoder_input_output_embed or num_embed_chunks == 1, ( "Not sharing decoder I/O embeddings is not yet supported with number of " + "embedding chunks > 1" ) encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path, num_embed_chunks, ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path, num_embed_chunks, ) encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return (encoder, decoder) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) @classmethod def build_model(cls, args, task): encoder, decoder = cls.build_model_base(args, task) return PipelineParallelTransformerModel( 
encoder=encoder, decoder=decoder, balance=utils.eval_str_list(args.pipeline_balance, type=int), devices=utils.eval_str_list(args.pipeline_devices, type=int), chunks=args.pipeline_chunks, checkpoint=args.pipeline_checkpoint, ) def output_layer(self, features, **kwargs): """Project features to the default output size (typically vocabulary size).""" return self.decoder.output_layer(features, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return (self.encoder_max_positions, self.decoder_max_positions) def max_positions_helper( self, embedding_layer, max_positions_field="max_source_positions" ): """Maximum input length supported by the encoder or decoder.""" if embedding_layer.embed_positions is None: return getattr(embedding_layer, max_positions_field) return min( getattr(embedding_layer, max_positions_field), embedding_layer.embed_positions.max_positions, ) def get_normalized_probs(self, net_output, log_probs, sample=None): """Get normalized probabilities (or log probs) from a net's output.""" if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None: if sample is not None: assert "target" in sample target = sample["target"] else: target = None out = self.adaptive_softmax.get_log_prob(net_output, target=target) return out.exp_() if not log_probs else out # A Pipe() module returns a tuple of tensors as the output. # In this case, the tuple has one element - the output tensor of logits logits = net_output if isinstance(net_output, torch.Tensor) else net_output[0] if log_probs: return utils.log_softmax(logits, dim=-1, onnx_trace=False) else: return utils.softmax(logits, dim=-1, onnx_trace=False) def max_decoder_positions(self): """Maximum length supported by the decoder.""" return self.decoder_max_positions def load_state_dict(self, state_dict, strict=True, model_cfg=None): """Copies parameters and buffers from *state_dict* into this module and its descendants. Overrides the method in :class:`nn.Module`. Compared with that method this additionally "upgrades" *state_dicts* from old checkpoints. 
""" self.upgrade_state_dict(state_dict) is_regular_transformer = not any("model.partitions" in k for k in state_dict) if is_regular_transformer: state_dict = self.convert_to_pipeline_parallel_state_dict(state_dict) return super().load_state_dict(state_dict, strict) def convert_to_pipeline_parallel_state_dict(self, state_dict): new_state_dict = self.state_dict() encoder_layer_idx = 0 decoder_layer_idx = 0 encoder_key_suffixes = [ "self_attn.k_proj.weight", "self_attn.k_proj.bias", "self_attn.v_proj.weight", "self_attn.v_proj.bias", "self_attn.q_proj.weight", "self_attn.q_proj.bias", "self_attn.out_proj.weight", "self_attn.out_proj.bias", "self_attn_layer_norm.weight", "self_attn_layer_norm.bias", "fc1.weight", "fc1.bias", "fc2.weight", "fc2.bias", "final_layer_norm.weight", "final_layer_norm.bias", ] decoder_key_suffixes = [ "self_attn.k_proj.weight", "self_attn.k_proj.bias", "self_attn.v_proj.weight", "self_attn.v_proj.bias", "self_attn.q_proj.weight", "self_attn.q_proj.bias", "self_attn.out_proj.weight", "self_attn.out_proj.bias", "self_attn_layer_norm.weight", "self_attn_layer_norm.bias", "encoder_attn.k_proj.weight", "encoder_attn.k_proj.bias", "encoder_attn.v_proj.weight", "encoder_attn.v_proj.bias", "encoder_attn.q_proj.weight", "encoder_attn.q_proj.bias", "encoder_attn.out_proj.weight", "encoder_attn.out_proj.bias", "encoder_attn_layer_norm.weight", "encoder_attn_layer_norm.bias", "fc1.weight", "fc1.bias", "fc2.weight", "fc2.bias", "final_layer_norm.weight", "final_layer_norm.bias", ] for pid, partition in enumerate(self.model.partitions): logger.info(f"Begin Partition {pid}") for mid, module in enumerate(partition): # fmt: off if isinstance(module, TransformerEncoderEmbedding): new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['encoder.embed_tokens.weight'] new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['encoder.embed_positions._float_tensor'] if isinstance(module, TransformerEncoderLayer): for suffix in encoder_key_suffixes: new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'encoder.layers.{encoder_layer_idx}.{suffix}'] encoder_layer_idx += 1 if isinstance(module, TransformerDecoderLayer): for suffix in decoder_key_suffixes: new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'decoder.layers.{decoder_layer_idx}.{suffix}'] decoder_layer_idx += 1 if isinstance(module, TransformerEncoderLayerNorm): if 'encoder.layer_norm.weight' in state_dict: new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.weight'] = state_dict['encoder.layer_norm.weight'] new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.bias'] = state_dict['encoder.layer_norm.bias'] if isinstance(module, TransformerDecoderEmbedding): new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['decoder.embed_tokens.weight'] new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['decoder.embed_positions._float_tensor'] if isinstance(module, TransformerDecoderOutputLayer): new_state_dict[f'model.partitions.{pid}.{mid}.output_projection.weight'] = state_dict['decoder.output_projection.weight'] # fmt: on return new_state_dict class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens, encoder_module_list=None): super().__init__(dictionary) self.register_buffer("version", torch.Tensor([3])) import_pipe() self.use_pipeline = encoder_module_list is not None if not self.use_pipeline: self.embedding_layer = TransformerEncoderEmbedding(args, embed_tokens) self.encoder_layers = nn.Sequential( *[TransformerEncoderLayer(args) for i in range(args.encoder_layers)] ) if isinstance(embed_tokens, nn.ModuleList): emb_dim = sum(e.embedding_dim for e in embed_tokens) else: emb_dim = embed_tokens.embedding_dim self.final_layer_norm = TransformerEncoderLayerNorm(args, emb_dim) else: encoder_balance = utils.eval_str_list( args.pipeline_encoder_balance, type=int ) encoder_devices = utils.eval_str_list( args.pipeline_encoder_devices, type=int ) assert sum(encoder_balance) == len(encoder_module_list), ( f"Sum of encoder_balance={encoder_balance} is not equal " + f"to num_encoder_modules={len(encoder_module_list)}" ) if TORCH_PIPE: self.model = Pipe( module=partition_model( nn.Sequential(*encoder_module_list), encoder_balance, encoder_devices, ), chunks=args.pipeline_chunks, checkpoint=args.pipeline_checkpoint, ) else: self.model = Pipe( module=nn.Sequential(*encoder_module_list), balance=encoder_balance, devices=encoder_devices, chunks=args.pipeline_chunks, checkpoint=args.pipeline_checkpoint, ) def forward(self, src_tokens, src_lengths): """ Args: input_tuple( src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` ) Returns: output_tuple( - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` - prev_output_tokens - **encoder_states** (List[Tensor]): all intermediate hidden states of shape `(src_len, batch, embed_dim)`. Only populated if *return_all_hiddens* is True. ) """ dummy_prev_output_tokens = torch.zeros( 1, dtype=src_tokens.dtype, device=src_tokens.device ) input_tuple = (src_tokens, src_lengths, dummy_prev_output_tokens) if self.use_pipeline: input_tuple = tuple(i.to(self.model.devices[0]) for i in input_tuple) if TORCH_PIPE: encoder_out = self.model(input_tuple).local_value() else: encoder_out = self.model(input_tuple) else: encoder_embed_output_tuple = self.embedding_layer(input_tuple) encoder_layers_output = self.encoder_layers(encoder_embed_output_tuple) encoder_out = self.final_layer_norm(encoder_layers_output) # first element is the encoder output # second element is the encoder padding mask # the remaining elements of EncoderOut are not computed by # the PipelineParallelTransformer return EncoderOut(encoder_out[0], encoder_out[1], None, None, None, None) def reorder_encoder_out(self, encoder_out, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out.encoder_out is not None: encoder_out = encoder_out._replace( encoder_out=encoder_out.encoder_out.index_select(1, new_order) ) if encoder_out.encoder_padding_mask is not None: encoder_out = encoder_out._replace( encoder_padding_mask=encoder_out.encoder_padding_mask.index_select( 0, new_order ) ) if encoder_out.encoder_embedding is not None: encoder_out = encoder_out._replace( encoder_embedding=encoder_out.encoder_embedding.index_select( 0, new_order ) ) if encoder_out.encoder_states is not None: for idx, state in enumerate(encoder_out.encoder_states): encoder_out.encoder_states[idx] = state.index_select(1, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" if self.embedding_layer.embed_positions is None: return self.embedding_layer.max_source_positions return min( self.embedding_layer.max_source_positions, self.embedding_layer.embed_positions.max_positions, ) class TransformerDecoder(FairseqDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__( self, args, dictionary, embed_tokens, no_encoder_attn=False, decoder_module_list=None, ): super().__init__(dictionary) self.register_buffer("version", torch.Tensor([3])) import_pipe() self.use_pipeline = decoder_module_list is not None if not self.use_pipeline: self.embedding_layer = TransformerDecoderEmbedding(args, embed_tokens) self.decoder_layers = nn.Sequential( *[ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(args.decoder_layers) ] ) self.decoder_output_layer = TransformerDecoderOutputLayer( args, embed_tokens, dictionary ) else: decoder_balance = utils.eval_str_list( args.pipeline_decoder_balance, type=int ) decoder_devices = utils.eval_str_list( args.pipeline_decoder_devices, type=int ) assert sum(decoder_balance) == len(decoder_module_list), ( f"Sum of decoder_balance={decoder_balance} is not equal " + f"to num_decoder_modules={len(decoder_module_list)}" ) if TORCH_PIPE: self.model = Pipe( module=partition_model( nn.Sequential(*decoder_module_list), decoder_balance, decoder_devices, ), chunks=args.pipeline_chunks, checkpoint=args.pipeline_checkpoint, ) else: self.model = Pipe( module=nn.Sequential(*decoder_module_list), balance=decoder_balance, devices=decoder_devices, chunks=args.pipeline_chunks, checkpoint=args.pipeline_checkpoint, ) def forward( self, prev_output_tokens, encoder_out=None, ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` features_only (bool, optional): only return features without applying output layer (default: False). 
Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ input_tuple = ( encoder_out.encoder_out, encoder_out.encoder_padding_mask, prev_output_tokens, ) if self.use_pipeline: input_tuple = tuple(i.to(self.model.devices[0]) for i in input_tuple) if TORCH_PIPE: return (self.model(input_tuple).local_value(),) else: return (self.model(input_tuple),) else: embed_layer_output = self.embedding_layer(input_tuple) state = self.decoder_layers(embed_layer_output) return (self.decoder_output_layer(state),) def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embedding_layer.embed_positions is None: return self.embedding_layer.max_target_positions return min( self.embedding_layer.max_target_positions, self.embedding_layer.embed_positions.max_positions, ) def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device or self._future_mask.size(0) < dim ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = "{}.embed_positions.weights".format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict[ "{}.embed_positions._float_tensor".format(name) ] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { "0": "self_attn_layer_norm", "1": "encoder_attn_layer_norm", "2": "final_layer_norm", } for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m) if k in state_dict: state_dict[ "{}.layers.{}.{}.{}".format(name, i, new, m) ] = state_dict[k] del state_dict[k] version_key = "{}.version".format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict @register_model_architecture( "pipeline_parallel_transformer", "transformer_iwslt_de_en_pipeline_parallel" ) def transformer_iwslt_de_en_dist(args): transformer_iwslt_de_en(args) @register_model_architecture( "pipeline_parallel_transformer", "transformer_wmt_en_de_big_pipeline_parallel" ) def transformer_wmt_en_de_big_dist(args): transformer_wmt_en_de_big(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/model.py
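A sketch of the "balance" idea behind the Pipe(...) calls above: the flat nn.Sequential of encoder and decoder modules is split into contiguous partitions whose sizes are given by the balance list, one partition per device. The helper below is a simplified CPU-only stand-in for torch.distributed.pipeline.sync.utils.partition_model, shown only to make the bookkeeping concrete.

# Hedged sketch of splitting a module list according to a pipeline balance.
import torch.nn as nn

def split_by_balance(model: nn.Sequential, balance):
    assert sum(balance) == len(model), "balance must cover every module"
    partitions, idx = [], 0
    children = list(model.children())
    for size in balance:
        partitions.append(nn.Sequential(*children[idx:idx + size]))
        idx += size
    return partitions

layers = nn.Sequential(*[nn.Linear(8, 8) for _ in range(6)])
parts = split_by_balance(layers, balance=[2, 2, 2])  # e.g. one chunk per device
print([len(p) for p in parts])                       # [2, 2, 2]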
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from collections import namedtuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, MultiheadAttention, PositionalEmbedding, ) EncoderOut = namedtuple( "TransformerEncoderOut", [ "encoder_out", # T x B x C "encoder_padding_mask", # B x T "encoder_embedding", # B x T x C "encoder_states", # List[T x B x C] ], ) class TransformerEncoderEmbedding(nn.Module): """Encoder Embedding + Positional Embedding""" def __init__(self, args, embed_tokens): super().__init__() self.dropout = args.dropout self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens if isinstance(embed_tokens, nn.ModuleList): self.padding_idx = embed_tokens[0].padding_idx embed_dim = sum(e.embedding_dim for e in embed_tokens) else: self.padding_idx = embed_tokens.padding_idx embed_dim = embed_tokens.embedding_dim self.embed_scale = math.sqrt(embed_dim) self.embed_positions = ( PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None ) if getattr(args, "layernorm_embedding", False): self.layernorm_embedding = LayerNorm(embed_dim) else: self.layernorm_embedding = None def forward(self, input): # embed tokens and positions src_tokens = input[0] prev_output_tokens = input[2] if isinstance(self.embed_tokens, nn.ModuleList): x_embed_list = [] for embed_tokens_part in self.embed_tokens: x_embed_list.append(embed_tokens_part(src_tokens)) embedded = torch.cat(x_embed_list, dim=-1) else: embedded = self.embed_tokens(src_tokens) x = embed = self.embed_scale * embedded if self.embed_positions is not None: x = embed + self.embed_positions(src_tokens) if self.layernorm_embedding: x = self.layernorm_embedding(x) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) return (x, encoder_padding_mask, prev_output_tokens) class TransformerEncoderLayerNorm(nn.Module): """ Layer norm at the the end of all encoder layers if args.encoder_enormalize_before = True """ def __init__(self, args, embed_dim): super().__init__() if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, input): x = input[0] encoder_padding_mask = input[1] prev_output_tokens = input[2] if self.layer_norm: x = self.layer_norm(x) # keeping track of the incremental_state is not supported yet return (x, encoder_padding_mask, prev_output_tokens) class TransformerDecoderEmbedding(nn.Module): """Decoder Embedding + Positional Embedding""" def __init__(self, args, embed_tokens): super().__init__() self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = ( sum(e.embedding_dim for e in embed_tokens) if isinstance(embed_tokens, nn.ModuleList) else embed_tokens.embedding_dim ) embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = ( embed_tokens[0].padding_idx if isinstance(embed_tokens, nn.ModuleList) else embed_tokens.padding_idx ) self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with 
input_embed_dim self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None ) def forward(self, input): mt_task = False if isinstance(input, tuple): if len(input) == 3: encoder_out = input[0] encoder_padding_mask = input[1] prev_output_tokens = input[2] incremental_state = None # Hardcoding to avoid passing of None objects mt_task = True else: # HACK for now, need to fix (TODO sidgoyal) prev_output_tokens = input[0] # discard "src_lengths" encoder_out = None encoder_padding_mask = None incremental_state = None else: prev_output_tokens = input encoder_out = None encoder_padding_mask = None incremental_state = None positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions if isinstance(self.embed_tokens, nn.ModuleList): x_embed_list = [] for embed_tokens_part in self.embed_tokens: x_embed_list.append(embed_tokens_part(prev_output_tokens)) x = self.embed_scale * torch.cat(x_embed_list, dim=-1) else: x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) if mt_task: return (x, encoder_out, encoder_padding_mask) return x class TransformerDecoderOutputLayer(nn.Module): def __init__(self, args, embed_tokens, dictionary): super().__init__() self.share_input_output_embed = args.share_decoder_input_output_embed self.embed_tokens = embed_tokens self.output_embed_dim = args.decoder_output_dim embed_dim = args.decoder_embed_dim self.project_out_dim = ( Linear(embed_dim, self.output_embed_dim, bias=False) if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None ) self.adaptive_softmax = None if args.adaptive_softmax_cutoff is not None: assert not isinstance(embed_tokens, nn.ModuleList) self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_tokens = nn.Parameter( torch.Tensor(len(dictionary), self.output_embed_dim) ) nn.init.normal_( self.embed_tokens, mean=0, std=self.output_embed_dim ** -0.5 ) if args.decoder_normalize_before and not getattr( args, "no_decoder_final_norm", False ): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, input, apply_final_proj=True): if isinstance(input, tuple): x = input[0] else: x = input if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) if apply_final_proj: x = self.output_layer(x) return x def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: if 
isinstance(self.embed_tokens, nn.ModuleList): output = None for i, emb in enumerate(self.embed_tokens): sidx = i * emb.embedding_dim eidx = (i + 1) * emb.embedding_dim if output is None: output = F.linear(features[:, :, sidx:eidx], emb.weight) else: output += F.linear(features[:, :, sidx:eidx], emb.weight) return output else: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_tokens) else: return features class TransformerEncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) self.activation_dropout = getattr(args, "activation_dropout", 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, "relu_dropout", 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"} for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layer_norms.{}.{}".format(name, old, m) if k in state_dict: state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k] del state_dict[k] def forward(self, input): """ Args: input (Tuple): input[0] (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` input[1] (ByteTensor/FloatTensor): encoder padding mask - binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
input[2] (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing) Returns: output (Tuple): output[0] (Tensor): encoded output of shape `(batch, src_len, embed_dim)` output[1] (ByteTensor/FloatTensor): encoder padding mask output[2] (LongTensor): previous decoder outputs """ x = input[0] encoder_padding_mask = input[1] prev_output_tokens = input[2] residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x, _ = self.self_attn( query=x, key=x, value=x, key_padding_mask=encoder_padding_mask ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return (x, encoder_padding_mask, prev_output_tokens) def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__( self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False ): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True, ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) self.activation_dropout = getattr(args, "activation_dropout", 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, "relu_dropout", 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. 
# TODO remove this once we update apex with the fix export = getattr(args, "char_inputs", False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=getattr(args, "encoder_embed_dim", None), vdim=getattr(args, "encoder_embed_dim", None), dropout=args.attention_dropout, encoder_decoder_attention=True, ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward(self, input): """ Args: input (Tuple): input[0] (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` input[1] (Tensor): encoder output of shape `(batch, src_len, embed_dim)` input[2] (ByteTensor/FloatTensor): encoder padding mask - binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. Returns: output (Tuple): output[0] (Tensor): encoded output of shape `(batch, src_len, embed_dim)` output[1] (ByteTensor/FloatTensor): encoder padding mask output[2] (LongTensor): previous decoder outputs """ # Note: incremental state is not yet supported mt_task = False if isinstance(input, tuple): x = input[0] encoder_out = input[1] encoder_padding_mask = input[2] incremental_state = None mt_task = True else: x = input encoder_out = None encoder_padding_mask = None incremental_state = None if incremental_state is None: self_attn_mask = self.buffered_future_mask(x) else: self_attn_mask = None # TODO: add back prev_self_attn_state, prev_attn_state, # self_attn_padding_mask prev_self_attn_state = None prev_attn_state = None self_attn_padding_mask = None residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, 
training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if mt_task: return (x, encoder_out, encoder_padding_mask) return x def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) if self._future_mask.size(0) < dim: self._future_mask = torch.triu( utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1 ) return self._future_mask[:dim, :dim] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/layers.py
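A brief aside on the decoder layer above: buffered_future_mask caches an upper-triangular matrix of -inf so self-attention cannot peek at future positions, resizing the cache only when a longer sequence arrives. A minimal standalone sketch of the same idea, using plain torch in place of fairseq's utils.fill_with_neg_inf (the helper name causal_mask is illustrative, not part of fairseq):

import torch

def causal_mask(seq_len: int, device=None) -> torch.Tensor:
    # -inf strictly above the diagonal, 0 on and below it; adding this to
    # attention logits blocks attention to future time steps.
    mask = torch.full((seq_len, seq_len), float("-inf"), device=device)
    return torch.triu(mask, diagonal=1)

scores = torch.randn(4, 4)             # (tgt_len, tgt_len) self-attention logits
probs = torch.softmax(scores + causal_mask(4), dim=-1)
print(probs)                           # row i only attends to positions <= i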
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .model import * # noqa
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/models/roberta/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ RoBERTa: A Robustly Optimized BERT Pretraining Approach. """ import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.model_parallel.models.transformer import ModelParallelTransformerEncoder from fairseq.models import register_model, register_model_architecture from fairseq.models.roberta import ( roberta_base_architecture, roberta_prenorm_architecture, RobertaEncoder, RobertaModel, ) from fairseq.modules import LayerNorm try: from fairseq.model_parallel.megatron.mpu import ( copy_to_model_parallel_region, gather_from_model_parallel_region, ColumnParallelLinear, VocabParallelEmbedding, ) has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False logger = logging.getLogger(__name__) @register_model("model_parallel_roberta") class ModelParallelRobertaModel(RobertaModel): def __init__(self, args, encoder): super().__init__(args, encoder) self.classification_heads = nn.ModuleDict() @staticmethod def add_args(parser): RobertaModel.add_args(parser) parser.add_argument( "--no-final-layer-norm", action="store_true", help=( "don't add final layernorm (only applicable when " "--encoder-normalize-before=True" ), ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present base_architecture(args) task.source_dictionary.pad_to_multiple_(args.model_parallel_size * 8) task.target_dictionary.pad_to_multiple_(args.model_parallel_size * 8) if not hasattr(args, "max_positions"): args.max_positions = args.tokens_per_sample if getattr(args, "untie_weights_roberta", False): raise NotImplementedError( "--untie-weights-roberta is not supported in model parallel mode" ) encoder = ModelParallelRobertaEncoder(args, task.source_dictionary) return cls(args, encoder) def forward( self, src_tokens, features_only=False, return_all_hiddens=False, classification_head_name=None, **kwargs ): if classification_head_name is not None: features_only = True x, extra = self.encoder(src_tokens, features_only, return_all_hiddens, **kwargs) if classification_head_name is not None: x = self.classification_heads[classification_head_name](x) return x, extra def register_classification_head( self, name, num_classes=None, inner_dim=None, **kwargs ): """Register a classification head.""" if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: logger.warning( 're-registering head "{}" with num_classes {} (prev: {}) ' "and inner_dim {} (prev: {})".format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = ModelParallelRobertaClassificationHead( self.args.encoder_embed_dim, inner_dim or self.args.encoder_embed_dim, num_classes, self.args.pooler_activation_fn, self.args.pooler_dropout, ) class ModelParallelRobertaLMHead(nn.Module): """Head for masked language modeling.""" def __init__(self, embed_dim, output_dim, activation_fn, weight=None): super().__init__() self.dense = ColumnParallelLinear(embed_dim, embed_dim, gather_output=True) self.activation_fn = utils.get_activation_fn(activation_fn) self.layer_norm = LayerNorm(embed_dim) if weight is None: weight = 
nn.Linear(embed_dim, output_dim, bias=False).weight self.weight = weight self.bias = nn.Parameter(torch.zeros(output_dim)) def forward(self, features, masked_tokens=None, **kwargs): # Only project the unmasked tokens while training, # saves both memory and computation if masked_tokens is not None: features = features[masked_tokens, :] x = self.dense(features) x = self.activation_fn(x) x = self.layer_norm(x) x = copy_to_model_parallel_region(x) # project back to size of vocabulary with bias x = F.linear(x, self.weight) x = gather_from_model_parallel_region(x).contiguous() x = x + self.bias return x class ModelParallelRobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout ): super().__init__() self.dense = ColumnParallelLinear(input_dim, inner_dim, gather_output=True) self.activation_fn = utils.get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x class ModelParallelRobertaEncoder(RobertaEncoder): """RoBERTa encoder.""" def __init__(self, args, dictionary): super().__init__(args, dictionary) assert not self.args.untie_weights_roberta def build_embedding(self, vocab_size, embedding_dim, padding_idx): return VocabParallelEmbedding(vocab_size, embedding_dim, padding_idx) def build_encoder(self, args, dictionary, embed_tokens): return ModelParallelTransformerEncoder(args, dictionary, embed_tokens) def build_lm_head(self, embed_dim, output_dim, activation_fn, weight): return ModelParallelRobertaLMHead(embed_dim, output_dim, activation_fn, weight) @register_model_architecture("model_parallel_roberta", "model_parallel_roberta") def base_architecture(args): args.no_final_layer_norm = getattr(args, "no_final_layer_norm", False) # model parallel RoBERTa defaults to "Pre-LN" formulation roberta_prenorm_architecture(args) # earlier versions of model parallel RoBERTa removed the final layer norm @register_model_architecture("model_parallel_roberta", "model_parallel_roberta_v1") def model_parallel_roberta_v1_architecture(args): args.no_final_layer_norm = getattr(args, "no_final_layer_norm", True) base_architecture(args) @register_model_architecture( "model_parallel_roberta", "model_parallel_roberta_postnorm" ) def model_parallel_roberta_postnorm_architecture(args): # the original BERT/RoBERTa uses the "Post-LN" formulation roberta_base_architecture(args) @register_model_architecture("model_parallel_roberta", "model_parallel_roberta_base") def model_parallel_roberta_base_architecture(args): base_architecture(args) @register_model_architecture("model_parallel_roberta", "model_parallel_roberta_large") def model_parallel_roberta_large_architecture(args): args.encoder_layers = getattr(args, "encoder_layers", 24) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/models/roberta/model.py
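One detail in build_model above is easy to miss: the source and target dictionaries are padded so the vocabulary size divides evenly by model_parallel_size * 8, which is what lets VocabParallelEmbedding shard the embedding table across ranks. A small sketch of that padding arithmetic (the function name padded_vocab_size is illustrative):

def padded_vocab_size(vocab_size: int, model_parallel_size: int) -> int:
    # Mirrors dictionary.pad_to_multiple_(model_parallel_size * 8): dummy
    # symbols are appended until the vocab splits evenly across ranks.
    multiple = model_parallel_size * 8
    remainder = vocab_size % multiple
    return vocab_size if remainder == 0 else vocab_size + (multiple - remainder)

# 50265 % 64 == 25, so 39 dummy symbols are appended for 8-way model parallelism.
assert padded_vocab_size(50265, 8) == 50304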
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, Optional, Tuple import torch from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout from torch import Tensor, nn try: from fairseq.model_parallel.megatron.mpu import ( get_cuda_rng_tracker, get_model_parallel_world_size, ColumnParallelLinear, RowParallelLinear, ) has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False @with_incremental_state class ModelParallelMultiheadAttention(nn.Module): """Model parallel Multi-headed attention. This performs the Multi-headed attention over multiple gpus. See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, self_attention=False, encoder_decoder_attention=False, ): super().__init__() if not has_megatron_submodule: raise ImportError( "\n\nPlease install the megatron submodule:" "\n\n git submodule update --init " "fairseq/model_parallel/megatron" ) self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.model_parallel_size = get_model_parallel_world_size() self.num_heads_partition = num_heads // self.model_parallel_size assert ( self.num_heads_partition * self.model_parallel_size == num_heads ), "Number of heads must be divisible by model parallel size" self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert ( not self.self_attention or self.qkv_same_dim ), "Self-attention requires query, key and value to be of the same size" self.k_proj = ColumnParallelLinear( self.kdim, embed_dim, bias=bias, gather_output=False ) self.v_proj = ColumnParallelLinear( self.vdim, embed_dim, bias=bias, gather_output=False ) self.q_proj = ColumnParallelLinear( embed_dim, embed_dim, bias=bias, gather_output=False ) self.out_proj = RowParallelLinear( embed_dim, embed_dim, bias=bias, input_is_parallel=True ) def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, static_kv: bool = False, attn_mask: Optional[Tensor] = None, **unused_kwargs, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). 
""" tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] is_tpu = query.device.type == "xla" if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads_partition, self.head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads_partition, self.head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads_partition, self.head_dim) .transpose(0, 1) ) if saved_state is not None: # saved states are stored with shape (bsz, num_heads_partition, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view( bsz * self.num_heads_partition, -1, self.head_dim ) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view( bsz * self.num_heads_partition, -1, self.head_dim ) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = ( ModelParallelMultiheadAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) ) saved_state["prev_key"] = k.view( bsz, self.num_heads_partition, -1, self.head_dim ) saved_state["prev_value"] = v.view( bsz, self.num_heads_partition, -1, self.head_dim ) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None src_len = k.size(1) # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. 
if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len attn_weights = torch.bmm(q, k.transpose(1, 2)) assert list(attn_weights.size()) == [ bsz * self.num_heads_partition, tgt_len, src_len, ] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view( bsz, self.num_heads_partition, tgt_len, src_len ) if not is_tpu: attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) else: attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.view( bsz * self.num_heads_partition, tgt_len, src_len ) attn_weights_float = utils.softmax(attn_weights, dim=-1) attn_weights = attn_weights_float.type_as(attn_weights) with get_cuda_rng_tracker().fork(): attn_probs = self.dropout_module(attn_weights) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [ bsz * self.num_heads_partition, tgt_len, self.head_dim, ] embed_dim_partition = embed_dim // self.model_parallel_size attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim_partition) attn = self.out_proj(attn) # return attn_weights None to keep the return type same as single gpu multihead attention # This will be deprecated. attn_weights: Optional[Tensor] = None return attn, attn_weights @staticmethod def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: filler = torch.zeros(batch_size, src_len - prev_key_padding_mask.size(1)) if prev_key_padding_mask.is_cuda: filler = filler.cuda() new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) elif key_padding_mask is not None: filler = torch.zeros(batch_size, src_len - key_padding_mask.size(1)) if key_padding_mask.is_cuda: filler = filler.cuda() new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): if input_buffer[k] is not None: input_buffer[k] = input_buffer[k].index_select(0, new_order) incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = 
self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer)
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/modules/multihead_attention.py
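To make the head partitioning in ModelParallelMultiheadAttention concrete, here is a short shape walk-through on a single process; the variable names are local to this sketch and no Megatron code is needed:

embed_dim, num_heads, world_size = 1024, 16, 4
tgt_len, bsz = 7, 2

head_dim = embed_dim // num_heads              # 64, identical on every rank
num_heads_partition = num_heads // world_size  # 4 heads handled per rank
assert num_heads_partition * world_size == num_heads

# q_proj/k_proj/v_proj are ColumnParallelLinear with gather_output=False, so each
# rank only produces its own slice of the projection ...
per_rank_features = num_heads_partition * head_dim                  # 256
# ... which is reshaped to (bsz * num_heads_partition, seq_len, head_dim) before
# the bmm, giving attention weights of this shape on each rank:
attn_weights_shape = (bsz * num_heads_partition, tgt_len, tgt_len)  # (8, 7, 7)
print(per_rank_features, attn_weights_shape)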
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" from .multihead_attention import ModelParallelMultiheadAttention from .transformer_layer import ( ModelParallelTransformerEncoderLayer, ModelParallelTransformerDecoderLayer, ) __all__ = [ "ModelParallelMultiheadAttention", "ModelParallelTransformerEncoderLayer", "ModelParallelTransformerDecoderLayer", ]
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.model_parallel.modules import ModelParallelMultiheadAttention from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer try: from fairseq.model_parallel.megatron.mpu import ( ColumnParallelLinear, RowParallelLinear, ) has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False class ModelParallelTransformerEncoderLayer(TransformerEncoderLayer): """Encoder layer block over multiple gpus. See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details. """ def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): if q_noise > 0: raise NotImplementedError return ColumnParallelLinear(input_dim, output_dim, gather_output=False) def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): if q_noise > 0: raise NotImplementedError return RowParallelLinear(input_dim, output_dim, input_is_parallel=True) def build_self_attention(self, embed_dim, args, **unused_kwargs): return ModelParallelMultiheadAttention( embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True, ) class ModelParallelTransformerDecoderLayer(TransformerDecoderLayer): """Decoder layer block. See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details. """ def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): if q_noise > 0: raise NotImplementedError return ColumnParallelLinear(input_dim, output_dim, gather_output=False) def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): if q_noise > 0: raise NotImplementedError return RowParallelLinear(input_dim, output_dim, input_is_parallel=True) def build_self_attention(self, embed_dim, args, **unused_kwargs): return ModelParallelMultiheadAttention( embed_dim=embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, self_attention=not getattr(args, "cross_self_attention", False), ) def build_encoder_attention(self, embed_dim, args, **unused_kwargs): return ModelParallelMultiheadAttention( embed_dim=embed_dim, num_heads=args.decoder_attention_heads, kdim=getattr(args, "encoder_embed_dim", None), vdim=getattr(args, "encoder_embed_dim", None), dropout=args.attention_dropout, encoder_decoder_attention=True, )
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/modules/transformer_layer.py
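The reason the layers above can split fc1 by output columns and fc2 by input rows without changing the result is that the elementwise activation commutes with the column split, and RowParallelLinear all-reduces the partial fc2 outputs. A single-process sketch of that identity, with plain matmuls standing in for the parallel linears:

import torch

torch.manual_seed(0)
d_model, d_ffn, world_size = 8, 16, 2
x = torch.randn(3, d_model)
w1 = torch.randn(d_model, d_ffn)   # fc1 weight, split by output columns
w2 = torch.randn(d_ffn, d_model)   # fc2 weight, split by input rows

reference = torch.relu(x @ w1) @ w2

shard = d_ffn // world_size
partials = [
    torch.relu(x @ w1[:, r * shard:(r + 1) * shard]) @ w2[r * shard:(r + 1) * shard, :]
    for r in range(world_size)
]
parallel = sum(partials)   # stands in for RowParallelLinear's all-reduce
assert torch.allclose(reference, parallel, atol=1e-4)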
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion try: from fairseq.model_parallel.megatron.mpu.cross_entropy import ( vocab_parallel_cross_entropy, ) has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False @register_criterion("vocab_parallel_cross_entropy") class VocabParallelCrossEntropyCriterion(FairseqCriterion): def __init__(self, task, sentence_avg): super().__init__(task) self.sentence_avg = sentence_avg if not has_megatron_submodule: raise ImportError( "\n\nPlease install the megatron submodule:" "\n\n git submodule update --init " "fairseq/model_parallel/megatron" ) def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ net_output = model(**sample["net_input"]) target = sample["target"] loss = vocab_parallel_cross_entropy(net_output[0].float(), target) loss = (loss * (target != self.padding_idx)).sum() sample_size = ( sample["target"].size(0) if self.sentence_avg else sample["ntokens"] ) logging_output = { "loss": utils.item(loss.data) if reduce else loss.data, "ntokens": sample["ntokens"], "nsentences": sample["target"].size(0), "sample_size": sample_size, } return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) if sample_size != ntokens: metrics.log_scalar( "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3 ) metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) ) else: metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg) ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/criterions/vocab_parallel_cross_entropy.py
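A single-GPU stand-in for the criterion above may help: F.cross_entropy replaces vocab_parallel_cross_entropy, but the padding mask, the summed loss, and the log(2) conversion performed in reduce_metrics (with sentence_avg off) are the same:

import math
import torch
import torch.nn.functional as F

padding_idx = 1
logits = torch.randn(2, 5, 100)              # (batch, seq_len, vocab)
target = torch.randint(2, 100, (2, 5))
target[0, -2:] = padding_idx                 # pretend the first sample is padded

per_token = F.cross_entropy(
    logits.view(-1, 100), target.view(-1), reduction="none"
).view_as(target)
loss = (per_token * (target != padding_idx)).sum()   # padded positions contribute 0

ntokens = int((target != padding_idx).sum())         # sample_size when sentence_avg is off
loss_base2 = loss.item() / ntokens / math.log(2)     # what reduce_metrics logs as "loss"
print(loss_base2, 2 ** loss_base2)                   # the latter is the derived "ppl"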
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import importlib import os # automatically import any Python files in the criterions/ directory for file in sorted(os.listdir(os.path.dirname(__file__))): if file.endswith(".py") and not file.startswith("_"): module = file[: file.find(".py")] importlib.import_module("fairseq.model_parallel.criterions." + module)
KosmosX-API-main
kosmosX/fairseq/fairseq/model_parallel/criterions/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import ctypes import math import sys from dataclasses import dataclass, field import torch from fairseq.dataclass import FairseqDataclass from fairseq.scoring import BaseScorer, register_scorer from fairseq.scoring.tokenizer import EvaluationTokenizer class BleuStat(ctypes.Structure): _fields_ = [ ("reflen", ctypes.c_size_t), ("predlen", ctypes.c_size_t), ("match1", ctypes.c_size_t), ("count1", ctypes.c_size_t), ("match2", ctypes.c_size_t), ("count2", ctypes.c_size_t), ("match3", ctypes.c_size_t), ("count3", ctypes.c_size_t), ("match4", ctypes.c_size_t), ("count4", ctypes.c_size_t), ] @dataclass class SacrebleuConfig(FairseqDataclass): sacrebleu_tokenizer: EvaluationTokenizer.ALL_TOKENIZER_TYPES = field( default="13a", metadata={"help": "tokenizer"} ) sacrebleu_lowercase: bool = field( default=False, metadata={"help": "apply lowercasing"} ) sacrebleu_char_level: bool = field( default=False, metadata={"help": "evaluate at character level"} ) @register_scorer("sacrebleu", dataclass=SacrebleuConfig) class SacrebleuScorer(BaseScorer): def __init__(self, cfg): super(SacrebleuScorer, self).__init__(cfg) import sacrebleu self.sacrebleu = sacrebleu self.tokenizer = EvaluationTokenizer( tokenizer_type=cfg.sacrebleu_tokenizer, lowercase=cfg.sacrebleu_lowercase, character_tokenization=cfg.sacrebleu_char_level, ) def add_string(self, ref, pred): self.ref.append(self.tokenizer.tokenize(ref)) self.pred.append(self.tokenizer.tokenize(pred)) def _score(self, order=4): if order != 4: raise NotImplementedError # tokenization and lowercasing are performed by self.tokenizer instead. return self.sacrebleu.corpus_bleu(self.pred, [self.ref], tokenize="none") def score(self, order=4): return self._score(order).score def result_string(self, order=4): return self._score(order).format() @dataclass class BleuConfig(FairseqDataclass): pad: int = field(default=1, metadata={"help": "padding index"}) eos: int = field(default=2, metadata={"help": "eos index"}) unk: int = field(default=3, metadata={"help": "unk index"}) @register_scorer("bleu", dataclass=BleuConfig) class Scorer(object): def __init__(self, cfg): self.stat = BleuStat() self.pad = cfg.pad self.eos = cfg.eos self.unk = cfg.unk try: from fairseq import libbleu except ImportError as e: sys.stderr.write( "ERROR: missing libbleu.so. 
run `pip install --editable .`\n" ) raise e self.C = ctypes.cdll.LoadLibrary(libbleu.__file__) self.reset() def reset(self, one_init=False): if one_init: self.C.bleu_one_init(ctypes.byref(self.stat)) else: self.C.bleu_zero_init(ctypes.byref(self.stat)) def add(self, ref, pred): if not isinstance(ref, torch.IntTensor): raise TypeError("ref must be a torch.IntTensor (got {})".format(type(ref))) if not isinstance(pred, torch.IntTensor): raise TypeError("pred must be a torch.IntTensor(got {})".format(type(pred))) # don't match unknown words rref = ref.clone() assert not rref.lt(0).any() rref[rref.eq(self.unk)] = -999 rref = rref.contiguous().view(-1) pred = pred.contiguous().view(-1) self.C.bleu_add( ctypes.byref(self.stat), ctypes.c_size_t(rref.size(0)), ctypes.c_void_p(rref.data_ptr()), ctypes.c_size_t(pred.size(0)), ctypes.c_void_p(pred.data_ptr()), ctypes.c_int(self.pad), ctypes.c_int(self.eos), ) def score(self, order=4): psum = sum( math.log(p) if p > 0 else float("-Inf") for p in self.precision()[:order] ) return self.brevity() * math.exp(psum / order) * 100 def precision(self): def ratio(a, b): return a / b if b > 0 else 0 return [ ratio(self.stat.match1, self.stat.count1), ratio(self.stat.match2, self.stat.count2), ratio(self.stat.match3, self.stat.count3), ratio(self.stat.match4, self.stat.count4), ] def brevity(self): r = self.stat.reflen / self.stat.predlen return min(1, math.exp(1 - r)) def result_string(self, order=4): assert order <= 4, "BLEU scores for order > 4 aren't supported" fmt = "BLEU{} = {:2.2f}, {:2.1f}" for _ in range(1, order): fmt += "/{:2.1f}" fmt += " (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})" bleup = [p * 100 for p in self.precision()[:order]] return fmt.format( order, self.score(order=order), *bleup, self.brevity(), self.stat.predlen / self.stat.reflen, self.stat.predlen, self.stat.reflen )
KosmosX-API-main
kosmosX/fairseq/fairseq/scoring/bleu.py
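A minimal usage sketch for the sacrebleu-backed scorer above, assuming the sacrebleu package is installed. SacrebleuScorer tokenizes with EvaluationTokenizer and then calls corpus_bleu with tokenize="none"; letting sacrebleu apply its default 13a tokenizer directly, as below, is roughly equivalent for plain English text:

import sacrebleu

refs = ["the cat sat on the mat", "hello world"]
hyps = ["the cat sat on a mat", "hello world"]

bleu = sacrebleu.corpus_bleu(hyps, [refs])   # hypotheses first, then reference streams
print(bleu.score)     # corpus-level BLEU, as returned by SacrebleuScorer.score()
print(bleu.format())  # the string result_string() is built from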
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import importlib import os from abc import ABC, abstractmethod from fairseq import registry from omegaconf import DictConfig class BaseScorer(ABC): def __init__(self, cfg): self.cfg = cfg self.ref = [] self.pred = [] def add_string(self, ref, pred): self.ref.append(ref) self.pred.append(pred) @abstractmethod def score(self) -> float: pass @abstractmethod def result_string(self) -> str: pass _build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry( "--scoring", default="bleu" ) def build_scorer(choice, tgt_dict): _choice = choice._name if isinstance(choice, DictConfig) else choice if _choice == "bleu": from fairseq.scoring import bleu return bleu.Scorer( bleu.BleuConfig(pad=tgt_dict.pad(), eos=tgt_dict.eos(), unk=tgt_dict.unk()) ) return _build_scorer(choice) # automatically import any Python files in the current directory for file in sorted(os.listdir(os.path.dirname(__file__))): if file.endswith(".py") and not file.startswith("_"): module = file[: file.find(".py")] importlib.import_module("fairseq.scoring." + module)
KosmosX-API-main
kosmosX/fairseq/fairseq/scoring/__init__.py
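Because register_scorer above comes from registry.setup_registry, adding a new metric is just a matter of decorating a BaseScorer subclass. A hypothetical example (ExactMatchScorer and its config are illustrative, not part of fairseq), mirroring how the wer and chrf scorers register themselves:

from dataclasses import dataclass

from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer


@dataclass
class ExactMatchScorerConfig(FairseqDataclass):
    pass


@register_scorer("exact_match", dataclass=ExactMatchScorerConfig)
class ExactMatchScorer(BaseScorer):
    # Percentage of hypotheses that match their reference exactly.
    def score(self) -> float:
        matches = sum(r == p for r, p in zip(self.ref, self.pred))
        return 100.0 * matches / len(self.ref) if self.ref else 0.0

    def result_string(self) -> str:
        return f"Exact match: {self.score():.2f}"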
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq.dataclass import FairseqDataclass from fairseq.scoring import BaseScorer, register_scorer from fairseq.scoring.tokenizer import EvaluationTokenizer @dataclass class WerScorerConfig(FairseqDataclass): wer_tokenizer: EvaluationTokenizer.ALL_TOKENIZER_TYPES = field( default="none", metadata={"help": "sacreBLEU tokenizer to use for evaluation"} ) wer_remove_punct: bool = field( default=False, metadata={"help": "remove punctuation"} ) wer_char_level: bool = field( default=False, metadata={"help": "evaluate at character level"} ) wer_lowercase: bool = field(default=False, metadata={"help": "lowercasing"}) @register_scorer("wer", dataclass=WerScorerConfig) class WerScorer(BaseScorer): def __init__(self, cfg): super().__init__(cfg) self.reset() try: import editdistance as ed except ImportError: raise ImportError("Please install editdistance to use WER scorer") self.ed = ed self.tokenizer = EvaluationTokenizer( tokenizer_type=self.cfg.wer_tokenizer, lowercase=self.cfg.wer_lowercase, punctuation_removal=self.cfg.wer_remove_punct, character_tokenization=self.cfg.wer_char_level, ) def reset(self): self.distance = 0 self.ref_length = 0 def add_string(self, ref, pred): ref_items = self.tokenizer.tokenize(ref).split() pred_items = self.tokenizer.tokenize(pred).split() self.distance += self.ed.eval(ref_items, pred_items) self.ref_length += len(ref_items) def result_string(self): return f"WER: {self.score():.2f}" def score(self): return 100.0 * self.distance / self.ref_length if self.ref_length > 0 else 0
KosmosX-API-main
kosmosX/fairseq/fairseq/scoring/wer.py
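The WER above is simply the total word-level edit distance divided by the total reference length, scaled to a percentage. A standalone sketch using editdistance directly (the same package the scorer requires):

import editdistance

refs = ["the cat sat on the mat".split(), "hello there".split()]
hyps = ["the cat sat on mat".split(), "hello there".split()]

distance = sum(editdistance.eval(r, h) for r, h in zip(refs, hyps))
ref_length = sum(len(r) for r in refs)
wer = 100.0 * distance / ref_length if ref_length > 0 else 0
print(f"WER: {wer:.2f}")   # one deletion over eight reference words -> 12.50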
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unicodedata import sacrebleu as sb from fairseq.dataclass import ChoiceEnum SACREBLEU_V2_ABOVE = int(sb.__version__[0]) >= 2 class EvaluationTokenizer(object): """A generic evaluation-time tokenizer, which leverages built-in tokenizers in sacreBLEU (https://github.com/mjpost/sacrebleu). It additionally provides lowercasing, punctuation removal and character tokenization, which are applied after sacreBLEU tokenization. Args: tokenizer_type (str): the type of sacreBLEU tokenizer to apply. lowercase (bool): lowercase the text. punctuation_removal (bool): remove punctuation (based on unicode category) from text. character_tokenization (bool): tokenize the text to characters. """ SPACE = chr(32) SPACE_ESCAPE = chr(9601) _ALL_TOKENIZER_TYPES = ( sb.BLEU.TOKENIZERS if SACREBLEU_V2_ABOVE else ["none", "13a", "intl", "zh", "ja-mecab"] ) ALL_TOKENIZER_TYPES = ChoiceEnum(_ALL_TOKENIZER_TYPES) def __init__( self, tokenizer_type: str = "13a", lowercase: bool = False, punctuation_removal: bool = False, character_tokenization: bool = False, ): assert ( tokenizer_type in self._ALL_TOKENIZER_TYPES ), f"{tokenizer_type}, {self._ALL_TOKENIZER_TYPES}" self.lowercase = lowercase self.punctuation_removal = punctuation_removal self.character_tokenization = character_tokenization if SACREBLEU_V2_ABOVE: self.tokenizer = sb.BLEU(tokenize=str(tokenizer_type)).tokenizer else: self.tokenizer = sb.tokenizers.TOKENIZERS[tokenizer_type]() @classmethod def remove_punctuation(cls, sent: str): """Remove punctuation based on Unicode category.""" return cls.SPACE.join( t for t in sent.split(cls.SPACE) if not all(unicodedata.category(c)[0] == "P" for c in t) ) def tokenize(self, sent: str): tokenized = self.tokenizer(sent) if self.punctuation_removal: tokenized = self.remove_punctuation(tokenized) if self.character_tokenization: tokenized = self.SPACE.join( list(tokenized.replace(self.SPACE, self.SPACE_ESCAPE)) ) if self.lowercase: tokenized = tokenized.lower() return tokenized
KosmosX-API-main
kosmosX/fairseq/fairseq/scoring/tokenizer.py
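A short usage sketch for the EvaluationTokenizer above (requires sacrebleu); the expected outputs in the comments follow from the tokenize() steps shown in the class:

from fairseq.scoring.tokenizer import EvaluationTokenizer

tok = EvaluationTokenizer(
    tokenizer_type="13a",
    lowercase=True,
    punctuation_removal=True,
)
print(tok.tokenize("Hello, world!"))   # 13a splits punctuation, which is then removed: "hello world"

char_tok = EvaluationTokenizer(tokenizer_type="none", character_tokenization=True)
print(char_tok.tokenize("abc de"))     # spaces become the escape symbol: "a b c ▁ d e"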
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np from dataclasses import dataclass from fairseq.dataclass import FairseqDataclass from fairseq.scoring import BaseScorer, register_scorer @dataclass class MeteorScorerConfig(FairseqDataclass): pass @register_scorer("meteor", dataclass=MeteorScorerConfig) class MeteorScorer(BaseScorer): def __init__(self, args): super(MeteorScorer, self).__init__(args) try: import nltk except ImportError: raise ImportError("Please install nltk to use METEOR scorer") self.nltk = nltk self.scores = [] def add_string(self, ref, pred): self.ref.append(ref) self.pred.append(pred) def score(self, order=4): self.scores = [ self.nltk.translate.meteor_score.single_meteor_score(r, p) for r, p in zip(self.ref, self.pred) ] return np.mean(self.scores) def result_string(self, order=4): return f"METEOR: {self.score():.4f}"
KosmosX-API-main
kosmosX/fairseq/fairseq/scoring/meteor.py
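A usage sketch for the METEOR backend used above, assuming nltk and its wordnet data are available. Note that recent nltk releases expect pre-tokenized references and hypotheses (hence the .split() below), whereas the scorer above passes raw strings, which only works on older nltk versions:

import numpy as np
from nltk.translate.meteor_score import single_meteor_score

refs = ["the cat sat on the mat", "hello world"]
preds = ["the cat is on the mat", "hello world"]

scores = [single_meteor_score(r.split(), p.split()) for r, p in zip(refs, preds)]
print(f"METEOR: {np.mean(scores):.4f}")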
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass from fairseq.dataclass import FairseqDataclass from fairseq.scoring import BaseScorer, register_scorer @dataclass class ChrFScorerConfig(FairseqDataclass): pass @register_scorer("chrf", dataclass=ChrFScorerConfig) class ChrFScorer(BaseScorer): def __init__(self, args): super(ChrFScorer, self).__init__(args) import sacrebleu self.sacrebleu = sacrebleu def add_string(self, ref, pred): self.ref.append(ref) self.pred.append(pred) def score(self, order=4): return self.result_string(order).score def result_string(self, order=4): if order != 4: raise NotImplementedError return self.sacrebleu.corpus_chrf(self.pred, [self.ref]).format()
KosmosX-API-main
kosmosX/fairseq/fairseq/scoring/chrf.py
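And a matching sketch for the chrF scorer above (assumes sacrebleu is installed); the call mirrors corpus_chrf(self.pred, [self.ref]) in result_string():

import sacrebleu

refs = ["the cat sat on the mat"]
hyps = ["the cat sat on a mat"]

chrf = sacrebleu.corpus_chrf(hyps, [refs])
print(chrf.score)     # character n-gram F-score
print(chrf.format())  # the formatted string ChrFScorer.result_string() returns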
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys from dataclasses import _MISSING_TYPE, dataclass, field from typing import Any, List, Optional import torch from fairseq.dataclass.constants import ( DATASET_IMPL_CHOICES, DDP_BACKEND_CHOICES, DDP_COMM_HOOK_CHOICES, GENERATION_CONSTRAINTS_CHOICES, GENERATION_DECODING_FORMAT_CHOICES, LOG_FORMAT_CHOICES, PIPELINE_CHECKPOINT_CHOICES, PRINT_ALIGNMENT_CHOICES, ZERO_SHARDING_CHOICES, ) from omegaconf import II, MISSING @dataclass class FairseqDataclass: """fairseq base dataclass that supported fetching attributes and metas""" _name: Optional[str] = None @staticmethod def name(): return None def _get_all_attributes(self) -> List[str]: return [k for k in self.__dataclass_fields__.keys()] def _get_meta( self, attribute_name: str, meta: str, default: Optional[Any] = None ) -> Any: return self.__dataclass_fields__[attribute_name].metadata.get(meta, default) def _get_name(self, attribute_name: str) -> str: return self.__dataclass_fields__[attribute_name].name def _get_default(self, attribute_name: str) -> Any: if hasattr(self, attribute_name): if str(getattr(self, attribute_name)).startswith("${"): return str(getattr(self, attribute_name)) elif str(self.__dataclass_fields__[attribute_name].default).startswith( "${" ): return str(self.__dataclass_fields__[attribute_name].default) elif ( getattr(self, attribute_name) != self.__dataclass_fields__[attribute_name].default ): return getattr(self, attribute_name) f = self.__dataclass_fields__[attribute_name] if not isinstance(f.default_factory, _MISSING_TYPE): return f.default_factory() return f.default def _get_type(self, attribute_name: str) -> Any: return self.__dataclass_fields__[attribute_name].type def _get_help(self, attribute_name: str) -> Any: return self._get_meta(attribute_name, "help") def _get_argparse_const(self, attribute_name: str) -> Any: return self._get_meta(attribute_name, "argparse_const") def _get_argparse_alias(self, attribute_name: str) -> Any: return self._get_meta(attribute_name, "argparse_alias") def _get_choices(self, attribute_name: str) -> Any: return self._get_meta(attribute_name, "choices") @classmethod def from_namespace(cls, args): if isinstance(args, cls): return args else: config = cls() for k in config.__dataclass_fields__.keys(): if k.startswith("_"): # private member, skip continue if hasattr(args, k): setattr(config, k, getattr(args, k)) return config @dataclass class CommonConfig(FairseqDataclass): # This is the core dataclass including common parameters shared by all different jobs. Please append your params to other dataclasses if they were # used for a particular purpose or task, such as those dedicated for `distributed training`, `optimization`, etc. 
no_progress_bar: bool = field( default=False, metadata={"help": "disable progress bar"} ) log_interval: int = field( default=100, metadata={ "help": "log progress every N batches (when progress bar is disabled)" }, ) log_format: Optional[LOG_FORMAT_CHOICES] = field( default=None, metadata={"help": "log format to use"} ) log_file: Optional[str] = field( default=None, metadata={"help": "log file to copy metrics to."} ) tensorboard_logdir: Optional[str] = field( default=None, metadata={ "help": "path to save logs for tensorboard, should match --logdir " "of running tensorboard (default: no tensorboard logging)" }, ) wandb_project: Optional[str] = field( default=None, metadata={"help": "Weights and Biases project name to use for logging"}, ) azureml_logging: Optional[bool] = field( default=False, metadata={"help": "Log scalars to AzureML context"}, ) seed: int = field( default=1, metadata={"help": "pseudo random number generator seed"} ) cpu: bool = field(default=False, metadata={"help": "use CPU instead of CUDA"}) tpu: bool = field(default=False, metadata={"help": "use TPU instead of CUDA"}) bf16: bool = field(default=False, metadata={"help": "use bfloat16; implies --tpu"}) memory_efficient_bf16: bool = field( default=False, metadata={ "help": "use a memory-efficient version of BF16 training; implies --bf16" }, ) fp16: bool = field(default=False, metadata={"help": "use FP16"}) memory_efficient_fp16: bool = field( default=False, metadata={ "help": "use a memory-efficient version of FP16 training; implies --fp16" }, ) fp16_no_flatten_grads: bool = field( default=False, metadata={"help": "don't flatten FP16 grads tensor"} ) fp16_init_scale: int = field( default=2 ** 7, metadata={"help": "default FP16 loss scale"} ) fp16_scale_window: Optional[int] = field( default=None, metadata={"help": "number of updates before increasing loss scale"}, ) fp16_scale_tolerance: float = field( default=0.0, metadata={ "help": "pct of updates that can overflow before decreasing the loss scale" }, ) on_cpu_convert_precision: bool = field( default=False, metadata={ "help": "if set, the floating point conversion to fp16/bf16 runs on CPU. " "This reduces bus transfer time and GPU memory usage." 
}, ) min_loss_scale: float = field( default=1e-4, metadata={ "help": "minimum FP16/AMP loss scale, after which training is stopped" }, ) threshold_loss_scale: Optional[float] = field( default=None, metadata={"help": "threshold FP16 loss scale from below"} ) amp: bool = field(default=False, metadata={"help": "use automatic mixed precision"}) amp_batch_retries: int = field( default=2, metadata={ "help": "number of retries of same batch after reducing loss scale with AMP" }, ) amp_init_scale: int = field( default=2 ** 7, metadata={"help": "default AMP loss scale"} ) amp_scale_window: Optional[int] = field( default=None, metadata={"help": "number of updates before increasing AMP loss scale"}, ) user_dir: Optional[str] = field( default=None, metadata={ "help": "path to a python module containing custom extensions (tasks and/or architectures)" }, ) empty_cache_freq: int = field( default=0, metadata={"help": "how often to clear the PyTorch CUDA cache (0 to disable)"}, ) all_gather_list_size: int = field( default=16384, metadata={"help": "number of bytes reserved for gathering stats from workers"}, ) model_parallel_size: int = field( default=1, metadata={"help": "total number of GPUs to parallelize model over"} ) quantization_config_path: Optional[str] = field( default=None, metadata={"help": "path to quantization config file"} ) profile: bool = field( default=False, metadata={"help": "enable autograd profiler emit_nvtx"} ) reset_logging: bool = field( default=False, metadata={ "help": "when using Hydra, reset the logging at the beginning of training" }, ) suppress_crashes: bool = field( default=False, metadata={ "help": "suppress crashes when training with the hydra_train entry point so that the " "main method can return a value (useful for sweeps)" }, ) use_plasma_view: bool = field( default=False, metadata={"help": "Store indices and sizes in shared memory"} ) plasma_path: Optional[str] = field( default="/tmp/plasma", metadata={ "help": "path to run plasma_store, defaults to /tmp/plasma. Paths outside /tmp tend to fail." 
}, ) deepspeed: bool = field( default=False, metadata={"help": "use deepspeed instead of fairseq for training"}, ) zero: int = field( default=0, metadata={"help": "use deepspeed zero stage 1 or 2 instead of fairseq for training"}, ) exit_interval: int = field( default=0, metadata={"help": "exit after this many seconds"}, ) @dataclass class DistributedTrainingConfig(FairseqDataclass): distributed_world_size: int = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "total number of GPUs across all nodes (default: all visible GPUs)" }, ) distributed_num_procs: Optional[int] = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "total number of processes to fork (default: all visible GPUs)" }, ) distributed_rank: Optional[int] = field( default=0, metadata={"help": "rank of the current worker"} ) distributed_backend: str = field( default="nccl", metadata={"help": "distributed backend"} ) distributed_init_method: Optional[str] = field( default=None, metadata={ "help": "typically tcp://hostname:port that will be used to " "establish initial connetion" }, ) distributed_port: int = field( default=-1, metadata={ "help": "port number (not required if using --distributed-init-method)" }, ) device_id: int = field( default=0, metadata={ "help": "which GPU to use (usually configured automatically)", "argparse_alias": "--local_rank", }, ) distributed_no_spawn: bool = field( default=False, metadata={ "help": "do not spawn multiple processes even if multiple GPUs are visible" }, ) ddp_backend: DDP_BACKEND_CHOICES = field( default="pytorch_ddp", metadata={"help": "DistributedDataParallel backend"} ) ddp_comm_hook: DDP_COMM_HOOK_CHOICES = field( default="none", metadata={"help": "communication hook"} ) bucket_cap_mb: int = field( default=25, metadata={"help": "bucket size for reduction"} ) fix_batches_to_gpus: bool = field( default=False, metadata={ "help": "don't shuffle batches between GPUs; this reduces overall " "randomness and may affect precision but avoids the cost of re-reading the data" }, ) find_unused_parameters: bool = field( default=False, metadata={ "help": "disable unused parameter detection (not applicable to " "--ddp-backend=legacy_ddp)" }, ) gradient_as_bucket_view: bool = field( default=False, metadata={ "help": "when set to True, gradients will be views pointing to different offsets of allreduce communication buckets. This can reduce peak memory usage, where the saved memory size will be equal to the total gradients size. " "--gradient-as-bucket-view=gradient_as_bucket_view)" }, ) fast_stat_sync: bool = field( default=False, metadata={"help": "[deprecated] this is now defined per Criterion"}, ) heartbeat_timeout: int = field( default=-1, metadata={ "help": "kill the job if no progress is made in N seconds; " "set to -1 to disable" }, ) broadcast_buffers: bool = field( default=False, metadata={ "help": "Copy non-trainable parameters between GPUs, such as " "batchnorm population statistics" }, ) slowmo_momentum: Optional[float] = field( default=None, metadata={ "help": "SlowMo momentum term; by default use 0.0 for 16 GPUs, " "0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs" }, ) slowmo_base_algorithm: str = field( default="localsgd", metadata={ "help": "Base algorithm. Either 'localsgd' or 'sgp'. 
Please refer " "to the documentation of 'slowmo_base_algorithm' parameter in " "https://fairscale.readthedocs.io/en/latest/api/experimental/nn/slowmo_ddp.html " "for more details" }, ) localsgd_frequency: int = field( default=3, metadata={"help": "Local SGD allreduce frequency"} ) nprocs_per_node: int = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "number of GPUs in each node. An allreduce operation across GPUs in " "a node is very fast. Hence, we do allreduce across GPUs in a node, " "and gossip across different nodes" }, ) pipeline_model_parallel: bool = field( default=False, metadata={"help": "if set, use pipeline model parallelism across GPUs"}, ) pipeline_balance: Optional[str] = field( default=None, metadata={ "help": "partition the model into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_balance) " "should equal the total number of layers in the model" }, ) pipeline_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. The length of this list should " "equal the length of the --pipeline-balance argument" }, ) pipeline_chunks: Optional[int] = field( default=0, metadata={"help": "microbatch count for pipeline model parallelism"} ) pipeline_encoder_balance: Optional[str] = field( default=None, metadata={ "help": "partition the pipeline parallel encoder into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_encoder_balance) " "should equal the total number of encoder layers in the model" }, ) pipeline_encoder_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. The length of this list should " "equal the length of the --pipeline-encoder-balance argument" }, ) pipeline_decoder_balance: Optional[str] = field( default=None, metadata={ "help": "partition the pipeline parallel decoder into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_decoder_balance) " "should equal the total number of decoder layers in the model" }, ) pipeline_decoder_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. 
The length of this list should " "equal the length of the --pipeline-decoder-balance argument" }, ) pipeline_checkpoint: PIPELINE_CHECKPOINT_CHOICES = field( default="never", metadata={"help": "checkpointing mode for pipeline model parallelism"}, ) zero_sharding: ZERO_SHARDING_CHOICES = field( default="none", metadata={"help": "ZeRO sharding"} ) fp16: bool = II("common.fp16") memory_efficient_fp16: bool = II("common.memory_efficient_fp16") tpu: bool = II("common.tpu") # configuration for --ddp-backend=fully_sharded no_reshard_after_forward: bool = field( default=False, metadata={"help": "don't reshard parameters after forward pass"}, ) fp32_reduce_scatter: bool = field( default=False, metadata={"help": "reduce-scatter grads in FP32"}, ) cpu_offload: bool = field( default=False, metadata={"help": "offload FP32 params to CPU"} ) use_sharded_state: bool = field( default=False, metadata={"help": "use sharded checkpoint files"}, ) not_fsdp_flatten_parameters: bool = field( default=False, metadata={"help": "not flatten parameter param for fsdp"}, ) # group.add_argument('--deepspeed', nargs='?', const=True, default=False, # help="Enable DeepSpeed with auto-generated config with flag and " \ # "no argument, or pass an argument to a ds_config json to use.") # group.add_argument("--zero", default=0, type=int, help="enable a specific ZeRO stage") # group.add_argument('--exit-interval', type=int, default=None, # help='Exit the program after the iteration is divisible ' # 'by this value.') @dataclass class DatasetConfig(FairseqDataclass): num_workers: int = field( default=1, metadata={"help": "how many subprocesses to use for data loading"} ) skip_invalid_size_inputs_valid_test: bool = field( default=False, metadata={"help": "ignore too long or too short lines in valid and test set"}, ) max_tokens: Optional[int] = field( default=None, metadata={"help": "maximum number of tokens in a batch"} ) batch_size: Optional[int] = field( default=None, metadata={ "help": "number of examples in a batch", "argparse_alias": "--max-sentences", }, ) required_batch_size_multiple: int = field( default=8, metadata={"help": "batch size will be a multiplier of this value"} ) required_seq_len_multiple: int = field( default=1, metadata={ "help": "maximum sequence length in batch will be a multiplier of this value" }, ) dataset_impl: Optional[DATASET_IMPL_CHOICES] = field( default=None, metadata={"help": "output dataset implementation"} ) data_buffer_size: int = field( default=10, metadata={"help": "Number of batches to preload"} ) train_subset: str = field( default="train", metadata={"help": "data subset to use for training (e.g. train, valid, test)"}, ) valid_subset: str = field( default="valid", metadata={ "help": "comma separated list of data subsets to use for validation" " (e.g. train, valid, test)" }, ) combine_valid_subsets: Optional[bool] = field( default=None, metadata={ "help": "comma separated list of data subsets to use for validation" " (e.g. 
train, valid, test)", "argparse_alias": "--combine-val", }, ) ignore_unused_valid_subsets: Optional[bool] = field( default=False, metadata={"help": "do not raise error if valid subsets are ignored"}, ) validate_interval: int = field( default=1, metadata={"help": "validate every N epochs"} ) validate_interval_updates: int = field( default=0, metadata={"help": "validate every N updates"} ) validate_after_updates: int = field( default=0, metadata={"help": "dont validate until reaching this many updates"} ) fixed_validation_seed: Optional[int] = field( default=None, metadata={"help": "specified random seed for validation"} ) disable_validation: bool = field( default=False, metadata={"help": "disable validation"} ) max_tokens_valid: Optional[int] = field( default=II("dataset.max_tokens"), metadata={ "help": "maximum number of tokens in a validation batch" " (defaults to --max-tokens)" }, ) batch_size_valid: Optional[int] = field( default=II("dataset.batch_size"), metadata={ "help": "batch size of the validation batch (defaults to --batch-size)", "argparse_alias": "--max-sentences-valid", }, ) max_valid_steps: Optional[int] = field( default=None, metadata={"help": "How many batches to evaluate", "argparse_alias": "--nval"}, ) curriculum: int = field( default=0, metadata={"help": "don't shuffle batches for first N epochs"} ) gen_subset: str = field( default="test", metadata={"help": "data subset to generate (train, valid, test)"}, ) num_shards: int = field( default=1, metadata={"help": "shard generation over N shards"} ) shard_id: int = field( default=0, metadata={"help": "id of the shard to generate (id < num_shards)"} ) grouped_shuffling: bool = field( default=False, metadata={ "help": "shuffle batches in groups of num_shards to enable similar sequence lengths on each GPU worker when batches are sorted by length", }, ) update_epoch_batch_itr: bool = field( default=II("dataset.grouped_shuffling"), metadata={ "help": "if true then prevents the reuse the epoch batch iterator by setting can_reuse_epoch_itr to false, defaults to --grouped-shuffling )", }, ) update_ordered_indices_seed: bool = field( default=False, metadata={ "help": "if true then increment seed with epoch for getting batch iterators, defautls to False.", }, ) @dataclass class OptimizationConfig(FairseqDataclass): max_epoch: int = field( default=0, metadata={"help": "force stop training at specified epoch"} ) max_update: int = field( default=0, metadata={"help": "force stop training at specified update"} ) stop_time_hours: float = field( default=0, metadata={ "help": "force stop training after specified cumulative time (if >0)" }, ) clip_norm: float = field( default=0.0, metadata={"help": "clip threshold of gradients"} ) sentence_avg: bool = field( default=False, metadata={ "help": "normalize gradients by the number of sentences in a batch" " (default is to normalize by number of tokens)" }, ) update_freq: List[int] = field( default_factory=lambda: [1], metadata={"help": "update parameters every N_i batches, when in epoch i"}, ) lr: List[float] = field( default_factory=lambda: [0.25], metadata={ "help": "learning rate for the first N epochs; all epochs >N using LR_N" " (note: this may be interpreted differently depending on --lr-scheduler)" }, ) stop_min_lr: float = field( default=-1.0, metadata={"help": "stop training when the learning rate reaches this minimum"}, ) use_bmuf: bool = field( default=False, metadata={ "help": "specify global optimizer for syncing models on different GPUs/shards" }, ) skip_remainder_batch: 
Optional[bool] = field( default=False, metadata={ "help": "if set, include the last (partial) batch of each epoch in training" " (default is to skip it)." }, ) @dataclass class CheckpointConfig(FairseqDataclass): save_dir: str = field( default="checkpoints", metadata={"help": "path to save checkpoints"} ) restore_file: str = field( default="checkpoint_last.pt", metadata={ "help": "filename from which to load checkpoint " "(default: <save-dir>/checkpoint_last.pt" }, ) continue_once: Optional[str] = field( default=None, metadata={ "help": "continues from this checkpoint, unless a checkpoint indicated in 'restore_file' option is present" }, ) finetune_from_model: Optional[str] = field( default=None, metadata={ "help": "finetune from a pretrained model; note that meters and lr scheduler will be reset" }, ) reset_dataloader: bool = field( default=False, metadata={ "help": "if set, does not reload dataloader state from the checkpoint" }, ) reset_lr_scheduler: bool = field( default=False, metadata={ "help": "if set, does not load lr scheduler state from the checkpoint" }, ) reset_meters: bool = field( default=False, metadata={"help": "if set, does not load meters from the checkpoint"}, ) reset_optimizer: bool = field( default=False, metadata={"help": "if set, does not load optimizer state from the checkpoint"}, ) optimizer_overrides: str = field( default="{}", metadata={ "help": "a dictionary used to override optimizer args when loading a checkpoint" }, ) save_interval: int = field( default=1, metadata={"help": "save a checkpoint every N epochs"} ) save_interval_updates: int = field( default=0, metadata={"help": "save a checkpoint (and validate) every N updates"} ) keep_interval_updates: int = field( default=-1, metadata={ "help": "keep the last N checkpoints saved with --save-interval-updates" }, ) keep_interval_updates_pattern: int = field( default=-1, metadata={ "help": "when used with --keep-interval-updates, skips deleting " "any checkpoints with update X where " "X %% keep_interval_updates_pattern == 0" }, ) keep_last_epochs: int = field( default=-1, metadata={"help": "keep last N epoch checkpoints"} ) keep_best_checkpoints: int = field( default=-1, metadata={"help": "keep best N checkpoints based on scores"} ) no_save: bool = field( default=False, metadata={"help": "don't save models or checkpoints"} ) no_epoch_checkpoints: bool = field( default=False, metadata={"help": "only store last and best checkpoints"} ) no_last_checkpoints: bool = field( default=False, metadata={"help": "don't store last checkpoints"} ) no_save_optimizer_state: bool = field( default=False, metadata={"help": "don't save optimizer-state as part of checkpoint"}, ) best_checkpoint_metric: str = field( default="loss", metadata={"help": 'metric to use for saving "best" checkpoints'} ) maximize_best_checkpoint_metric: bool = field( default=False, metadata={ "help": 'select the largest metric value for saving "best" checkpoints' }, ) patience: int = field( default=-1, metadata={ "help": ( "early stop training if valid performance doesn't " "improve for N consecutive validation runs; note " "that this is influenced by --validate-interval" ) }, ) checkpoint_suffix: str = field( default="", metadata={"help": "suffix to add to the checkpoint file name"} ) checkpoint_shard_count: int = field( default=1, metadata={ "help": "Number of shards containing the checkpoint - " "if the checkpoint is over 300GB, it is preferable " "to split it into shards to prevent OOM on CPU while loading " "the checkpoint" }, ) 
load_checkpoint_on_all_dp_ranks: bool = field( default=False, metadata={ "help": "load checkpoints on all data parallel devices " "(default: only load on rank 0 and broadcast to other devices)" }, ) write_checkpoints_asynchronously: bool = field( default=False, metadata={ "help": ( "Write checkpoints asynchronously in a separate " "thread. NOTE: This feature is currently being tested." ), "argparse_alias": "--save-async", }, ) model_parallel_size: int = II("common.model_parallel_size") @dataclass class FairseqBMUFConfig(FairseqDataclass): block_lr: float = field( default=1, metadata={"help": "block learning rate for bmuf"} ) block_momentum: float = field( default=0.875, metadata={"help": "block momentum for bmuf"} ) global_sync_iter: int = field( default=50, metadata={"help": "Iteration for syncing global model"} ) warmup_iterations: int = field( default=500, metadata={"help": "warmup iterations for model to broadcast"} ) use_nbm: bool = field( default=False, metadata={"help": "Specify whether you want to use classical BM / Nesterov BM"}, ) average_sync: bool = field( default=False, metadata={ "help": "Specify whether you want to average the local momentum after each sync" }, ) distributed_world_size: int = II("distributed_training.distributed_world_size") @dataclass class GenerationConfig(FairseqDataclass): beam: int = field( default=5, metadata={"help": "beam size"}, ) nbest: int = field( default=1, metadata={"help": "number of hypotheses to output"}, ) max_len_a: float = field( default=0, metadata={ "help": "generate sequences of maximum length ax + b, where x is the source length" }, ) max_len_b: int = field( default=200, metadata={ "help": "generate sequences of maximum length ax + b, where x is the source length" }, ) min_len: int = field( default=1, metadata={"help": "minimum generation length"}, ) match_source_len: bool = field( default=False, metadata={"help": "generations should match the source length"}, ) unnormalized: bool = field( default=False, metadata={"help": "compare unnormalized hypothesis scores"}, ) no_early_stop: bool = field( default=False, metadata={"help": "deprecated"}, ) no_beamable_mm: bool = field( default=False, metadata={"help": "don't use BeamableMM in attention layers"}, ) lenpen: float = field( default=1, metadata={ "help": "length penalty: <1.0 favors shorter, >1.0 favors longer sentences" }, ) unkpen: float = field( default=0, metadata={ "help": "unknown word penalty: <0 produces more unks, >0 produces fewer" }, ) replace_unk: Optional[str] = field( default=None, metadata={ "help": "perform unknown replacement (optionally with alignment dictionary)", "argparse_const": "@@ ", }, ) sacrebleu: bool = field( default=False, metadata={"help": "score with sacrebleu"}, ) score_reference: bool = field( default=False, metadata={"help": "just score the reference translation"}, ) prefix_size: int = field( default=0, metadata={"help": "initialize generation by target prefix of given length"}, ) no_repeat_ngram_size: int = field( default=0, metadata={ "help": "ngram blocking such that this size ngram cannot be repeated in the generation" }, ) sampling: bool = field( default=False, metadata={"help": "sample hypotheses instead of using beam search"}, ) sampling_topk: int = field( default=-1, metadata={"help": "sample from top K likely next words instead of all words"}, ) sampling_topp: float = field( default=-1.0, metadata={ "help": "sample from the smallest set whose cumulative probability mass exceeds p for next words" }, ) constraints: 
Optional[GENERATION_CONSTRAINTS_CHOICES] = field( default=None, metadata={ "help": "enables lexically constrained decoding", "argparse_const": "ordered", }, ) temperature: float = field( default=1.0, metadata={"help": "temperature for generation"}, ) diverse_beam_groups: int = field( default=-1, metadata={"help": "number of groups for Diverse Beam Search"}, ) diverse_beam_strength: float = field( default=0.5, metadata={"help": "strength of diversity penalty for Diverse Beam Search"}, ) diversity_rate: float = field( default=-1.0, metadata={"help": "strength of diversity penalty for Diverse Siblings Search"}, ) print_alignment: Optional[PRINT_ALIGNMENT_CHOICES] = field( default=None, metadata={ "help": "if set, uses attention feedback to compute and print alignment to source tokens " "(valid options are: hard, soft, otherwise treated as hard alignment)", "argparse_const": "hard", }, ) print_step: bool = field( default=False, metadata={"help": "print steps"}, ) lm_path: Optional[str] = field( default=None, metadata={"help": "path to lm checkpoint for lm fusion"}, ) lm_weight: float = field( default=0.0, metadata={"help": "weight for lm probs for lm fusion"}, ) # arguments for iterative refinement generator iter_decode_eos_penalty: float = field( default=0.0, metadata={"help": "if > 0.0, it penalized early-stopping in decoding."}, ) iter_decode_max_iter: int = field( default=10, metadata={"help": "maximum iterations for iterative refinement."}, ) iter_decode_force_max_iter: bool = field( default=False, metadata={ "help": "if set, run exact the maximum number of iterations without early stop" }, ) iter_decode_with_beam: int = field( default=1, metadata={ "help": "if > 1, model will generate translations varying by the lengths." }, ) iter_decode_with_external_reranker: bool = field( default=False, metadata={ "help": "if set, the last checkpoint are assumed to be a reranker to rescore the translations" }, ) retain_iter_history: bool = field( default=False, metadata={ "help": "if set, decoding returns the whole history of iterative refinement" }, ) retain_dropout: bool = field( default=False, metadata={"help": "Use dropout at inference time"}, ) # temporarily set to Any until https://github.com/facebookresearch/hydra/issues/1117 is fixed # retain_dropout_modules: Optional[List[str]] = field( retain_dropout_modules: Any = field( default=None, metadata={ "help": "if set, only retain dropout for the specified modules; " "if not set, then dropout will be retained for all modules" }, ) # special decoding format for advanced decoding. decoding_format: Optional[GENERATION_DECODING_FORMAT_CHOICES] = field( default=None, metadata={"help": "special decoding format for advanced decoding."}, ) no_seed_provided: bool = field( default=False, metadata={"help": "if set, dont use seed for initializing random generators"}, ) @dataclass class CommonEvalConfig(FairseqDataclass): path: Optional[str] = field( default=None, metadata={"help": "path(s) to model file(s), colon separated"}, ) post_process: Optional[str] = field( default=None, metadata={ "help": ( "post-process text by removing BPE, letter segmentation, etc. " "Valid options can be found in fairseq.data.utils.post_process." 
), "argparse_const": "subword_nmt", "argparse_alias": "--remove-bpe", }, ) quiet: bool = field(default=False, metadata={"help": "only print final scores"}) model_overrides: str = field( default="{}", metadata={ "help": "a dictionary used to override model args at generation that were used during model training" }, ) results_path: Optional[str] = field( default=None, metadata={"help": "path to save eval results (optional)"} ) @dataclass class EvalLMConfig(FairseqDataclass): output_word_probs: bool = field( default=False, metadata={ "help": "if set, outputs words and their predicted log probabilities to standard output" }, ) output_word_stats: bool = field( default=False, metadata={ "help": "if set, outputs word statistics such as word count, average probability, etc" }, ) context_window: int = field( default=0, metadata={ "help": "ensures that every evaluated token has access to a context of at least this size, if possible" }, ) softmax_batch: int = field( default=sys.maxsize, metadata={ "help": "if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory" }, ) @dataclass class InteractiveConfig(FairseqDataclass): buffer_size: int = field( default=0, metadata={ "help": "read this many sentences into a buffer before processing them" }, ) input: str = field( default="-", metadata={"help": "file to read from; use - for stdin"}, ) @dataclass class EMAConfig(FairseqDataclass): store_ema: bool = field( default=False, metadata={help: "store exponential moving average shadow model"} ) ema_decay: float = field( default=0.9999, metadata={"help": "decay for exponential moving average model"} ) ema_start_update: int = field( default=0, metadata={"help": "start EMA update after this many model updates"} ) ema_seed_model: Optional[str] = field( default=None, metadata={ "help": "Seed to load EMA model from. " "Used to load EMA model separately from the actual model." }, ) ema_update_freq: int = field( default=1, metadata={"help": "Do EMA update every this many model updates"} ) ema_fp32: bool = field( default=False, metadata={"help": "If true, store EMA model in fp32 even if model is in fp16"}, ) @dataclass class FairseqConfig(FairseqDataclass): common: CommonConfig = CommonConfig() common_eval: CommonEvalConfig = CommonEvalConfig() distributed_training: DistributedTrainingConfig = DistributedTrainingConfig() dataset: DatasetConfig = DatasetConfig() optimization: OptimizationConfig = OptimizationConfig() checkpoint: CheckpointConfig = CheckpointConfig() bmuf: FairseqBMUFConfig = FairseqBMUFConfig() generation: GenerationConfig = GenerationConfig() eval_lm: EvalLMConfig = EvalLMConfig() interactive: InteractiveConfig = InteractiveConfig() model: Any = MISSING task: Any = None criterion: Any = None optimizer: Any = None lr_scheduler: Any = None scoring: Any = None bpe: Any = None tokenizer: Any = None ema: EMAConfig = EMAConfig()
KosmosX-API-main
kosmosX/fairseq/fairseq/dataclass/configs.py
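The dataclasses above nest into the single FairseqConfig tree consumed by the rest of the framework. A minimal usage sketch (not from this repository), assuming omegaconf is installed; the override values are purely illustrative:

# Sketch: materialize the structured config tree and override a few fields.
from omegaconf import OmegaConf

from fairseq.dataclass.configs import FairseqConfig

cfg = OmegaConf.structured(FairseqConfig())  # full tree with dataclass defaults
cfg.generation.beam = 10                     # widen the decoding beam
cfg.optimization.lr = [5e-4]                 # per-epoch learning-rate list
cfg.checkpoint.save_dir = "/tmp/ckpts"       # hypothetical output directory
print(OmegaConf.to_yaml(cfg.generation))     # interpolations print unresolved here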
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import logging from hydra.core.config_store import ConfigStore from fairseq.dataclass.configs import FairseqConfig from omegaconf import DictConfig, OmegaConf logger = logging.getLogger(__name__) def hydra_init(cfg_name="config") -> None: cs = ConfigStore.instance() cs.store(name=f"{cfg_name}", node=FairseqConfig) for k in FairseqConfig.__dataclass_fields__: v = FairseqConfig.__dataclass_fields__[k].default try: cs.store(name=k, node=v) except BaseException: logger.error(f"{k} - {v}") raise def add_defaults(cfg: DictConfig) -> None: """This function adds default values that are stored in dataclasses that hydra doesn't know about""" from fairseq.registry import REGISTRIES from fairseq.tasks import TASK_DATACLASS_REGISTRY from fairseq.models import ARCH_MODEL_NAME_REGISTRY, MODEL_DATACLASS_REGISTRY from fairseq.dataclass.utils import merge_with_parent from typing import Any OmegaConf.set_struct(cfg, False) for k, v in FairseqConfig.__dataclass_fields__.items(): field_cfg = cfg.get(k) if field_cfg is not None and v.type == Any: dc = None if isinstance(field_cfg, str): field_cfg = DictConfig({"_name": field_cfg}) field_cfg.__dict__["_parent"] = field_cfg.__dict__["_parent"] name = getattr(field_cfg, "_name", None) if k == "task": dc = TASK_DATACLASS_REGISTRY.get(name) elif k == "model": name = ARCH_MODEL_NAME_REGISTRY.get(name, name) dc = MODEL_DATACLASS_REGISTRY.get(name) elif k in REGISTRIES: dc = REGISTRIES[k]["dataclass_registry"].get(name) if dc is not None: cfg[k] = merge_with_parent(dc, field_cfg)
KosmosX-API-main
kosmosX/fairseq/fairseq/dataclass/initialize.py
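hydra_init registers FairseqConfig (and each of its top-level groups) in Hydra's ConfigStore, and add_defaults later back-fills dataclass defaults that Hydra does not know about. A rough sketch of composing a config against that schema, assuming the Hydra 1.0 experimental API used elsewhere in this package; the override values are illustrative:

# Sketch: register the schema, then compose a config with overrides.
from hydra.experimental import compose, initialize

from fairseq.dataclass.initialize import hydra_init

hydra_init("config")  # store the FairseqConfig schema under the name "config"
with initialize(config_path=None):
    cfg = compose("config", overrides=["optimization.max_epoch=3", "generation.beam=10"])
print(cfg.optimization.max_epoch, cfg.generation.beam)  # 3 10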
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from enum import Enum, EnumMeta from typing import List class StrEnumMeta(EnumMeta): # this is workaround for submitit pickling leading to instance checks failing in hydra for StrEnum, see # https://github.com/facebookresearch/hydra/issues/1156 @classmethod def __instancecheck__(cls, other): return "enum" in str(type(other)) class StrEnum(Enum, metaclass=StrEnumMeta): def __str__(self): return self.value def __eq__(self, other: str): return self.value == other def __repr__(self): return self.value def __hash__(self): return hash(str(self)) def ChoiceEnum(choices: List[str]): """return the Enum class used to enforce list of choices""" return StrEnum("Choices", {k: k for k in choices}) LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none", "simple", "tqdm"]) DDP_BACKEND_CHOICES = ChoiceEnum( [ "c10d", # alias for pytorch_ddp "fully_sharded", # FullyShardedDataParallel from fairscale "legacy_ddp", "no_c10d", # alias for legacy_ddp "pytorch_ddp", "slowmo", ] ) DDP_COMM_HOOK_CHOICES = ChoiceEnum(["none", "fp16"]) DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta", "huffman"]) GENERATION_CONSTRAINTS_CHOICES = ChoiceEnum(["ordered", "unordered"]) GENERATION_DECODING_FORMAT_CHOICES = ChoiceEnum( ["unigram", "ensemble", "vote", "dp", "bs"] ) ZERO_SHARDING_CHOICES = ChoiceEnum(["none", "os"]) PIPELINE_CHECKPOINT_CHOICES = ChoiceEnum(["always", "never", "except_last"]) PRINT_ALIGNMENT_CHOICES = ChoiceEnum(["hard", "soft"])
KosmosX-API-main
kosmosX/fairseq/fairseq/dataclass/constants.py
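ChoiceEnum is what the *_CHOICES constants above are built from; its members stringify and compare like their raw values. A small sketch (the color choices are made up for illustration):

# Sketch: define a choice enum and use it like a plain string.
from fairseq.dataclass.constants import ChoiceEnum

COLOR_CHOICES = ChoiceEnum(["red", "green", "blue"])  # hypothetical choices
c = COLOR_CHOICES.red
assert str(c) == "red" and c == "red"     # StrEnum compares against raw strings
print([x.value for x in COLOR_CHOICES])   # ['red', 'green', 'blue']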
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .configs import FairseqDataclass from .constants import ChoiceEnum __all__ = [ "FairseqDataclass", "ChoiceEnum", ]
KosmosX-API-main
kosmosX/fairseq/fairseq/dataclass/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import ast import inspect import logging import os import re from argparse import ArgumentError, ArgumentParser, Namespace from dataclasses import _MISSING_TYPE, MISSING, is_dataclass from enum import Enum from typing import Any, Dict, List, Optional, Tuple, Type from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.configs import FairseqConfig from hydra.core.global_hydra import GlobalHydra from hydra.experimental import compose, initialize from omegaconf import DictConfig, OmegaConf, open_dict, _utils logger = logging.getLogger(__name__) def eval_str_list(x, x_type=float): if x is None: return None if isinstance(x, str): if len(x) == 0: return [] x = ast.literal_eval(x) try: return list(map(x_type, x)) except TypeError: return [x_type(x)] def interpret_dc_type(field_type): if isinstance(field_type, str): raise RuntimeError("field should be a type") if field_type == Any: return str typestring = str(field_type) if re.match( r"(typing.|^)Union\[(.*), NoneType\]$", typestring ) or typestring.startswith("typing.Optional"): return field_type.__args__[0] return field_type def gen_parser_from_dataclass( parser: ArgumentParser, dataclass_instance: FairseqDataclass, delete_default: bool = False, with_prefix: Optional[str] = None, ) -> None: """ convert a dataclass instance to tailing parser arguments. If `with_prefix` is provided, prefix all the keys in the resulting parser with it. It means that we are building a flat namespace from a structured dataclass (see transformer_config.py for example). """ def argparse_name(name: str): if name == "data" and (with_prefix is None or with_prefix == ""): # normally data is positional args, so we don't add the -- nor the prefix return name if name == "_name": # private member, skip return None full_name = "--" + name.replace("_", "-") if with_prefix is not None and with_prefix != "": # if a prefix is specified, construct the prefixed arg name full_name = with_prefix + "-" + full_name[2:] # strip -- when composing return full_name def get_kwargs_from_dc( dataclass_instance: FairseqDataclass, k: str ) -> Dict[str, Any]: """k: dataclass attributes""" kwargs = {} field_type = dataclass_instance._get_type(k) inter_type = interpret_dc_type(field_type) field_default = dataclass_instance._get_default(k) if isinstance(inter_type, type) and issubclass(inter_type, Enum): field_choices = [t.value for t in list(inter_type)] else: field_choices = None field_help = dataclass_instance._get_help(k) field_const = dataclass_instance._get_argparse_const(k) if isinstance(field_default, str) and field_default.startswith("${"): kwargs["default"] = field_default else: if field_default is MISSING: kwargs["required"] = True if field_choices is not None: kwargs["choices"] = field_choices if ( isinstance(inter_type, type) and (issubclass(inter_type, List) or issubclass(inter_type, Tuple)) ) or ("List" in str(inter_type) or "Tuple" in str(inter_type)): if "int" in str(inter_type): kwargs["type"] = lambda x: eval_str_list(x, int) elif "float" in str(inter_type): kwargs["type"] = lambda x: eval_str_list(x, float) elif "str" in str(inter_type): kwargs["type"] = lambda x: eval_str_list(x, str) else: raise NotImplementedError( "parsing of type " + str(inter_type) + " is not implemented" ) if field_default is not MISSING: kwargs["default"] = ( ",".join(map(str, field_default)) if field_default is not 
None else None ) elif ( isinstance(inter_type, type) and issubclass(inter_type, Enum) ) or "Enum" in str(inter_type): kwargs["type"] = str if field_default is not MISSING: if isinstance(field_default, Enum): kwargs["default"] = field_default.value else: kwargs["default"] = field_default elif inter_type is bool: kwargs["action"] = ( "store_false" if field_default is True else "store_true" ) kwargs["default"] = field_default else: kwargs["type"] = inter_type if field_default is not MISSING: kwargs["default"] = field_default # build the help with the hierarchical prefix if with_prefix is not None and with_prefix != "" and field_help is not None: field_help = with_prefix[2:] + ": " + field_help kwargs["help"] = field_help if field_const is not None: kwargs["const"] = field_const kwargs["nargs"] = "?" return kwargs for k in dataclass_instance._get_all_attributes(): field_name = argparse_name(dataclass_instance._get_name(k)) field_type = dataclass_instance._get_type(k) if field_name is None: continue elif inspect.isclass(field_type) and issubclass(field_type, FairseqDataclass): # for fields that are of type FairseqDataclass, we can recursively # add their fields to the namespace (so we add the args from model, task, etc. to the root namespace) prefix = None if with_prefix is not None: # if a prefix is specified, then we don't want to copy the subfields directly to the root namespace # but we prefix them with the name of the current field. prefix = field_name gen_parser_from_dataclass(parser, field_type(), delete_default, prefix) continue kwargs = get_kwargs_from_dc(dataclass_instance, k) field_args = [field_name] alias = dataclass_instance._get_argparse_alias(k) if alias is not None: field_args.append(alias) if "default" in kwargs: if isinstance(kwargs["default"], str) and kwargs["default"].startswith( "${" ): if kwargs["help"] is None: # this is a field with a name that will be added elsewhere continue else: del kwargs["default"] if delete_default and "default" in kwargs: del kwargs["default"] try: parser.add_argument(*field_args, **kwargs) except ArgumentError: pass def _set_legacy_defaults(args, cls): """Helper to set default arguments based on *add_args*.""" if not hasattr(cls, "add_args"): return import argparse parser = argparse.ArgumentParser( argument_default=argparse.SUPPRESS, allow_abbrev=False ) cls.add_args(parser) # copied from argparse.py: defaults = argparse.Namespace() for action in parser._actions: if action.dest is not argparse.SUPPRESS: if not hasattr(defaults, action.dest): if action.default is not argparse.SUPPRESS: setattr(defaults, action.dest, action.default) for key, default_value in vars(defaults).items(): if not hasattr(args, key): setattr(args, key, default_value) def _override_attr( sub_node: str, data_class: Type[FairseqDataclass], args: Namespace ) -> List[str]: overrides = [] if not inspect.isclass(data_class) or not issubclass(data_class, FairseqDataclass): return overrides def get_default(f): if not isinstance(f.default_factory, _MISSING_TYPE): return f.default_factory() return f.default for k, v in data_class.__dataclass_fields__.items(): if k.startswith("_"): # private member, skip continue val = get_default(v) if not hasattr(args, k) else getattr(args, k) field_type = interpret_dc_type(v.type) if ( isinstance(val, str) and not val.startswith("${") # not interpolation and field_type != str and ( not inspect.isclass(field_type) or not issubclass(field_type, Enum) ) # not choices enum ): # upgrade old models that stored complex parameters as string val = 
ast.literal_eval(val) if isinstance(val, tuple): val = list(val) v_type = getattr(v.type, "__origin__", None) if ( (v_type is List or v_type is list or v_type is Optional) # skip interpolation and not (isinstance(val, str) and val.startswith("${")) ): # if type is int but val is float, then we will crash later - try to convert here if hasattr(v.type, "__args__"): t_args = v.type.__args__ if len(t_args) == 1 and (t_args[0] is float or t_args[0] is int): val = list(map(t_args[0], val)) elif val is not None and ( field_type is int or field_type is bool or field_type is float ): try: val = field_type(val) except: pass # ignore errors here, they are often from interpolation args if val is None: overrides.append("{}.{}=null".format(sub_node, k)) elif val == "": overrides.append("{}.{}=''".format(sub_node, k)) elif isinstance(val, str): val = val.replace("'", r"\'") overrides.append("{}.{}='{}'".format(sub_node, k, val)) elif isinstance(val, FairseqDataclass): overrides += _override_attr(f"{sub_node}.{k}", type(val), args) elif isinstance(val, Namespace): sub_overrides, _ = override_module_args(val) for so in sub_overrides: overrides.append(f"{sub_node}.{k}.{so}") else: overrides.append("{}.{}={}".format(sub_node, k, val)) return overrides def migrate_registry( name, value, registry, args, overrides, deletes, use_name_as_val=False ): if value in registry: overrides.append("{}={}".format(name, value)) overrides.append("{}._name={}".format(name, value)) overrides.extend(_override_attr(name, registry[value], args)) elif use_name_as_val and value is not None: overrides.append("{}={}".format(name, value)) else: deletes.append(name) def override_module_args(args: Namespace) -> Tuple[List[str], List[str]]: """use the field in args to overrides those in cfg""" overrides = [] deletes = [] for k in FairseqConfig.__dataclass_fields__.keys(): overrides.extend( _override_attr(k, FairseqConfig.__dataclass_fields__[k].type, args) ) if args is not None: if hasattr(args, "task"): from fairseq.tasks import TASK_DATACLASS_REGISTRY migrate_registry( "task", args.task, TASK_DATACLASS_REGISTRY, args, overrides, deletes ) else: deletes.append("task") # these options will be set to "None" if they have not yet been migrated # so we can populate them with the entire flat args CORE_REGISTRIES = {"criterion", "optimizer", "lr_scheduler"} from fairseq.registry import REGISTRIES for k, v in REGISTRIES.items(): if hasattr(args, k): migrate_registry( k, getattr(args, k), v["dataclass_registry"], args, overrides, deletes, use_name_as_val=k not in CORE_REGISTRIES, ) else: deletes.append(k) no_dc = True if hasattr(args, "arch"): from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_MODEL_NAME_REGISTRY if args.arch in ARCH_MODEL_REGISTRY: m_cls = ARCH_MODEL_REGISTRY[args.arch] dc = getattr(m_cls, "__dataclass", None) if dc is not None: m_name = ARCH_MODEL_NAME_REGISTRY[args.arch] overrides.append("model={}".format(m_name)) overrides.append("model._name={}".format(args.arch)) # override model params with those exist in args overrides.extend(_override_attr("model", dc, args)) no_dc = False if no_dc: deletes.append("model") return overrides, deletes class omegaconf_no_object_check: def __init__(self): self.old_is_primitive = _utils.is_primitive_type def __enter__(self): _utils.is_primitive_type = lambda _: True def __exit__(self, type, value, traceback): _utils.is_primitive_type = self.old_is_primitive def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig: """Convert a flat argparse.Namespace to a structured DictConfig.""" 
# Here we are using field values provided in args to override counterparts inside config object overrides, deletes = override_module_args(args) # configs will be in fairseq/config after installation config_path = os.path.join("..", "config") GlobalHydra.instance().clear() with initialize(config_path=config_path): try: composed_cfg = compose("config", overrides=overrides, strict=False) except: logger.error("Error when composing. Overrides: " + str(overrides)) raise for k in deletes: composed_cfg[k] = None cfg = OmegaConf.create( OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True) ) # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models with omegaconf_no_object_check(): if cfg.task is None and getattr(args, "task", None): cfg.task = Namespace(**vars(args)) from fairseq.tasks import TASK_REGISTRY _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task]) cfg.task._name = args.task if cfg.model is None and getattr(args, "arch", None): cfg.model = Namespace(**vars(args)) from fairseq.models import ARCH_MODEL_REGISTRY _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch]) cfg.model._name = args.arch if cfg.optimizer is None and getattr(args, "optimizer", None): cfg.optimizer = Namespace(**vars(args)) from fairseq.optim import OPTIMIZER_REGISTRY _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer]) cfg.optimizer._name = args.optimizer if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None): cfg.lr_scheduler = Namespace(**vars(args)) from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY _set_legacy_defaults( cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler] ) cfg.lr_scheduler._name = args.lr_scheduler if cfg.criterion is None and getattr(args, "criterion", None): cfg.criterion = Namespace(**vars(args)) from fairseq.criterions import CRITERION_REGISTRY _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion]) cfg.criterion._name = args.criterion OmegaConf.set_struct(cfg, True) return cfg def overwrite_args_by_name(cfg: DictConfig, overrides: Dict[str, any]): # this will be deprecated when we get rid of argparse and model_overrides logic from fairseq.registry import REGISTRIES with open_dict(cfg): for k in cfg.keys(): # "k in cfg" will return false if its a "mandatory value (e.g. 
???)" if k in cfg and isinstance(cfg[k], DictConfig): if k in overrides and isinstance(overrides[k], dict): for ok, ov in overrides[k].items(): if isinstance(ov, dict) and cfg[k][ok] is not None: overwrite_args_by_name(cfg[k][ok], ov) else: cfg[k][ok] = ov else: overwrite_args_by_name(cfg[k], overrides) elif k in cfg and isinstance(cfg[k], Namespace): for override_key, val in overrides.items(): setattr(cfg[k], override_key, val) elif k in overrides: if ( k in REGISTRIES and overrides[k] in REGISTRIES[k]["dataclass_registry"] ): cfg[k] = DictConfig( REGISTRIES[k]["dataclass_registry"][overrides[k]] ) overwrite_args_by_name(cfg[k], overrides) cfg[k]._name = overrides[k] else: cfg[k] = overrides[k] def merge_with_parent(dc: FairseqDataclass, cfg: DictConfig, remove_missing=False): if remove_missing: if is_dataclass(dc): target_keys = set(dc.__dataclass_fields__.keys()) else: target_keys = set(dc.keys()) with open_dict(cfg): for k in list(cfg.keys()): if k not in target_keys: del cfg[k] merged_cfg = OmegaConf.merge(dc, cfg) merged_cfg.__dict__["_parent"] = cfg.__dict__["_parent"] OmegaConf.set_struct(merged_cfg, True) return merged_cfg
KosmosX-API-main
kosmosX/fairseq/fairseq/dataclass/utils.py
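gen_parser_from_dataclass is what turns these dataclasses into argparse flags. A minimal sketch with a made-up config (ToyConfig is not part of fairseq):

# Sketch: expose a FairseqDataclass as command-line flags.
import argparse
from dataclasses import dataclass, field

from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import gen_parser_from_dataclass


@dataclass
class ToyConfig(FairseqDataclass):  # hypothetical config for illustration
    hidden_dim: int = field(default=128, metadata={"help": "model width"})
    dropout: float = field(default=0.1, metadata={"help": "dropout probability"})


parser = argparse.ArgumentParser()
gen_parser_from_dataclass(parser, ToyConfig())
args = parser.parse_args(["--hidden-dim", "256"])
print(args.hidden_dim, args.dropout)  # 256 0.1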
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn class BeamableMM(nn.Module): """This module provides an optimized MM for beam decoding with attention. It leverage the fact that the source-side of the input is replicated beam times and the target-side of the input is of width one. This layer speeds up inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)} with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}. """ def __init__(self, beam_size=None): super(BeamableMM, self).__init__() self.beam_size = beam_size def forward(self, input1, input2): if ( not self.training and self.beam_size is not None # test mode and input1.dim() == 3 # beam size is set and input1.size(1) # only support batched input == 1 # single time step update ): bsz, beam = input1.size(0), self.beam_size # bsz x 1 x nhu --> bsz/beam x beam x nhu input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1) # bsz x sz2 x nhu --> bsz/beam x sz2 x nhu input2 = input2.unfold(0, beam, beam)[:, :, :, 0] # use non batched operation if bsz = beam if input1.size(0) == 1: output = torch.mm(input1[0, :, :], input2[0, :, :]) else: output = input1.bmm(input2) return output.view(bsz, 1, -1) else: return input1.bmm(input2) def set_beam_size(self, beam_size): self.beam_size = beam_size
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/beamable_mm.py
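BeamableMM only takes the folded-beam path at inference time with a single query step, and it assumes the second operand is replicated contiguously per beam. A small shape-check sketch (sizes are arbitrary):

# Sketch: beam-folded bmm for one decoding step of attention scores.
import torch

from fairseq.modules.beamable_mm import BeamableMM

beam, batch, src_len, dim = 5, 2, 7, 16
scores = torch.randn(beam * batch, 1, src_len)  # bsz x 1 x src_len
values = torch.randn(batch, src_len, dim).repeat_interleave(beam, dim=0)  # replicated per beam
bmm = BeamableMM(beam_size=beam).eval()         # eval() so the optimized branch runs
out = bmm(scores, values)
print(out.shape)  # torch.Size([10, 1, 16])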
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.scalar_bias import scalar_bias class SingleHeadAttention(nn.Module): """ Single-head attention that supports Gating and Downsampling """ def __init__( self, out_channels, embed_dim, head_dim, head_index, dropout=0.0, bias=True, project_input=True, gated=False, downsample=False, num_heads=1, ): super().__init__() self.embed_dim = embed_dim self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_index = head_index self.head_dim = head_dim self.project_input = project_input self.gated = gated self.downsample = downsample self.num_heads = num_heads self.projection = None k_layers = [] v_layers = [] if self.downsample: k_layers.append(Downsample(self.head_index)) v_layers.append(Downsample(self.head_index)) out_proj_size = self.head_dim else: out_proj_size = self.head_dim * self.num_heads if self.gated: k_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_q = GatedLinear(self.embed_dim, out_proj_size, bias=bias) v_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias)) else: k_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_q = Linear(self.embed_dim, out_proj_size, bias=bias) v_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_k = nn.Sequential(*k_layers) self.in_proj_v = nn.Sequential(*v_layers) if self.downsample: self.out_proj = Linear(out_proj_size, self.head_dim, bias=bias) else: self.out_proj = Linear(out_proj_size, out_channels, bias=bias) self.scaling = self.head_dim ** -0.5 def forward( self, query, key, value, mask_future_timesteps=False, key_padding_mask=None, use_scalar_bias=False, ): """Input shape: Time x Batch x Channel Self-attention can be implemented by passing in the same arguments for query, key and value. Future timesteps can be masked with the `mask_future_timesteps` argument. Padding elements can be excluded from the key by passing a binary ByteTensor (`key_padding_mask`) with shape: batch x src_len, where padding elements are indicated by 1s. 
""" src_len, bsz, out_channels = key.size() tgt_len = query.size(0) assert list(query.size()) == [tgt_len, bsz, out_channels] assert key.size() == value.size() if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.downsample: size = bsz else: size = bsz * self.num_heads k = key v = value q = query if self.project_input: q = self.in_proj_q(q) k = self.in_proj_k(k) v = self.in_proj_v(v) src_len = k.size()[0] q *= self.scaling if not self.downsample: q = q.view(tgt_len, size, self.head_dim) k = k.view(src_len, size, self.head_dim) v = v.view(src_len, size, self.head_dim) q = q.transpose(0, 1) k = k.transpose(0, 1) v = v.transpose(0, 1) attn_weights = torch.bmm(q, k.transpose(1, 2)) if mask_future_timesteps: assert ( query.size() == key.size() ), "mask_future_timesteps only applies to self-attention" attn_weights *= torch.tril( attn_weights.data.new([1]).expand(tgt_len, tgt_len).clone(), diagonal=-1, )[:, :: self.head_index + 1 if self.downsample else 1].unsqueeze(0) attn_weights += torch.triu( attn_weights.data.new([-math.inf]).expand(tgt_len, tgt_len).clone(), diagonal=0, )[:, :: self.head_index + 1 if self.downsample else 1].unsqueeze(0) tgt_size = tgt_len if use_scalar_bias: attn_weights = scalar_bias(attn_weights, 2) v = scalar_bias(v, 1) tgt_size += 1 if key_padding_mask is not None: # don't attend to padding symbols if key_padding_mask.max() > 0: if self.downsample: attn_weights = attn_weights.view(bsz, 1, tgt_len, src_len) else: attn_weights = attn_weights.view( size, self.num_heads, tgt_len, src_len ) attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2), -math.inf, ) attn_weights = attn_weights.view(size, tgt_len, src_len) attn_weights = F.softmax(attn_weights, dim=-1) attn_weights = self.dropout_module(attn_weights) attn = torch.bmm(attn_weights, v) if self.downsample: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.head_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim) attn = self.out_proj(attn) return attn, attn_weights class DownsampledMultiHeadAttention(nn.ModuleList): """ Multi-headed attention with Gating and Downsampling """ def __init__( self, out_channels, embed_dim, num_heads, dropout=0.0, bias=True, project_input=True, gated=False, downsample=False, ): self.embed_dim = embed_dim self.num_heads = num_heads self.head_dim = embed_dim // num_heads self.downsample = downsample self.gated = gated self.project_input = project_input assert self.head_dim * num_heads == embed_dim if self.downsample: attention_heads = [] for index in range(self.num_heads): attention_heads.append( SingleHeadAttention( out_channels, self.embed_dim, self.head_dim, index, dropout, bias, self.project_input, self.gated, self.downsample, self.num_heads, ) ) super().__init__(modules=attention_heads) self.out_proj = Linear(embed_dim, out_channels, bias=bias) else: # either we have a list of attention heads, or just one attention head # if not being downsampled, we can do the heads with one linear layer instead of separate ones super().__init__() self.attention_module = SingleHeadAttention( out_channels, self.embed_dim, self.head_dim, 1, dropout, bias, self.project_input, self.gated, self.downsample, self.num_heads, ) def forward( self, query, key, value, mask_future_timesteps=False, key_padding_mask=None, use_scalar_bias=False, ): src_len, bsz, embed_dim = key.size() tgt_len = query.size(0) assert embed_dim == self.embed_dim assert list(query.size()) == 
[tgt_len, bsz, embed_dim] assert key.size() == value.size() tgt_size = tgt_len if use_scalar_bias: tgt_size += 1 attn = [] attn_weights = [] if self.downsample: for attention_head_number in range(self.num_heads): # call the forward of each attention head _attn, _attn_weight = self[attention_head_number]( query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias, ) attn.append(_attn) attn_weights.append(_attn_weight) full_attn = torch.cat(attn, dim=2) full_attn = self.out_proj(full_attn) return full_attn, attn_weights[0].clone() else: _attn, _attn_weight = self.attention_module( query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias, ) attn.append(_attn) attn_weights.append(_attn_weight) full_attn = torch.cat(attn, dim=2) full_attn_weights = torch.cat(attn_weights) full_attn_weights = full_attn_weights.view( bsz, self.num_heads, tgt_size, src_len ) full_attn_weights = full_attn_weights.sum(dim=1) / self.num_heads return full_attn, full_attn_weights class Downsample(nn.Module): """ Selects every nth element, where n is the index """ def __init__(self, index): super().__init__() self.index = index def forward(self, x): return x[:: self.index + 1] def Linear(in_features, out_features, dropout=0.0, bias=True): """Weight-normalized Linear layer (input: B x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features)) m.bias.data.zero_() return nn.utils.weight_norm(m) def GatedLinear(in_features, out_features, dropout=0.0, bias=True): """Weight-normalized Linear layer (input: B x T x C) with interspersed GLU units""" return nn.Sequential( Linear(in_features, out_features * 4, dropout, bias), nn.GLU(), Linear(out_features * 2, out_features * 2, dropout, bias), nn.GLU(), Linear(out_features, out_features, dropout, bias), )
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/downsampled_multihead_attention.py
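A quick self-attention sketch for DownsampledMultiHeadAttention in its non-downsampled configuration; inputs follow the Time x Batch x Channel layout documented in SingleHeadAttention.forward, sizes are arbitrary, and causal use would additionally pass mask_future_timesteps=True together with use_scalar_bias=True:

# Sketch: gated multi-head self-attention without downsampling.
import torch

from fairseq.modules.downsampled_multihead_attention import DownsampledMultiHeadAttention

seq_len, bsz, embed_dim = 12, 3, 64
x = torch.randn(seq_len, bsz, embed_dim)  # T x B x C
attn = DownsampledMultiHeadAttention(
    out_channels=embed_dim,
    embed_dim=embed_dim,
    num_heads=4,
    dropout=0.1,
    gated=True,
    downsample=False,
)
out, weights = attn(x, x, x)
print(out.shape, weights.shape)  # (12, 3, 64) and (3, 12, 12)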
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch import torch.nn.functional as F logger = logging.getLogger(__name__) def _cross_entropy_pytorch(logits, target, ignore_index=None, reduction="mean"): lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32) return F.nll_loss( lprobs, target, ignore_index=ignore_index, reduction=reduction, ) try: import xentropy_cuda from apex.contrib import xentropy def cross_entropy(logits, target, ignore_index=-100, reduction="mean"): if logits.device == torch.device("cpu"): return _cross_entropy_pytorch(logits, target, ignore_index, reduction) else: if not getattr(cross_entropy, "_has_logged_once", False): logger.info("using fused cross entropy") cross_entropy._has_logged_once = True half_to_float = logits.dtype == torch.half losses = xentropy.SoftmaxCrossEntropyLoss.apply( logits, target, 0.0, ignore_index, half_to_float, ) if reduction == "sum": return losses.sum() elif reduction == "mean": if ignore_index >= 0: return losses.sum() / target.ne(ignore_index).sum() else: return losses.mean() elif reduction == "none": return losses else: raise NotImplementedError except ImportError: def cross_entropy(logits, target, ignore_index=-100, reduction="mean"): return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/cross_entropy.py
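The cross_entropy wrapper above uses apex's fused kernel on CUDA when it is importable and otherwise falls back to the PyTorch path. A small CPU sketch (shapes are arbitrary):

# Sketch: token-level cross entropy with padded positions ignored.
import torch

from fairseq.modules.cross_entropy import cross_entropy

logits = torch.randn(8, 100)            # (tokens, vocab)
target = torch.randint(0, 100, (8,))
target[0] = -100                        # padded position, skipped via ignore_index
loss = cross_entropy(logits, target, ignore_index=-100, reduction="mean")
print(loss.item())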
import torch class RotaryPositionalEmbedding(torch.nn.Module): def __init__(self, dim, base=10000, precision=torch.half): """Rotary positional embedding Reference : https://blog.eleuther.ai/rotary-embeddings/ Paper: https://arxiv.org/pdf/2104.09864.pdf Args: dim: Dimension of embedding base: Base value for exponential precision: precision to use for numerical values """ super().__init__() inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim)) self.register_buffer("inv_freq", inv_freq) self.seq_len_cached = None self.cos_cached = None self.sin_cached = None self.precision = precision def forward(self, x, seq_len=None): """ Args: x: Input x with T X B X C seq_len: Sequence length of input x """ if seq_len != self.seq_len_cached: self.seq_len_cached = seq_len t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq) freqs = torch.einsum("i,j->ij", t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) self.cos_cached = emb.cos()[:, None, None, :] self.sin_cached = emb.sin()[:, None, None, :] return self.cos_cached, self.sin_cached # rotary pos emb helpers: def rotate_half(x): x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :] return torch.cat( (-x2, x1), dim=x1.ndim - 1 ) # dim=-1 triggers a bug in earlier torch versions def apply_rotary_pos_emb(q, k, cos, sin, offset: int = 0): cos, sin = ( cos[offset : q.shape[0] + offset, ...], sin[offset : q.shape[0] + offset, ...], ) return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/rotary_positional_embedding.py
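A brief sketch of the rotary-embedding helpers: the module caches cos/sin tables for a given sequence length, and apply_rotary_pos_emb rotates query/key tensors of shape T x B x heads x head_dim (sizes here are arbitrary):

# Sketch: build cos/sin tables and rotate q/k.
import torch

from fairseq.modules.rotary_positional_embedding import (
    RotaryPositionalEmbedding,
    apply_rotary_pos_emb,
)

seq_len, bsz, heads, head_dim = 16, 2, 4, 32
q = torch.randn(seq_len, bsz, heads, head_dim)
k = torch.randn(seq_len, bsz, heads, head_dim)
rope = RotaryPositionalEmbedding(dim=head_dim)
cos, sin = rope(q, seq_len=seq_len)            # each: (seq_len, 1, 1, head_dim)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
print(q_rot.shape, k_rot.shape)                # shapes are unchanged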
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, List, Optional, Tuple import torch import torch.nn.functional as F from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise from torch import Tensor, nn from torch.nn import Parameter from fairseq.modules import LayerNorm @with_incremental_state class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, attention_norm=False, ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim if attention_norm: self.attn_ln = LayerNorm(self.embed_dim) else: self.attn_ln = None self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) self.k_proj = quant_noise( nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.v_proj = quant_noise( nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.q_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) self.out_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() self.onnx_trace = False self.skip_embed_dim_check = False def prepare_for_onnx_export_(self): self.onnx_trace = True def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def _get_reserve_head_index(self, num_heads_to_keep: int): k_proj_heads_norm = [] q_proj_heads_norm = [] v_proj_heads_norm = [] for i in range(self.num_heads): start_idx = i * self.head_dim end_idx = (i + 1) * self.head_dim k_proj_heads_norm.append( torch.sum( torch.abs( self.k_proj.weight[ start_idx:end_idx, 
] ) ).tolist() + torch.sum(torch.abs(self.k_proj.bias[start_idx:end_idx])).tolist() ) q_proj_heads_norm.append( torch.sum( torch.abs( self.q_proj.weight[ start_idx:end_idx, ] ) ).tolist() + torch.sum(torch.abs(self.q_proj.bias[start_idx:end_idx])).tolist() ) v_proj_heads_norm.append( torch.sum( torch.abs( self.v_proj.weight[ start_idx:end_idx, ] ) ).tolist() + torch.sum(torch.abs(self.v_proj.bias[start_idx:end_idx])).tolist() ) heads_norm = [] for i in range(self.num_heads): heads_norm.append( k_proj_heads_norm[i] + q_proj_heads_norm[i] + v_proj_heads_norm[i] ) sorted_head_index = sorted( range(self.num_heads), key=lambda k: heads_norm[k], reverse=True ) reserve_head_index = [] for i in range(num_heads_to_keep): start = sorted_head_index[i] * self.head_dim end = (sorted_head_index[i] + 1) * self.head_dim reserve_head_index.append((start, end)) return reserve_head_index def _adaptive_prune_heads(self, reserve_head_index: List[Tuple[int, int]]): new_q_weight = [] new_q_bias = [] new_k_weight = [] new_k_bias = [] new_v_weight = [] new_v_bias = [] new_out_proj_weight = [] for ele in reserve_head_index: start_idx, end_idx = ele new_q_weight.append( self.q_proj.weight[ start_idx:end_idx, ] ) new_q_bias.append(self.q_proj.bias[start_idx:end_idx]) new_k_weight.append( self.k_proj.weight[ start_idx:end_idx, ] ) new_k_bias.append(self.k_proj.bias[start_idx:end_idx]) new_v_weight.append( self.v_proj.weight[ start_idx:end_idx, ] ) new_v_bias.append(self.v_proj.bias[start_idx:end_idx]) new_out_proj_weight.append(self.out_proj.weight[:, start_idx:end_idx]) new_q_weight = torch.cat(new_q_weight).detach() new_k_weight = torch.cat(new_k_weight).detach() new_v_weight = torch.cat(new_v_weight).detach() new_out_proj_weight = torch.cat(new_out_proj_weight, dim=-1).detach() new_q_weight.requires_grad = True new_k_weight.requires_grad = True new_v_weight.requires_grad = True new_out_proj_weight.requires_grad = True new_q_bias = torch.cat(new_q_bias).detach() new_q_bias.requires_grad = True new_k_bias = torch.cat(new_k_bias).detach() new_k_bias.requires_grad = True new_v_bias = torch.cat(new_v_bias).detach() new_v_bias.requires_grad = True self.q_proj.weight = torch.nn.Parameter(new_q_weight) self.q_proj.bias = torch.nn.Parameter(new_q_bias) self.k_proj.weight = torch.nn.Parameter(new_k_weight) self.k_proj.bias = torch.nn.Parameter(new_k_bias) self.v_proj.weight = torch.nn.Parameter(new_v_weight) self.v_proj.bias = torch.nn.Parameter(new_v_bias) self.out_proj.weight = torch.nn.Parameter(new_out_proj_weight) self.num_heads = len(reserve_head_index) self.embed_dim = self.head_dim * self.num_heads self.q_proj.out_features = self.embed_dim self.k_proj.out_features = self.embed_dim self.v_proj.out_features = self.embed_dim def _set_skip_embed_dim_check(self): self.skip_embed_dim_check = True def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). 
attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. Default: return the average attention weights over all heads. """ if need_head_weights: need_weights = True is_tpu = query.device.type == "xla" tgt_len, bsz, embed_dim = query.size() src_len = tgt_len if not self.skip_embed_dim_check: assert ( embed_dim == self.embed_dim ), f"query dim {embed_dim} != {self.embed_dim}" assert list(query.size()) == [tgt_len, bsz, embed_dim] if key is not None: src_len, key_bsz, _ = key.size() if not torch.jit.is_scripting(): assert key_bsz == bsz assert value is not None assert src_len, bsz == value.shape[:2] if ( not self.onnx_trace and not is_tpu # don't use PyTorch version on TPUs and incremental_state is None and not static_kv # A workaround for quantization to work. Otherwise JIT compilation # treats bias in linear module as method. and not torch.jit.is_scripting() # The Multihead attention implemented in pytorch forces strong dimension check # for input embedding dimention and K,Q,V projection dimension. # Since pruning will break the dimension check and it is not easy to modify the pytorch API, # it is preferred to bypass the pytorch MHA when we need to skip embed_dim_check and not self.skip_embed_dim_check and self.attn_ln is None ): assert key is not None and value is not None return F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout_module.p, self.out_proj.weight, self.out_proj.bias, self.training or self.dropout_module.apply_during_inference, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, ) if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1), ], dim=1, ) q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if v is not None: 
v = ( v.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if saved_state is not None: # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) src_len = k.size(1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None assert k.size(1) == src_len # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.add_zero_attn: assert v is not None src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as( key_padding_mask ), ], dim=1, ) attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) if self.onnx_trace: attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) if not is_tpu: attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) else: attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if before_softmax: return attn_weights, v attn_weights_float = utils.softmax( attn_weights, dim=-1, onnx_trace=self.onnx_trace ) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = self.dropout_module(attn_weights) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] if 
self.onnx_trace and attn.size(1) == 1: # when ONNX tracing a single decoder step (sequence length == 1) # the transpose is a no-op copy before view, thus unnecessary attn = attn.contiguous().view(tgt_len, bsz, self.embed_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim) if self.attn_ln is not None: attn = self.attn_ln(attn) attn = self.out_proj(attn) attn_weights: Optional[Tensor] = None if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) return attn, attn_weights @staticmethod def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: if src_len > prev_key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - prev_key_padding_mask.size(1)), device=prev_key_padding_mask.device, ) new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask.float() elif key_padding_mask is not None: if src_len > key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - key_padding_mask.size(1)), device=key_padding_mask.device, ) new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = key_padding_mask.float() else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask @torch.jit.export def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer_k = input_buffer[k] if input_buffer_k is not None: if self.encoder_decoder_attention and input_buffer_k.size( 0 ) == new_order.size(0): break input_buffer[k] = input_buffer_k.index_select(0, new_order) incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer) def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int): return attn_weights def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." 
if name != "" else "" items_to_add = {} keys_to_remove = [] for k in state_dict.keys(): if k.endswith(prefix + "in_proj_weight"): # in_proj_weight used to be q + k + v with same dimensions dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim] items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim] items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :] keys_to_remove.append(k) k_bias = prefix + "in_proj_bias" if k_bias in state_dict.keys(): dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim] items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][ dim : 2 * dim ] items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :] keys_to_remove.append(prefix + "in_proj_bias") for k in keys_to_remove: del state_dict[k] for key, value in items_to_add.items(): state_dict[key] = value
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/multihead_attention.py
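A minimal illustration (not part of the repository; the module prefix attn. and embed_dim = 8 are assumptions made here) of the state-dict upgrade that upgrade_state_dict_named above performs: a legacy combined in_proj_weight of shape (3 * embed_dim, embed_dim) is split into separate q/k/v projection weights.

import torch

embed_dim = 8  # toy size chosen for the sketch
legacy = {"attn.in_proj_weight": torch.randn(3 * embed_dim, embed_dim)}

# same slicing as upgrade_state_dict_named: first third -> q, second third -> k, last third -> v
dim = legacy["attn.in_proj_weight"].shape[0] // 3
upgraded = {
    "attn.q_proj.weight": legacy["attn.in_proj_weight"][:dim],
    "attn.k_proj.weight": legacy["attn.in_proj_weight"][dim : 2 * dim],
    "attn.v_proj.weight": legacy["attn.in_proj_weight"][2 * dim :],
}
assert all(w.shape == (embed_dim, embed_dim) for w in upgraded.values())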
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import absolute_import, division, print_function, unicode_literals

from collections.abc import Iterable
from itertools import repeat

import torch
import torch.nn as nn


def _pair(v):
    if isinstance(v, Iterable):
        assert len(v) == 2, "len(v) != 2"
        return v
    return tuple(repeat(v, 2))


def infer_conv_output_dim(conv_op, input_dim, sample_inchannel):
    sample_seq_len = 200
    sample_bsz = 10
    x = torch.randn(sample_bsz, sample_inchannel, sample_seq_len, input_dim)
    # N x C x H x W
    # N: sample_bsz, C: sample_inchannel, H: sample_seq_len, W: input_dim
    x = conv_op(x)
    # N x C x H x W
    x = x.transpose(1, 2)
    # N x H x C x W
    bsz, seq = x.size()[:2]
    per_channel_dim = x.size()[3]
    # bsz: N, seq: H, CxW the rest
    return x.contiguous().view(bsz, seq, -1).size(-1), per_channel_dim


class VGGBlock(torch.nn.Module):
    """
    VGG-motivated cnn module https://arxiv.org/pdf/1409.1556.pdf

    Args:
        in_channels: (int) number of input channels (typically 1)
        out_channels: (int) number of output channels
        conv_kernel_size: convolution kernel size
        pooling_kernel_size: the size of the pooling window to take a max over
        num_conv_layers: (int) number of convolution layers
        input_dim: (int) input dimension
        conv_stride: the stride of the convolving kernel.
            Can be a single number or a tuple (sH, sW)  Default: 1
        padding: implicit paddings on both sides of the input.
            Can be a single number or a tuple (padH, padW). Default: None
        layer_norm: (bool) if layer norm is going to be applied. Default: False

    Shape:
        Input: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
        Output: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        conv_kernel_size,
        pooling_kernel_size,
        num_conv_layers,
        input_dim,
        conv_stride=1,
        padding=None,
        layer_norm=False,
    ):
        assert (
            input_dim is not None
        ), "Need input_dim for LayerNorm and infer_conv_output_dim"
        super(VGGBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv_kernel_size = _pair(conv_kernel_size)
        self.pooling_kernel_size = _pair(pooling_kernel_size)
        self.num_conv_layers = num_conv_layers
        self.padding = (
            tuple(e // 2 for e in self.conv_kernel_size)
            if padding is None
            else _pair(padding)
        )
        self.conv_stride = _pair(conv_stride)

        self.layers = nn.ModuleList()
        for layer in range(num_conv_layers):
            conv_op = nn.Conv2d(
                in_channels if layer == 0 else out_channels,
                out_channels,
                self.conv_kernel_size,
                stride=self.conv_stride,
                padding=self.padding,
            )
            self.layers.append(conv_op)
            if layer_norm:
                conv_output_dim, per_channel_dim = infer_conv_output_dim(
                    conv_op, input_dim, in_channels if layer == 0 else out_channels
                )
                self.layers.append(nn.LayerNorm(per_channel_dim))
                input_dim = per_channel_dim
            self.layers.append(nn.ReLU())

        if self.pooling_kernel_size is not None:
            pool_op = nn.MaxPool2d(kernel_size=self.pooling_kernel_size, ceil_mode=True)
            self.layers.append(pool_op)
            self.total_output_dim, self.output_dim = infer_conv_output_dim(
                pool_op, input_dim, out_channels
            )

    def forward(self, x):
        for i, _ in enumerate(self.layers):
            x = self.layers[i](x)
        return x
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/vggblock.py
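A shape sketch for VGGBlock above; the batch size, feature dimension, and block hyper-parameters are toy values assumed here, not taken from the repository.

import torch

block = VGGBlock(
    in_channels=1,
    out_channels=32,
    conv_kernel_size=3,       # padded so the convs preserve the time/feature sizes
    pooling_kernel_size=2,
    num_conv_layers=2,
    input_dim=40,
    layer_norm=True,
)
x = torch.randn(8, 1, 100, 40)       # B x C x T x feat
y = block(x)
assert y.shape == (8, 32, 50, 20)    # max-pool with kernel 2 halves T and feat
assert block.output_dim == 20 and block.total_output_dim == 32 * 20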
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, Optional import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from torch import Tensor class LearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to the forward function. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): super().__init__(num_embeddings, embedding_dim, padding_idx) self.onnx_trace = False if self.padding_idx is not None: self.max_positions = self.num_embeddings - self.padding_idx - 1 else: self.max_positions = self.num_embeddings def forward( self, input: Tensor, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, positions: Optional[Tensor] = None, ): """Input is expected to be of size [bsz x seqlen].""" assert (positions is None) or ( self.padding_idx is None ), "If positions is pre-computed then padding_idx should not be set." if positions is None: if incremental_state is not None and len(incremental_state) > 0: # positions is the same for every token when decoding a single step # Without the int() cast, it doesn't work in some cases when exporting to ONNX positions = torch.zeros( (1, 1), device=input.device, dtype=input.dtype ).fill_(int(self.padding_idx + input.size(1))) else: positions = utils.make_positions( input, self.padding_idx, onnx_trace=self.onnx_trace ) return F.embedding( positions, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, )
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/learned_positional_embedding.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch class GradMultiply(torch.autograd.Function): @staticmethod def forward(ctx, x, scale): ctx.scale = scale res = x.new(x) return res @staticmethod def backward(ctx, grad): return grad * ctx.scale, None
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/grad_multiply.py
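A quick usage sketch (toy tensor assumed here) for GradMultiply above: the forward pass is the identity, while gradients flowing back through it are scaled by the given factor.

import torch

x = torch.ones(3, requires_grad=True)
y = GradMultiply.apply(x, 0.5)   # forward: y has the same values as x
y.sum().backward()               # backward: the incoming gradient of 1 is scaled to 0.5
assert torch.allclose(x.grad, torch.full_like(x, 0.5))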
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import List, Tuple import torch import torch.nn.functional as F from fairseq.data import Dictionary from torch import nn CHAR_PAD_IDX = 0 CHAR_EOS_IDX = 257 logger = logging.getLogger(__name__) class CharacterTokenEmbedder(torch.nn.Module): def __init__( self, vocab: Dictionary, filters: List[Tuple[int, int]], char_embed_dim: int, word_embed_dim: int, highway_layers: int, max_char_len: int = 50, char_inputs: bool = False, ): super(CharacterTokenEmbedder, self).__init__() self.onnx_trace = False self.embedding_dim = word_embed_dim self.max_char_len = max_char_len self.char_embeddings = nn.Embedding(257, char_embed_dim, padding_idx=0) self.symbol_embeddings = nn.Parameter(torch.FloatTensor(2, word_embed_dim)) self.eos_idx, self.unk_idx = 0, 1 self.char_inputs = char_inputs self.convolutions = nn.ModuleList() for width, out_c in filters: self.convolutions.append( nn.Conv1d(char_embed_dim, out_c, kernel_size=width) ) last_dim = sum(f[1] for f in filters) self.highway = Highway(last_dim, highway_layers) if highway_layers > 0 else None self.projection = nn.Linear(last_dim, word_embed_dim) assert ( vocab is not None or char_inputs ), "vocab must be set if not using char inputs" self.vocab = None if vocab is not None: self.set_vocab(vocab, max_char_len) self.reset_parameters() def prepare_for_onnx_export_(self): self.onnx_trace = True def set_vocab(self, vocab, max_char_len): word_to_char = torch.LongTensor(len(vocab), max_char_len) truncated = 0 for i in range(len(vocab)): if i < vocab.nspecial: char_idxs = [0] * max_char_len else: chars = vocab[i].encode() # +1 for padding char_idxs = [c + 1 for c in chars] + [0] * (max_char_len - len(chars)) if len(char_idxs) > max_char_len: truncated += 1 char_idxs = char_idxs[:max_char_len] word_to_char[i] = torch.LongTensor(char_idxs) if truncated > 0: logger.info( "truncated {} words longer than {} characters".format( truncated, max_char_len ) ) self.vocab = vocab self.word_to_char = word_to_char @property def padding_idx(self): return Dictionary().pad() if self.vocab is None else self.vocab.pad() def reset_parameters(self): nn.init.xavier_normal_(self.char_embeddings.weight) nn.init.xavier_normal_(self.symbol_embeddings) nn.init.xavier_uniform_(self.projection.weight) nn.init.constant_( self.char_embeddings.weight[self.char_embeddings.padding_idx], 0.0 ) nn.init.constant_(self.projection.bias, 0.0) def forward( self, input: torch.Tensor, ): if self.char_inputs: chars = input.view(-1, self.max_char_len) pads = chars[:, 0].eq(CHAR_PAD_IDX) eos = chars[:, 0].eq(CHAR_EOS_IDX) if eos.any(): if self.onnx_trace: chars = torch.where(eos.unsqueeze(1), chars.new_zeros(1), chars) else: chars[eos] = 0 unk = None else: flat_words = input.view(-1) chars = self.word_to_char[flat_words.type_as(self.word_to_char)].type_as( input ) pads = flat_words.eq(self.vocab.pad()) eos = flat_words.eq(self.vocab.eos()) unk = flat_words.eq(self.vocab.unk()) word_embs = self._convolve(chars) if self.onnx_trace: if pads.any(): word_embs = torch.where( pads.unsqueeze(1), word_embs.new_zeros(1), word_embs ) if eos.any(): word_embs = torch.where( eos.unsqueeze(1), self.symbol_embeddings[self.eos_idx], word_embs ) if unk is not None and unk.any(): word_embs = torch.where( unk.unsqueeze(1), self.symbol_embeddings[self.unk_idx], word_embs ) else: if pads.any(): word_embs[pads] = 0 if eos.any(): 
word_embs[eos] = self.symbol_embeddings[self.eos_idx] if unk is not None and unk.any(): word_embs[unk] = self.symbol_embeddings[self.unk_idx] return word_embs.view(input.size()[:2] + (-1,)) def _convolve( self, char_idxs: torch.Tensor, ): char_embs = self.char_embeddings(char_idxs) char_embs = char_embs.transpose(1, 2) # BTC -> BCT conv_result = [] for conv in self.convolutions: x = conv(char_embs) x, _ = torch.max(x, -1) x = F.relu(x) conv_result.append(x) x = torch.cat(conv_result, dim=-1) if self.highway is not None: x = self.highway(x) x = self.projection(x) return x class Highway(torch.nn.Module): """ A `Highway layer <https://arxiv.org/abs/1505.00387>`_. Adopted from the AllenNLP implementation. """ def __init__(self, input_dim: int, num_layers: int = 1): super(Highway, self).__init__() self.input_dim = input_dim self.layers = nn.ModuleList( [nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)] ) self.activation = nn.ReLU() self.reset_parameters() def reset_parameters(self): for layer in self.layers: # As per comment in AllenNLP: # We should bias the highway layer to just carry its input forward. We do that by # setting the bias on `B(x)` to be positive, because that means `g` will be biased to # be high, so we will carry the input forward. The bias on `B(x)` is the second half # of the bias vector in each Linear layer. nn.init.constant_(layer.bias[self.input_dim :], 1) nn.init.constant_(layer.bias[: self.input_dim], 0) nn.init.xavier_normal_(layer.weight) def forward(self, x: torch.Tensor): for layer in self.layers: projection = layer(x) proj_x, gate = projection.chunk(2, dim=-1) proj_x = self.activation(proj_x) gate = torch.sigmoid(gate) x = gate * x + (gate.new_tensor([1]) - gate) * proj_x return x
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/character_token_embedder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import functools from typing import Any, Dict, List, Tuple, Union import torch import torch.utils.checkpoint as checkpoint from fairseq import utils def checkpoint_wrapper(m, offload_to_cpu=False): """ A friendlier wrapper for performing activation checkpointing. Compared to the PyTorch version, this version: - wraps an nn.Module, so that all subsequent calls will use checkpointing - handles keyword arguments in the forward - handles non-Tensor outputs from the forward Usage:: checkpointed_module = checkpoint_wrapper(my_module, offload_to_cpu=True) a, b = checkpointed_module(x, y=3, z=torch.Tensor([1])) """ # should I check whether original_forward has already been set? assert not hasattr( m, "precheckpoint_forward" ), "checkpoint function has already been applied?" m.precheckpoint_forward = m.forward m.forward = functools.partial( _checkpointed_forward, m.precheckpoint_forward, # original_forward offload_to_cpu, ) return m def unwrap_checkpoint(m: torch.nn.Module): """ unwrap a module and its children from checkpoint_wrapper """ for module in m.modules(): if hasattr(module, "precheckpoint_forward"): module.forward = module.precheckpoint_forward del module.precheckpoint_forward if hasattr(module, "old_deepcopy_method"): module.__deepcopy__ = module.old_deepcopy_method del module.old_deepcopy_method return m def _checkpointed_forward(original_forward, offload_to_cpu, *args, **kwargs): # Autograd Functions in PyTorch work best with positional args, since # the backward must return gradients (or None) for every input argument. # We can flatten keyword arguments to make this easier. 
kwarg_keys, flat_args = pack_kwargs(*args, **kwargs) parent_ctx_dict = {"offload": offload_to_cpu} output = CheckpointFunction.apply( original_forward, parent_ctx_dict, kwarg_keys, *flat_args ) if isinstance(output, torch.Tensor): return output else: packed_non_tensor_outputs = parent_ctx_dict["packed_non_tensor_outputs"] if packed_non_tensor_outputs: output = unpack_non_tensors(output, packed_non_tensor_outputs) return output def pack_kwargs(*args, **kwargs) -> Tuple[List[str], List[Any]]: """ Usage:: kwarg_keys, flat_args = pack_kwargs(1, 2, a=3, b=4) args, kwargs = unpack_kwargs(kwarg_keys, flat_args) assert args == [1, 2] assert kwargs == {"a": 3, "b": 4} """ kwarg_keys = [] flat_args = list(args) for k, v in kwargs.items(): kwarg_keys.append(k) flat_args.append(v) return kwarg_keys, flat_args def unpack_kwargs( kwarg_keys: List[str], flat_args: List[Any] ) -> Tuple[List[Any], Dict[str, Any]]: if len(kwarg_keys) == 0: return flat_args, {} args = flat_args[: -len(kwarg_keys)] kwargs = {k: v for k, v in zip(kwarg_keys, flat_args[-len(kwarg_keys) :])} return args, kwargs def split_non_tensors( mixed: Union[torch.Tensor, Tuple[Any]] ) -> Tuple[Tuple[torch.Tensor], Dict[str, List[Any]]]: """ Usage:: x = torch.Tensor([1]) y = torch.Tensor([2]) tensors, packed_non_tensors = split_non_tensors((x, y, None, 3)) recon = unpack_non_tensors(tensors, packed_non_tensors) assert recon == (x, y, None, 3) """ if isinstance(mixed, torch.Tensor): return (mixed,), None tensors = [] packed_non_tensors = {"is_tensor": [], "objects": []} for o in mixed: if isinstance(o, torch.Tensor): packed_non_tensors["is_tensor"].append(True) tensors.append(o) else: packed_non_tensors["is_tensor"].append(False) packed_non_tensors["objects"].append(o) return tuple(tensors), packed_non_tensors def unpack_non_tensors( tensors: Tuple[torch.Tensor], packed_non_tensors: Dict[str, List[Any]], ) -> Tuple[Any]: if packed_non_tensors is None: return tensors assert isinstance(packed_non_tensors, dict) mixed = [] is_tensor_list = packed_non_tensors["is_tensor"] objects = packed_non_tensors["objects"] assert len(tensors) + len(objects) == len(is_tensor_list) obj_i = tnsr_i = 0 for is_tensor in is_tensor_list: if is_tensor: mixed.append(tensors[tnsr_i]) tnsr_i += 1 else: mixed.append(objects[obj_i]) obj_i += 1 return tuple(mixed) class CheckpointFunction(torch.autograd.Function): """Similar to the torch version, but support non-Tensor outputs. The caller is expected to provide a dict (*parent_ctx_dict*) that will hold the non-Tensor outputs. These should be combined with the Tensor *outputs* by calling ``unpack_non_tensors``. 
""" @staticmethod def forward(ctx, run_function, parent_ctx_dict, kwarg_keys, *args): if torch.is_grad_enabled(): # grad may be disabled, e.g., during validation checkpoint.check_backward_validity(args) ctx.run_function = run_function ctx.kwarg_keys = kwarg_keys ctx.fwd_rng_state = utils.get_rng_state() tensor_inputs, packed_non_tensor_inputs = split_non_tensors(args) if parent_ctx_dict["offload"]: ctx.fwd_device = tuple(x.device for x in tensor_inputs) ctx.grad_requirements = tuple(x.requires_grad for x in tensor_inputs) tensor_inputs = tuple( x.to(torch.device("cpu"), non_blocking=True) for x in tensor_inputs ) else: ctx.fwd_device, ctx.grad_requirements = None, None ctx.save_for_backward(*tensor_inputs) ctx.packed_non_tensor_inputs = packed_non_tensor_inputs with torch.no_grad(): unpacked_args, unpacked_kwargs = unpack_kwargs(kwarg_keys, args) outputs = run_function(*unpacked_args, **unpacked_kwargs) if isinstance(outputs, torch.Tensor): return outputs else: # Autograd Functions don't like non-Tensor outputs. We can split the # non-Tensor and Tensor outputs, returning the former by reference # through *parent_ctx_dict* and returning the latter directly. outputs, packed_non_tensor_outputs = split_non_tensors(outputs) parent_ctx_dict["packed_non_tensor_outputs"] = packed_non_tensor_outputs return outputs @staticmethod def backward(ctx, *args): if not torch.autograd._is_checkpoint_valid(): raise RuntimeError( "Checkpointing is not compatible with .grad(), please use .backward() if possible" ) tensor_inputs: Tuple = ctx.saved_tensors tensor_inputs = checkpoint.detach_variable(tensor_inputs) if ctx.fwd_device is not None: tensor_inputs = [ t.to(ctx.fwd_device[i], non_blocking=True) for i, t in enumerate(tensor_inputs) ] for i, need_grad in enumerate(ctx.grad_requirements): tensor_inputs[i].requires_grad = need_grad inputs = unpack_non_tensors(tensor_inputs, ctx.packed_non_tensor_inputs) # Store the current states. bwd_rng_state = utils.get_rng_state() # Set the states to what it used to be before the forward pass. utils.set_rng_state(ctx.fwd_rng_state) with torch.enable_grad(): unpacked_args, unpacked_kwargs = unpack_kwargs(ctx.kwarg_keys, inputs) outputs = ctx.run_function(*unpacked_args, **unpacked_kwargs) tensor_outputs, _ = split_non_tensors(outputs) # Set the states back to what it was at the start of this function. utils.set_rng_state(bwd_rng_state) # Run backward() with only Tensors that require grad outputs_with_grad = [] args_with_grad = [] for i in range(len(tensor_outputs)): if tensor_outputs[i].requires_grad: outputs_with_grad.append(tensor_outputs[i]) args_with_grad.append(args[i]) if len(outputs_with_grad) == 0: raise RuntimeError( "None of the outputs have requires_grad=True, " "this checkpoint() is not necessary" ) torch.autograd.backward(outputs_with_grad, args_with_grad) grads = tuple( inp.grad if isinstance(inp, torch.Tensor) else None for inp in inputs ) return (None, None, None) + grads
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/checkpoint_activations.py
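A minimal usage sketch for checkpoint_wrapper above; the toy module and tensor sizes are assumptions made for illustration, not part of the repository.

import torch
import torch.nn as nn

layer = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 16))
layer = checkpoint_wrapper(layer)   # all subsequent calls go through activation checkpointing

x = torch.randn(4, 16, requires_grad=True)
out = layer(x)                      # forward runs under no_grad and is recomputed in backward
out.sum().backward()
assert x.grad is not None           # gradients still reach the inputs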
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.modules import TransformerSentenceEncoderLayer
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention


class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
    """
    Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention)
    """

    def __init__(
        self,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
        dropout: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        activation_fn: str = "relu",
        export: bool = False,
        is_bidirectional: bool = True,
        stride: int = 32,
        expressivity: int = 8,
    ) -> None:
        super().__init__(
            embedding_dim,
            ffn_embedding_dim,
            num_attention_heads,
            dropout,
            attention_dropout,
            activation_dropout,
            activation_fn,
            export,
        )

        self.self_attn = SparseMultiheadAttention(
            self.embedding_dim,
            num_attention_heads,
            dropout=attention_dropout,
            add_bias_kv=False,
            add_zero_attn=False,
            self_attention=True,
            is_bidirectional=is_bidirectional,
            stride=stride,
            expressivity=expressivity,
        )
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/sparse_transformer_sentence_encoder_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import functools import operator import torch import torch.nn.functional as F from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise from torch import nn class TiedLinear(nn.Module): def __init__(self, weight, transpose): super().__init__() self.weight = weight self.transpose = transpose def forward(self, input): return F.linear(input, self.weight.t() if self.transpose else self.weight) class TiedHeadModule(nn.Module): def __init__(self, weights, input_dim, num_classes, q_noise, qn_block_size): super().__init__() tied_emb, _ = weights self.num_words, emb_dim = tied_emb.size() self.word_proj = quant_noise( TiedLinear(tied_emb, transpose=False), q_noise, qn_block_size ) if input_dim != emb_dim: self.word_proj = nn.Sequential( quant_noise( nn.Linear(input_dim, emb_dim, bias=False), q_noise, qn_block_size ), self.word_proj, ) self.class_proj = quant_noise( nn.Linear(input_dim, num_classes, bias=False), q_noise, qn_block_size ) self.out_dim = self.num_words + num_classes self.register_buffer("_float_tensor", torch.FloatTensor(1)) def forward(self, input): inp_sz = functools.reduce(operator.mul, input.shape[:-1], 1) out = self._float_tensor.new(inp_sz, self.out_dim) out[:, : self.num_words] = self.word_proj(input.view(inp_sz, -1)) out[:, self.num_words :] = self.class_proj(input.view(inp_sz, -1)) return out class AdaptiveSoftmax(nn.Module): """ This is an implementation of the efficient softmax approximation for graphical processing units (GPU), described in the paper "Efficient softmax approximation for GPUs" (http://arxiv.org/abs/1609.04309). 
""" def __init__( self, vocab_size, input_dim, cutoff, dropout, factor=4.0, adaptive_inputs=None, tie_proj=False, q_noise=0, qn_block_size=8, ): super().__init__() if vocab_size > cutoff[-1]: cutoff = cutoff + [vocab_size] else: assert ( vocab_size == cutoff[-1] ), "cannot specify cutoff larger than vocab size" output_dim = cutoff[0] + len(cutoff) - 1 self.vocab_size = vocab_size self.cutoff = cutoff self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.input_dim = input_dim self.factor = factor self.q_noise = q_noise self.qn_block_size = qn_block_size self.lsm = nn.LogSoftmax(dim=1) if adaptive_inputs is not None: self.head = TiedHeadModule( adaptive_inputs.weights_for_band(0), input_dim, len(cutoff) - 1, self.q_noise, self.qn_block_size, ) else: self.head = quant_noise( nn.Linear(input_dim, output_dim, bias=False), self.q_noise, self.qn_block_size, ) self._make_tail(adaptive_inputs, tie_proj) def init_weights(m): if ( hasattr(m, "weight") and not isinstance(m, TiedLinear) and not isinstance(m, TiedHeadModule) ): nn.init.xavier_uniform_(m.weight) self.apply(init_weights) self.register_buffer("version", torch.LongTensor([1])) def _make_tail(self, adaptive_inputs=None, tie_proj=False): self.tail = nn.ModuleList() for i in range(len(self.cutoff) - 1): dim = int(self.input_dim // self.factor ** (i + 1)) tied_emb, tied_proj = ( adaptive_inputs.weights_for_band(i + 1) if adaptive_inputs is not None else (None, None) ) if tied_proj is not None: if tie_proj: proj = quant_noise( TiedLinear(tied_proj, transpose=True), self.q_noise, self.qn_block_size, ) else: proj = quant_noise( nn.Linear(tied_proj.size(0), tied_proj.size(1), bias=False), self.q_noise, self.qn_block_size, ) else: proj = quant_noise( nn.Linear(self.input_dim, dim, bias=False), self.q_noise, self.qn_block_size, ) if tied_emb is None: out_proj = nn.Linear( dim, self.cutoff[i + 1] - self.cutoff[i], bias=False ) else: out_proj = TiedLinear(tied_emb, transpose=False) m = nn.Sequential( proj, nn.Dropout(self.dropout_module.p), quant_noise(out_proj, self.q_noise, self.qn_block_size), ) self.tail.append(m) def upgrade_state_dict_named(self, state_dict, name): version_name = name + ".version" if version_name not in state_dict: raise Exception("This version of the model is no longer supported") def adapt_target(self, target): """ In order to be efficient, the AdaptiveSoftMax does not compute the scores for all the word of the vocabulary for all the examples. It is thus necessary to call the method adapt_target of the AdaptiveSoftMax layer inside each forward pass. 
""" target = target.view(-1) new_target = [target.clone()] target_idxs = [] for i in range(len(self.cutoff) - 1): mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1])) new_target[0][mask] = self.cutoff[0] + i if mask.any(): target_idxs.append(mask.nonzero(as_tuple=False).squeeze(1)) new_target.append(target[mask].add(-self.cutoff[i])) else: target_idxs.append(None) new_target.append(None) return new_target, target_idxs def forward(self, input, target): """ Args: input: (b x t x d) target: (b x t) Returns: 2 lists: output for each cutoff section and new targets by cut off """ input = input.contiguous().view(-1, input.size(-1)) input = self.dropout_module(input) new_target, target_idxs = self.adapt_target(target) output = [self.head(input)] for i in range(len(target_idxs)): if target_idxs[i] is not None: output.append(self.tail[i](input.index_select(0, target_idxs[i]))) else: output.append(None) return output, new_target def get_log_prob(self, input, target): """ Computes the log probabilities for all the words of the vocabulary, given a 2D tensor of hidden vectors. """ bsz, length, dim = input.size() input = input.contiguous().view(-1, dim) if target is not None: _, target_idxs = self.adapt_target(target) else: target_idxs = None head_y = self.head(input) log_probs = head_y.new_zeros(input.size(0), self.vocab_size) head_sz = self.cutoff[0] + len(self.tail) log_probs[:, :head_sz] = self.lsm(head_y) tail_priors = log_probs[:, self.cutoff[0] : head_sz].clone() for i in range(len(self.tail)): start = self.cutoff[i] end = self.cutoff[i + 1] if target_idxs is None: tail_out = log_probs[:, start:end] tail_out.copy_(self.tail[i](input)) log_probs[:, start:end] = self.lsm(tail_out).add_( tail_priors[:, i, None] ) elif target_idxs[i] is not None: idxs = target_idxs[i] tail_out = log_probs[idxs, start:end] tail_out.copy_(self.tail[i](input[idxs])) log_probs[idxs, start:end] = self.lsm(tail_out).add_( tail_priors[idxs, i, None] ) log_probs = log_probs.view(bsz, length, -1) return log_probs
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/adaptive_softmax.py
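A small forward-pass sketch for AdaptiveSoftmax above; the vocabulary size, cutoffs, and tensor shapes are toy values assumed here.

import torch

m = AdaptiveSoftmax(vocab_size=1000, input_dim=64, cutoff=[100, 500], dropout=0.1)
x = torch.randn(2, 7, 64)                  # B x T x D hidden states
target = torch.randint(0, 1000, (2, 7))    # B x T token ids

output, new_target = m(x, target)
# one score tensor for the head plus one entry (possibly None) per tail cluster
assert len(output) == len(m.cutoff)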
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn from torch.nn.modules.utils import _single from torch import Tensor class ConvTBC(torch.nn.Module): """1D convolution over an input of shape (time x batch x channel) The implementation uses gemm to perform the convolution. This implementation is faster than cuDNN for small kernel sizes. """ def __init__(self, in_channels, out_channels, kernel_size, padding=0): super(ConvTBC, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _single(kernel_size) self.padding = _single(padding) self.weight = torch.nn.Parameter( torch.Tensor(self.kernel_size[0], in_channels, out_channels) ) self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) self.reset_parameters() def reset_parameters(self): nn.init.xavier_normal_(self.weight) nn.init.zeros_(self.bias) def conv_tbc(self, input: Tensor): return torch.conv_tbc( input.contiguous(), self.weight, self.bias, self.padding[0] ) def forward(self, input: Tensor): return self.conv_tbc(input) def __repr__(self): s = ( "{name}({in_channels}, {out_channels}, kernel_size={kernel_size}" ", padding={padding}" ) if self.bias is None: s += ", bias=False" s += ")" return s.format(name=self.__class__.__name__, **self.__dict__)
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/conv_tbc.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ batch norm done in fp32 (for fp16 training) """ import torch import torch.nn as nn class Fp32BatchNorm(nn.Module): def __init__(self, sync=False, *args, **kwargs): super().__init__() if sync: from fairseq.distributed import utils if utils.get_global_world_size() == 1: sync = False if sync: self.bn = nn.SyncBatchNorm(*args, **kwargs) else: self.bn = nn.BatchNorm1d(*args, **kwargs) self.sync = sync def forward(self, input): if self.bn.running_mean.dtype != torch.float: if self.sync: self.bn.running_mean = self.bn.running_mean.float() self.bn.running_var = self.bn.running_var.float() if self.bn.affine: try: self.bn.weight = self.bn.weight.float() self.bn.bias = self.bn.bias.float() except: self.bn.float() else: self.bn.float() output = self.bn(input.float()) return output.type_as(input)
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/fp32_batch_norm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Layer norm done in fp32 (for fp16 training) """ import torch.nn as nn import torch.nn.functional as F class Fp32InstanceNorm(nn.InstanceNorm1d): def __init__(self, *args, **kwargs): self.transpose_last = "transpose_last" in kwargs and kwargs["transpose_last"] if "transpose_last" in kwargs: del kwargs["transpose_last"] super().__init__(*args, **kwargs) def forward(self, input): if self.transpose_last: input = input.transpose(1, 2) output = F.instance_norm( input.float(), running_mean=self.running_mean, running_var=self.running_var, weight=self.weight.float() if self.weight is not None else None, bias=self.bias.float() if self.bias is not None else None, use_input_stats=self.training or not self.track_running_stats, momentum=self.momentum, eps=self.eps, ) if self.transpose_last: output = output.transpose(1, 2) return output.type_as(input)
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/fp32_instance_norm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn.functional as F def unfold1d(x, kernel_size, padding_l, pad_value=0): """unfold T x B x C to T x B x C x K""" if kernel_size > 1: T, B, C = x.size() x = F.pad( x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value ) x = x.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C)) else: x = x.unsqueeze(3) return x
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/unfold.py
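A shape sketch for unfold1d above, with example sizes assumed here: a (T, B, C) sequence is turned into (T, B, C, K) sliding windows without copying, via as_strided on the padded input.

import torch

T, B, C, K = 7, 2, 4, 3
x = torch.randn(T, B, C)
windows = unfold1d(x, kernel_size=K, padding_l=1)   # symmetric padding for K == 3
assert windows.shape == (T, B, C, K)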
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn import torch import sys from fairseq import utils from fairseq.distributed import utils as distributed_utils from fairseq.modules.layer_norm import LayerNorm class BaseLayer(nn.Module): def __init__(self, args): super().__init__() self.num_workers = distributed_utils.get_data_parallel_world_size() expert_centroids = torch.empty(self.num_workers, args.decoder_embed_dim) torch.nn.init.orthogonal_(expert_centroids, gain=0.1) self.register_parameter( "expert_centroids", torch.nn.Parameter(expert_centroids) ) self.expert_network = nn.Sequential( *([BaseSublayer(args) for _ in range(args.base_sublayers)]) ) self.expert_id = distributed_utils.get_data_parallel_rank() self.shuffle = args.base_shuffle self.cpp = self.load_assignment() # Add a special attribute to the expert parameters, so we know not to sync their gradients for param in self.expert_network.parameters(): param.expert = True def forward(self, input_features, *args, **kwargs): features = input_features.reshape(-1, input_features.size(-1)) is_training = input_features.requires_grad if self.shuffle and is_training: # Send each token to a random worker, to break correlations within the batch shuffle_sort = torch.randperm(features.size(0), device=features.device) features = All2All.apply(features[shuffle_sort]) with torch.no_grad(): # Compute similarity of each token to each expert, for routing token_expert_affinities = features.matmul( self.expert_centroids.transpose(0, 1) ) # Compute which token goes to which expert sort_by_expert, input_splits, output_splits = ( self.balanced_assignment(token_expert_affinities) if is_training else self.greedy_assignment(token_expert_affinities) ) # Swap these tokens for the right ones for our expert routed_features = All2All.apply( features[sort_by_expert], output_splits, input_splits ) if routed_features.size(0) > 0: # Mix in the expert network based on how appropriate it is for these tokens alpha = torch.sigmoid( routed_features.mv(self.expert_centroids[self.expert_id]) ).unsqueeze(1) routed_features = ( alpha * self.expert_network(routed_features) + (1 - alpha) * routed_features ) # Return to original worker and ordering result = All2All.apply(routed_features, input_splits, output_splits)[ self.inverse_sort(sort_by_expert) ] if self.shuffle and is_training: # Undo shuffling result = All2All.apply(result)[self.inverse_sort(shuffle_sort)] # Return additional Nones for compatibility with TransformerDecoderLayer return result.view(input_features.size()), None, None def inverse_sort(self, order): # Creates an index that undoes a sort: xs==xs[order][inverse_sort(order)] return torch.empty_like(order).scatter_( 0, order, torch.arange(0, order.size(0), device=order.device) ) def balanced_assignment(self, scores): ok = scores.isfinite() if not ok.all(): # NaNs here can break the assignment algorithm scores[~ok] = scores[ok].min() return self.cpp.balanced_assignment(scores), None, None # Assigns each token to the top k experts def greedy_assignment(self, scores, k=1): token_to_workers = torch.topk(scores, dim=1, k=k, largest=True).indices.view(-1) token_to_workers, sort_ordering = torch.sort(token_to_workers) worker2token = sort_ordering // k # Find how many tokens we're sending to each other worker (being careful for sending 0 tokens to some workers) output_splits = torch.zeros( (self.num_workers,), 
dtype=torch.long, device=scores.device ) workers, counts = torch.unique_consecutive(token_to_workers, return_counts=True) output_splits[workers] = counts # Tell other workers how many tokens to expect from us input_splits = All2All.apply(output_splits) return worker2token, input_splits.tolist(), output_splits.tolist() def load_assignment(self): try: from fairseq import libbase return libbase except ImportError as e: sys.stderr.write( "ERROR: missing libbase. run `python setup.py build_ext --inplace`\n" ) raise e class BaseSublayer(nn.Module): def __init__(self, args): super().__init__() self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") or "relu" ) self.norm = LayerNorm(args.decoder_embed_dim, export=False) self.ff1 = torch.nn.Linear(args.decoder_embed_dim, args.decoder_ffn_embed_dim) self.ff2 = torch.nn.Linear(args.decoder_ffn_embed_dim, args.decoder_embed_dim) self.ff2.weight.data.zero_() def forward(self, xs): return xs + self.ff2(self.activation_fn(self.ff1(self.norm(xs)))) # Wraps torch.distributed.all_to_all_single as a function that supports autograd class All2All(torch.autograd.Function): @staticmethod def forward(ctx, xs, input_splits=None, output_splits=None): ctx.input_splits = input_splits ctx.output_splits = output_splits ys = ( torch.empty_like(xs) if output_splits is None else xs.new_empty(size=[sum(output_splits)] + list(xs.size()[1:])) ) torch.distributed.all_to_all_single( ys, xs, output_split_sizes=output_splits, input_split_sizes=input_splits ) return ys @staticmethod def backward(ctx, grad_output): result = ( torch.empty_like(grad_output) if ctx.input_splits is None else grad_output.new_empty( size=[sum(ctx.input_splits)] + list(grad_output.size()[1:]) ) ) torch.distributed.all_to_all_single( result, grad_output, output_split_sizes=ctx.input_splits, input_split_sizes=ctx.output_splits, ) return result, None, None
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/base_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ This file is to re-implemented the low-rank and beam approximation of CRF layer Proposed by: Sun, Zhiqing, et al. Fast Structured Decoding for Sequence Models https://arxiv.org/abs/1910.11555 The CRF implementation is mainly borrowed from https://github.com/kmkurn/pytorch-crf/blob/master/torchcrf/__init__.py """ import numpy as np import torch import torch.nn as nn def logsumexp(x, dim=1): return torch.logsumexp(x.float(), dim=dim).type_as(x) class DynamicCRF(nn.Module): """Dynamic CRF layer is used to approximate the traditional Conditional Random Fields (CRF) $P(y | x) = 1/Z(x) exp(sum_i s(y_i, x) + sum_i t(y_{i-1}, y_i, x))$ where in this function, we assume the emition scores (s) are given, and the transition score is a |V| x |V| matrix $M$ in the following two aspects: (1) it used a low-rank approximation for the transition matrix: $M = E_1 E_2^T$ (2) it used a beam to estimate the normalizing factor Z(x) """ def __init__(self, num_embedding, low_rank=32, beam_size=64): super().__init__() self.E1 = nn.Embedding(num_embedding, low_rank) self.E2 = nn.Embedding(num_embedding, low_rank) self.vocb = num_embedding self.rank = low_rank self.beam = beam_size def extra_repr(self): return "vocab_size={}, low_rank={}, beam_size={}".format( self.vocb, self.rank, self.beam ) def forward(self, emissions, targets, masks, beam=None): """ Compute the conditional log-likelihood of a sequence of target tokens given emission scores Args: emissions (`~torch.Tensor`): Emission score are usually the unnormalized decoder output ``(batch_size, seq_len, vocab_size)``. We assume batch-first targets (`~torch.LongTensor`): Sequence of target token indices ``(batch_size, seq_len) masks (`~torch.ByteTensor`): Mask tensor with the same size as targets Returns: `~torch.Tensor`: approximated log-likelihood """ numerator = self._compute_score(emissions, targets, masks) denominator = self._compute_normalizer(emissions, targets, masks, beam) return numerator - denominator def forward_decoder(self, emissions, masks=None, beam=None): """ Find the most likely output sequence using Viterbi algorithm. Args: emissions (`~torch.Tensor`): Emission score are usually the unnormalized decoder output ``(batch_size, seq_len, vocab_size)``. We assume batch-first masks (`~torch.ByteTensor`): Mask tensor with the same size as targets Returns: `~torch.LongTensor`: decoded sequence from the CRF model """ return self._viterbi_decode(emissions, masks, beam) def _compute_score(self, emissions, targets, masks=None): batch_size, seq_len = targets.size() emission_scores = emissions.gather(2, targets[:, :, None])[:, :, 0] # B x T transition_scores = (self.E1(targets[:, :-1]) * self.E2(targets[:, 1:])).sum(2) scores = emission_scores scores[:, 1:] += transition_scores if masks is not None: scores = scores * masks.type_as(scores) return scores.sum(-1) def _compute_normalizer(self, emissions, targets=None, masks=None, beam=None): # HACK: we include "target" which is a hueristic for training # HACK: we use a beam of tokens to approximate the normalizing factor (which is bad?) 
beam = beam if beam is not None else self.beam batch_size, seq_len = emissions.size()[:2] if targets is not None: _emissions = emissions.scatter(2, targets[:, :, None], np.float("inf")) beam_targets = _emissions.topk(beam, 2)[1] beam_emission_scores = emissions.gather(2, beam_targets) else: beam_emission_scores, beam_targets = emissions.topk(beam, 2) beam_transition_score1 = self.E1(beam_targets[:, :-1]) # B x (T-1) x K x D beam_transition_score2 = self.E2(beam_targets[:, 1:]) # B x (T-1) x K x D beam_transition_matrix = torch.bmm( beam_transition_score1.view(-1, beam, self.rank), beam_transition_score2.view(-1, beam, self.rank).transpose(1, 2), ) beam_transition_matrix = beam_transition_matrix.view(batch_size, -1, beam, beam) # compute the normalizer in the log-space score = beam_emission_scores[:, 0] # B x K for i in range(1, seq_len): next_score = score[:, :, None] + beam_transition_matrix[:, i - 1] next_score = logsumexp(next_score, dim=1) + beam_emission_scores[:, i] if masks is not None: score = torch.where(masks[:, i : i + 1], next_score, score) else: score = next_score # Sum (log-sum-exp) over all possible tags return logsumexp(score, dim=1) def _viterbi_decode(self, emissions, masks=None, beam=None): # HACK: we use a beam of tokens to approximate the normalizing factor (which is bad?) beam = beam if beam is not None else self.beam batch_size, seq_len = emissions.size()[:2] beam_emission_scores, beam_targets = emissions.topk(beam, 2) beam_transition_score1 = self.E1(beam_targets[:, :-1]) # B x (T-1) x K x D beam_transition_score2 = self.E2(beam_targets[:, 1:]) # B x (T-1) x K x D beam_transition_matrix = torch.bmm( beam_transition_score1.view(-1, beam, self.rank), beam_transition_score2.view(-1, beam, self.rank).transpose(1, 2), ) beam_transition_matrix = beam_transition_matrix.view(batch_size, -1, beam, beam) traj_tokens, traj_scores = [], [] finalized_tokens, finalized_scores = [], [] # compute the normalizer in the log-space score = beam_emission_scores[:, 0] # B x K dummy = ( torch.arange(beam, device=score.device).expand(*score.size()).contiguous() ) for i in range(1, seq_len): traj_scores.append(score) _score = score[:, :, None] + beam_transition_matrix[:, i - 1] _score, _index = _score.max(dim=1) _score = _score + beam_emission_scores[:, i] if masks is not None: score = torch.where(masks[:, i : i + 1], _score, score) index = torch.where(masks[:, i : i + 1], _index, dummy) else: score, index = _score, _index traj_tokens.append(index) # now running the back-tracing and find the best best_score, best_index = score.max(dim=1) finalized_tokens.append(best_index[:, None]) finalized_scores.append(best_score[:, None]) for idx, scs in zip(reversed(traj_tokens), reversed(traj_scores)): previous_index = finalized_tokens[-1] finalized_tokens.append(idx.gather(1, previous_index)) finalized_scores.append(scs.gather(1, previous_index)) finalized_tokens.reverse() finalized_tokens = torch.cat(finalized_tokens, 1) finalized_tokens = beam_targets.gather(2, finalized_tokens[:, :, None])[:, :, 0] finalized_scores.reverse() finalized_scores = torch.cat(finalized_scores, 1) finalized_scores[:, 1:] = finalized_scores[:, 1:] - finalized_scores[:, :-1] return finalized_scores, finalized_tokens
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/dynamic_crf_layer.py
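A decoding sketch for DynamicCRF above; the vocabulary size, beam, and batch shapes are assumed toy values. The full |V| x |V| transition matrix is never materialized: only beam candidates are scored, through the two low-rank embeddings E1 and E2.

import torch

crf = DynamicCRF(num_embedding=100, low_rank=32, beam_size=8)
emissions = torch.randn(2, 5, 100)            # B x T x |V| unnormalized decoder scores
masks = torch.ones(2, 5, dtype=torch.bool)

scores, tokens = crf.forward_decoder(emissions, masks)
assert tokens.shape == (2, 5)                 # one decoded token per position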
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with the corresponding GitHub repo: https://github.com/hendrycks/GELUs """ import math import torch def gelu_accurate(x): if not hasattr(gelu_accurate, "_a"): gelu_accurate._a = math.sqrt(2 / math.pi) return ( 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) ) def gelu(x: torch.Tensor) -> torch.Tensor: return torch.nn.functional.gelu(x.float()).type_as(x)
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/gelu.py
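A quick numeric check (the tolerance is chosen loosely here) that the tanh-based gelu_accurate above closely tracks the exact erf-based gelu wrapper.

import torch

x = torch.linspace(-3, 3, steps=101)
assert torch.allclose(gelu(x), gelu_accurate(x), atol=1e-2)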
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ LayerDrop as described in https://arxiv.org/abs/1909.11556. """ import torch import torch.nn as nn class LayerDropModuleList(nn.ModuleList): """ A LayerDrop implementation based on :class:`torch.nn.ModuleList`. We refresh the choice of which layers to drop every time we iterate over the LayerDropModuleList instance. During evaluation we always iterate over all layers. Usage:: layers = LayerDropList(p=0.5, modules=[layer1, layer2, layer3]) for layer in layers: # this might iterate over layers 1 and 3 x = layer(x) for layer in layers: # this might iterate over all layers x = layer(x) for layer in layers: # this might not iterate over any layers x = layer(x) Args: p (float): probability of dropping out each layer modules (iterable, optional): an iterable of modules to add """ def __init__(self, p, modules=None): super().__init__(modules) self.p = p def __iter__(self): dropout_probs = torch.empty(len(self)).uniform_() for i, m in enumerate(super().__iter__()): if not self.training or (dropout_probs[i] > self.p): yield m
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/layer_drop.py
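A usage sketch for LayerDropModuleList above; the layer count and drop probability are placeholders chosen here. In eval mode every layer is yielded; in training mode roughly a fraction p of them is skipped on each iteration.

import torch.nn as nn

layers = LayerDropModuleList(p=0.5, modules=[nn.Linear(8, 8) for _ in range(6)])

layers.eval()                      # evaluation: iterate over all layers
assert sum(1 for _ in layers) == 6

layers.train()                     # training: each pass keeps a random subset
assert sum(1 for _ in layers) <= 6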
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional, Tuple import torch import torch.nn as nn from fairseq.modules import ( FairseqDropout, LayerDropModuleList, LayerNorm, MultiheadAttention, PositionalEmbedding, TransformerSentenceEncoderLayer, ) from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ def init_bert_params(module): """ Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bais will be set to the specified value. 2. If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention initialized using the normal distribution (to be validated). """ def normal_(data): # with FSDP, module params will be on CUDA, so we cast them back to CPU # so that the RNG is consistent with and without FSDP data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device)) if isinstance(module, nn.Linear): normal_(module.weight.data) if module.bias is not None: module.bias.data.zero_() if isinstance(module, nn.Embedding): normal_(module.weight.data) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if isinstance(module, MultiheadAttention): normal_(module.q_proj.weight.data) normal_(module.k_proj.weight.data) normal_(module.v_proj.weight.data) class TransformerSentenceEncoder(nn.Module): """ Implementation for a Bi-directional Transformer based Sentence Encoder used in BERT/XLM style pre-trained models. This first computes the token embedding using the token embedding matrix, position embeddings (if specified) and segment embeddings (if specified). After applying the specified number of TransformerEncoderLayers, it outputs all the internal states of the encoder as well as the final representation associated with the first token (usually CLS token). Input: - tokens: B x T matrix representing sentences - segment_labels: B x T matrix representing segment label for tokens Output: - a tuple of the following: - a list of internal model states used to compute the predictions where each tensor has shape T x B x C - sentence representation associated with first input token in format B x C. 
""" def __init__( self, padding_idx: int, vocab_size: int, num_encoder_layers: int = 6, embedding_dim: int = 768, ffn_embedding_dim: int = 3072, num_attention_heads: int = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, layerdrop: float = 0.0, max_seq_len: int = 256, num_segments: int = 2, use_position_embeddings: bool = True, offset_positions_by_padding: bool = True, encoder_normalize_before: bool = False, apply_bert_init: bool = False, activation_fn: str = "relu", learned_pos_embedding: bool = True, embed_scale: float = None, freeze_embeddings: bool = False, n_trans_layers_to_freeze: int = 0, export: bool = False, traceable: bool = False, q_noise: float = 0.0, qn_block_size: int = 8, ) -> None: super().__init__() self.padding_idx = padding_idx self.vocab_size = vocab_size self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.layerdrop = layerdrop self.max_seq_len = max_seq_len self.embedding_dim = embedding_dim self.num_segments = num_segments self.use_position_embeddings = use_position_embeddings self.apply_bert_init = apply_bert_init self.learned_pos_embedding = learned_pos_embedding self.traceable = traceable self.embed_tokens = self.build_embedding( self.vocab_size, self.embedding_dim, self.padding_idx ) self.embed_scale = embed_scale if q_noise > 0: self.quant_noise = apply_quant_noise_( nn.Linear(self.embedding_dim, self.embedding_dim, bias=False), q_noise, qn_block_size, ) else: self.quant_noise = None self.segment_embeddings = ( nn.Embedding(self.num_segments, self.embedding_dim, padding_idx=None) if self.num_segments > 0 else None ) self.embed_positions = ( PositionalEmbedding( self.max_seq_len, self.embedding_dim, padding_idx=(self.padding_idx if offset_positions_by_padding else None), learned=self.learned_pos_embedding, ) if self.use_position_embeddings else None ) if encoder_normalize_before: self.emb_layer_norm = LayerNorm(self.embedding_dim, export=export) else: self.emb_layer_norm = None if self.layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.layerdrop) else: self.layers = nn.ModuleList([]) self.layers.extend( [ self.build_transformer_sentence_encoder_layer( embedding_dim=self.embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=self.dropout_module.p, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, q_noise=q_noise, qn_block_size=qn_block_size, ) for _ in range(num_encoder_layers) ] ) # Apply initialization of model params after building the model if self.apply_bert_init: self.apply(init_bert_params) def freeze_module_params(m): if m is not None: for p in m.parameters(): p.requires_grad = False if freeze_embeddings: freeze_module_params(self.embed_tokens) freeze_module_params(self.segment_embeddings) freeze_module_params(self.embed_positions) freeze_module_params(self.emb_layer_norm) for layer in range(n_trans_layers_to_freeze): freeze_module_params(self.layers[layer]) def build_embedding(self, vocab_size, embedding_dim, padding_idx): return nn.Embedding(vocab_size, embedding_dim, padding_idx) def build_transformer_sentence_encoder_layer( self, embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, activation_fn, export, q_noise, qn_block_size, ): return TransformerSentenceEncoderLayer( embedding_dim=embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=dropout, 
attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, q_noise=q_noise, qn_block_size=qn_block_size, ) def forward( self, tokens: torch.Tensor, segment_labels: torch.Tensor = None, last_state_only: bool = False, positions: Optional[torch.Tensor] = None, token_embeddings: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: is_tpu = tokens.device.type == "xla" # compute padding mask. This is needed for multi-head attention padding_mask = tokens.eq(self.padding_idx) if not self.traceable and not is_tpu and not padding_mask.any(): padding_mask = None if token_embeddings is not None: x = token_embeddings else: x = self.embed_tokens(tokens) if self.embed_scale is not None: x = x * self.embed_scale if self.embed_positions is not None: x = x + self.embed_positions(tokens, positions=positions) if self.segment_embeddings is not None and segment_labels is not None: x = x + self.segment_embeddings(segment_labels) if self.quant_noise is not None: x = self.quant_noise(x) if self.emb_layer_norm is not None: x = self.emb_layer_norm(x) x = self.dropout_module(x) # account for padding while computing the representation if padding_mask is not None: x = x * (1 - padding_mask.unsqueeze(-1).type_as(x)) # B x T x C -> T x B x C x = x.transpose(0, 1) inner_states = [] if not last_state_only: inner_states.append(x) for layer in self.layers: x, _ = layer( x, self_attn_padding_mask=padding_mask, self_attn_mask=attn_mask ) if not last_state_only: inner_states.append(x) sentence_rep = x[0, :, :] if last_state_only: inner_states = [x] if self.traceable: return torch.stack(inner_states), sentence_rep else: return inner_states, sentence_rep
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/transformer_sentence_encoder.py
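A minimal forward-pass sketch for TransformerSentenceEncoder above; the vocabulary size, padding index, and model dimensions are toy values assumed here.

import torch

encoder = TransformerSentenceEncoder(
    padding_idx=1,
    vocab_size=100,
    num_encoder_layers=2,
    embedding_dim=64,
    ffn_embedding_dim=128,
    num_attention_heads=4,
)
tokens = torch.randint(2, 100, (3, 10))       # B x T, no padding tokens in this toy batch
inner_states, sentence_rep = encoder(tokens)

assert inner_states[-1].shape == (10, 3, 64)  # internal states are T x B x C
assert sentence_rep.shape == (3, 64)          # representation of the first token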
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" from .adaptive_input import AdaptiveInput from .adaptive_softmax import AdaptiveSoftmax from .base_layer import BaseLayer from .beamable_mm import BeamableMM from .character_token_embedder import CharacterTokenEmbedder from .conv_tbc import ConvTBC from .cross_entropy import cross_entropy from .downsampled_multihead_attention import DownsampledMultiHeadAttention from .dynamic_convolution import DynamicConv, DynamicConv1dTBC from .dynamic_crf_layer import DynamicCRF from .fairseq_dropout import FairseqDropout from .fp32_batch_norm import Fp32BatchNorm from .fp32_group_norm import Fp32GroupNorm from .fp32_instance_norm import Fp32InstanceNorm from .gelu import gelu, gelu_accurate from .grad_multiply import GradMultiply from .gumbel_vector_quantizer import GumbelVectorQuantizer from .kmeans_vector_quantizer import KmeansVectorQuantizer from .layer_drop import LayerDropModuleList from .layer_norm import Fp32LayerNorm, LayerNorm from .learned_positional_embedding import LearnedPositionalEmbedding from .lightweight_convolution import LightweightConv, LightweightConv1dTBC from .linearized_convolution import LinearizedConvolution from .location_attention import LocationAttention from .lstm_cell_with_zoneout import LSTMCellWithZoneOut from .multihead_attention import MultiheadAttention from .positional_embedding import PositionalEmbedding from .same_pad import SamePad from .scalar_bias import ScalarBias from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding from .transformer_sentence_encoder_layer import TransformerSentenceEncoderLayer from .transformer_sentence_encoder import TransformerSentenceEncoder from .transpose_last import TransposeLast from .unfold import unfold1d from .transformer_layer import TransformerDecoderLayer, TransformerEncoderLayer from .vggblock import VGGBlock from .espnet_multihead_attention import ( RelPositionMultiHeadedAttention, RotaryPositionMultiHeadedAttention, ) from .rotary_positional_embedding import RotaryPositionalEmbedding from .positional_encoding import ( RelPositionalEncoding, ) __all__ = [ "AdaptiveInput", "AdaptiveSoftmax", "BaseLayer", "BeamableMM", "CharacterTokenEmbedder", "ConvTBC", "cross_entropy", "DownsampledMultiHeadAttention", "DynamicConv1dTBC", "DynamicConv", "DynamicCRF", "FairseqDropout", "Fp32BatchNorm", "Fp32GroupNorm", "Fp32LayerNorm", "Fp32InstanceNorm", "gelu", "gelu_accurate", "GradMultiply", "GumbelVectorQuantizer", "KmeansVectorQuantizer", "LayerDropModuleList", "LayerNorm", "LearnedPositionalEmbedding", "LightweightConv1dTBC", "LightweightConv", "LinearizedConvolution", "LocationAttention", "LSTMCellWithZoneOut", "MultiheadAttention", "PositionalEmbedding", "SamePad", "ScalarBias", "SinusoidalPositionalEmbedding", "TransformerSentenceEncoderLayer", "TransformerSentenceEncoder", "TransformerDecoderLayer", "TransformerEncoderLayer", "TransposeLast", "VGGBlock", "unfold1d", "ESPNETMultiheadedAttention", "PositionalEmbedding", "RelPositionMultiHeadedAttention", "RelPositionalEncoding", "RotaryPositionalEmbedding", "RotaryPositionMultiHeadedAttention", ]
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from fairseq.modules import TransformerSentenceEncoder from fairseq.modules.sparse_transformer_sentence_encoder_layer import ( SparseTransformerSentenceEncoderLayer, ) class SparseTransformerSentenceEncoder(TransformerSentenceEncoder): """ Sparse implementation of the TransformerSentenceEncoder - see SparseMultiheadAttention """ def __init__( self, padding_idx: int, vocab_size: int, num_encoder_layers: int = 6, embedding_dim: int = 768, ffn_embedding_dim: int = 3072, num_attention_heads: int = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, max_seq_len: int = 256, num_segments: int = 2, use_position_embeddings: bool = True, offset_positions_by_padding: bool = True, encoder_normalize_before: bool = False, apply_bert_init: bool = False, activation_fn: str = "relu", learned_pos_embedding: bool = True, embed_scale: float = None, freeze_embeddings: bool = False, n_trans_layers_to_freeze: int = 0, export: bool = False, is_bidirectional: bool = True, stride: int = 32, expressivity: int = 8, ) -> None: super().__init__( padding_idx, vocab_size, num_encoder_layers, embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, max_seq_len, num_segments, use_position_embeddings, offset_positions_by_padding, encoder_normalize_before, apply_bert_init, activation_fn, learned_pos_embedding, embed_scale, freeze_embeddings, n_trans_layers_to_freeze, export, ) self.layers = nn.ModuleList( [ SparseTransformerSentenceEncoderLayer( embedding_dim=self.embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=dropout, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, is_bidirectional=is_bidirectional, stride=stride, expressivity=expressivity, ) for _ in range(num_encoder_layers) ] ) def freeze_module_params(m): if m is not None: for p in m.parameters(): p.requires_grad = False for layer in range(n_trans_layers_to_freeze): freeze_module_params(self.layers[layer])
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/sparse_transformer_sentence_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from .conv_tbc import ConvTBC from typing import Dict, Optional from torch import Tensor @with_incremental_state class LinearizedConvolution(ConvTBC): """An optimized version of nn.Conv1d. At training time, this module uses ConvTBC, which is an optimized version of Conv1d. At inference time, it optimizes incremental generation (i.e., one time step at a time) by replacing the convolutions with linear layers. Note that the input order changes from training to inference. """ def __init__(self, in_channels, out_channels, kernel_size, **kwargs): super().__init__(in_channels, out_channels, kernel_size, **kwargs) self._linearized_weight = None self.register_backward_hook(self._clear_linearized_weight) def state_dict(self, destination=None, prefix="", keep_vars=False): state = ConvTBC.state_dict(self, destination, prefix, keep_vars=keep_vars) # don't store redundant _linearized_weight in checkpoints if prefix + "_linearized_weight" in state: del state[prefix + "_linearized_weight"] return state def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." if name != "" else "" if prefix + "_linearized_weight" in state_dict: del state_dict[prefix + "_linearized_weight"] @torch.jit.export def forward( self, input, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): """ Args: incremental_state: Used to buffer signal; if not None, then input is expected to contain a single frame. If the input order changes between time steps, call reorder_incremental_state. 
Input: Time x Batch x Channel during training Batch x Time x Channel during inference """ if incremental_state is None: output = self.conv_tbc(input) if self.kernel_size[0] > 1 and self.padding[0] > 0: # remove future timesteps added by padding output = output[: -self.padding[0], :, :] return output # reshape weight weight = self._get_linearized_weight() kw = self.kernel_size[0] bsz = input.size(0) # input: bsz x len x dim if kw > 1: input = input.data input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = input.new(bsz, kw, input.size(2)).zero_() self._set_input_buffer(incremental_state, input_buffer) else: # shift buffer input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone() # append next input input_buffer[:, -1, :] = input[:, -1, :] input = input_buffer with torch.no_grad(): output = F.linear(input.view(bsz, -1), weight, self.bias) return output.view(bsz, 1, -1) @torch.jit.unused def reorder_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_order, ): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(0, new_order) self._set_input_buffer(incremental_state, input_buffer) @torch.jit.unused def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ): return utils.get_incremental_state(self, incremental_state, "input_buffer") @torch.jit.unused def _set_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_buffer, ): return utils.set_incremental_state( self, incremental_state, "input_buffer", new_buffer ) @torch.jit.unused def _get_linearized_weight(self): if self._linearized_weight is None: kw = self.kernel_size[0] weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous() assert weight.size() == (self.out_channels, kw, self.in_channels) return weight.view(self.out_channels, -1) return self._linearized_weight @torch.jit.unused def _clear_linearized_weight(self, *args): self._linearized_weight = None
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/linearized_convolution.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.modules import LayerNorm, MultiheadAttention from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise from torch import Tensor from fairseq.models.transformer import ( TransformerConfig, ) class TransformerEncoderLayerBase(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *cfg.encoder.normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, cfg): super().__init__() self.cfg = cfg self.embed_dim = cfg.encoder.embed_dim self.quant_noise = cfg.quant_noise.pq self.quant_noise_block_size = cfg.quant_noise.pq_block_size self.self_attn = self.build_self_attention(self.embed_dim, cfg) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) self.dropout_module = FairseqDropout( cfg.dropout, module_name=self.__class__.__name__ ) self.activation_fn = utils.get_activation_fn(activation=cfg.activation_fn) activation_dropout_p = cfg.activation_dropout if activation_dropout_p == 0: # for backwards compatibility with models that use cfg.relu_dropout activation_dropout_p = cfg.relu_dropout or 0 self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__ ) self.normalize_before = cfg.encoder.normalize_before self.fc1 = self.build_fc1( self.embed_dim, cfg.encoder.ffn_embed_dim, self.quant_noise, self.quant_noise_block_size, ) self.fc2 = self.build_fc2( cfg.encoder.ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size, ) self.final_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise( nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size ) def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise( nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size ) def _get_fc_rank(self, remove_num: int) -> List[int]: f1_filter_param = [] for i in range(self.fc1.out_features): f1_filter_param.append( torch.sum(torch.abs(self.fc1.weight[i])) + torch.sum(torch.abs(self.fc2.weight[:, i])) + torch.abs(self.fc1.bias[i]) ) return sorted( range(len(f1_filter_param)), key=lambda k: f1_filter_param[k], reverse=False )[0:remove_num] def _prune_fc_layer(self, remove_index: List[int]): new_fc1_weight = [] new_fc1_bias = [] for i in range(self.fc1.out_features): if i not in remove_index: new_fc1_weight.append(self.fc1.weight[i]) new_fc1_bias.append(self.fc1.bias[i]) new_fc1_weight = torch.stack(new_fc1_weight).detach() new_fc1_weight.requires_grad = True new_fc1_bias = torch.stack(new_fc1_bias).detach() new_fc1_bias.requires_grad = True self.fc1 = quant_noise( nn.Linear(self.fc1.in_features, self.fc1.out_features - len(remove_index)), p=self.quant_noise, block_size=self.quant_noise_block_size, ) self.fc1.weight = 
torch.nn.Parameter(new_fc1_weight) self.fc1.bias = torch.nn.Parameter(new_fc1_bias) new_fc2_weight = [] new_fc2_bias = [] for i in range(self.fc2.in_features): if i not in remove_index: new_fc2_weight.append(self.fc2.weight[:, i]) new_fc2_bias = self.fc2.bias.detach() new_fc2_weight = torch.stack(new_fc2_weight, dim=-1).detach() new_fc2_weight.requires_grad = True new_fc2_bias = self.fc2.bias.detach() new_fc2_bias.requires_grad = True self.fc2 = quant_noise( nn.Linear(self.fc2.in_features - len(remove_index), self.fc2.out_features), p=self.quant_noise, block_size=self.quant_noise_block_size, ) self.fc2.weight = torch.nn.Parameter(new_fc2_weight) self.fc2.bias = torch.nn.Parameter(new_fc2_bias) def build_self_attention(self, embed_dim, cfg): return MultiheadAttention( embed_dim, cfg.encoder.attention_heads, dropout=cfg.attention_dropout, self_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, ) def residual_connection(self, x, residual): return residual + x def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"} for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layer_norms.{}.{}".format(name, old, m) if k in state_dict: state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k] del state_dict[k] def forward( self, x, encoder_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor] = None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, seq_len)` where padding elements are indicated by ``1``. attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`, where `tgt_len` is the length of output and `src_len` is the length of input, though here both are equal to `seq_len`. `attn_mask[tgt_i, src_j] = 1` means that when calculating the embedding for `tgt_i`, we exclude (mask out) `src_j`. This is useful for strided self-attention. 
Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ # anything in original attn_mask = 1, becomes -1e8 # anything in original attn_mask = 0, becomes 0 # Note that we cannot use -inf here, because at some edge cases, # the attention weight (before softmax) for some padded element in query # will become -inf, which results in NaN in model parameters if attn_mask is not None: attn_mask = attn_mask.masked_fill( attn_mask.to(torch.bool), -1e8 if x.dtype == torch.float32 else -1e4 ) residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) x, _ = self.self_attn( query=x, key=x, value=x, key_padding_mask=encoder_padding_mask, need_weights=False, attn_mask=attn_mask, ) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.self_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.final_layer_norm(x) return x # backward compatible with the legacy argparse format class TransformerEncoderLayer(TransformerEncoderLayerBase): def __init__(self, args): super().__init__(TransformerConfig.from_namespace(args)) self.args = args def build_self_attention(self, embed_dim, args): return super().build_self_attention( embed_dim, TransformerConfig.from_namespace(args) ) class TransformerDecoderLayerBase(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *cfg.decoder.normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__( self, cfg, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False ): super().__init__() self.embed_dim = cfg.decoder.embed_dim self.dropout_module = FairseqDropout( cfg.dropout, module_name=self.__class__.__name__ ) self.quant_noise = cfg.quant_noise.pq self.quant_noise_block_size = cfg.quant_noise.pq_block_size self.cross_self_attention = cfg.cross_self_attention self.self_attn = self.build_self_attention( self.embed_dim, cfg, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.attn_ln = ( LayerNorm(self.embed_dim) if utils.safe_getattr(cfg, "scale_attn", False) else None ) self.nh = self.self_attn.num_heads self.head_dim = self.self_attn.head_dim scale_heads = utils.safe_getattr(cfg, "scale_heads", False) self.c_attn = ( nn.Parameter(torch.ones((self.nh,)), requires_grad=True) if scale_heads else None ) self.activation_fn = utils.get_activation_fn(activation=cfg.activation_fn) activation_dropout_p = cfg.activation_dropout if activation_dropout_p == 0: # for backwards compatibility with models that use cfg.relu_dropout activation_dropout_p = cfg.relu_dropout or 0 self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__ ) self.normalize_before = cfg.decoder.normalize_before self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = self.build_encoder_attention(self.embed_dim, cfg) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) self.ffn_layernorm = ( LayerNorm(cfg.decoder.ffn_embed_dim) if utils.safe_getattr(cfg, "deepnet", False) else None ) self.w_resid = ( nn.Parameter( torch.ones( self.embed_dim, ), requires_grad=True, ) if utils.safe_getattr(cfg, "scale_resids", False) else None ) self.fc1 = self.build_fc1( self.embed_dim, cfg.decoder.ffn_embed_dim, self.quant_noise, self.quant_noise_block_size, ) self.fc2 = self.build_fc2( cfg.decoder.ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size, ) self.final_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) self.need_attn = True self.onnx_trace = False def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_self_attention( self, embed_dim, cfg, add_bias_kv=False, add_zero_attn=False ): return MultiheadAttention( embed_dim, cfg.decoder.attention_heads, dropout=cfg.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=not cfg.cross_self_attention, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, attention_norm=True if utils.safe_getattr(cfg, "deepnet", False) else False, ) def build_encoder_attention(self, embed_dim, cfg): return MultiheadAttention( embed_dim, cfg.decoder.attention_heads, kdim=cfg.encoder.embed_dim, vdim=cfg.encoder.embed_dim, dropout=cfg.attention_dropout, encoder_decoder_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, ) def prepare_for_onnx_export_(self): self.onnx_trace = True def residual_connection(self, x, residual): return residual + x def forward( self, x, encoder_out: Optional[torch.Tensor] = None, encoder_padding_mask: Optional[torch.Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = 
None, prev_self_attn_state: Optional[List[torch.Tensor]] = None, prev_attn_state: Optional[List[torch.Tensor]] = None, self_attn_mask: Optional[torch.Tensor] = None, self_attn_padding_mask: Optional[torch.Tensor] = None, need_attn: bool = False, need_head_weights: bool = False, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor, optional): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. need_attn (bool, optional): return attention weights need_head_weights (bool, optional): return attention weights for each head (default: return average over heads). Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ if need_head_weights: need_attn = True residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) if prev_self_attn_state is not None: prev_key, prev_value = prev_self_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_self_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_self_attn_state[2] assert incremental_state is not None self.self_attn._set_input_buffer(incremental_state, saved_state) _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state) if self.cross_self_attention and not ( incremental_state is not None and _self_attn_input_buffer is not None and "prev_key" in _self_attn_input_buffer ): if self_attn_mask is not None: assert encoder_out is not None self_attn_mask = torch.cat( (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1 ) if self_attn_padding_mask is not None: if encoder_padding_mask is None: assert encoder_out is not None encoder_padding_mask = self_attn_padding_mask.new_zeros( encoder_out.size(1), encoder_out.size(0) ) self_attn_padding_mask = torch.cat( (encoder_padding_mask, self_attn_padding_mask), dim=1 ) assert encoder_out is not None y = torch.cat((encoder_out, x), dim=0) else: y = x x, attn = self.self_attn( query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) if self.c_attn is not None: tgt_len, bsz = x.size(0), x.size(1) x = x.view(tgt_len, bsz, self.nh, self.head_dim) x = torch.einsum("tbhd,h->tbhd", x, self.c_attn) x = x.reshape(tgt_len, bsz, self.embed_dim) if self.attn_ln is not None: x = self.attn_ln(x) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.self_attn_layer_norm(x) if self.encoder_attn is not None and encoder_out is not None: residual = x if self.normalize_before: x = self.encoder_attn_layer_norm(x) if prev_attn_state is not None: prev_key, prev_value = prev_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_attn_state[2] assert incremental_state is not None self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=need_attn or (not self.training and self.need_attn), need_head_weights=need_head_weights, ) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.encoder_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = 
self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) if self.ffn_layernorm is not None: x = self.ffn_layernorm(x) x = self.fc2(x) x = self.dropout_module(x) if self.w_resid is not None: residual = torch.mul(self.w_resid, residual) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.final_layer_norm(x) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) assert saved_state is not None if self_attn_padding_mask is not None: self_attn_state = [ saved_state["prev_key"], saved_state["prev_value"], saved_state["prev_key_padding_mask"], ] else: self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]] return x, attn, self_attn_state return x, attn, None def make_generation_fast_(self, need_attn: bool = False, **kwargs): self.need_attn = need_attn # backward compatible with the legacy argparse format class TransformerDecoderLayer(TransformerDecoderLayerBase): def __init__( self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False ): super().__init__( TransformerConfig.from_namespace(args), no_encoder_attn=no_encoder_attn, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.args = args def build_self_attention( self, embed_dim, args, add_bias_kv=False, add_zero_attn=False ): return super().build_self_attention( embed_dim, TransformerConfig.from_namespace(args), add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) def build_encoder_attention(self, embed_dim, args): return super().build_encoder_attention( embed_dim, TransformerConfig.from_namespace(args), )
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/transformer_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.modules import LayerNorm, MultiheadAttention from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise class TransformerSentenceEncoderLayer(nn.Module): """ Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained models. """ def __init__( self, embedding_dim: int = 768, ffn_embedding_dim: int = 3072, num_attention_heads: int = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, activation_fn: str = "relu", export: bool = False, q_noise: float = 0.0, qn_block_size: int = 8, init_fn: Callable = None, ) -> None: super().__init__() if init_fn is not None: init_fn() # Initialize parameters self.embedding_dim = embedding_dim self.num_attention_heads = num_attention_heads self.attention_dropout = attention_dropout self.q_noise = q_noise self.qn_block_size = qn_block_size self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.activation_dropout_module = FairseqDropout( activation_dropout, module_name=self.__class__.__name__ ) # Initialize blocks self.activation_fn = utils.get_activation_fn(activation_fn) self.self_attn = self.build_self_attention( self.embedding_dim, num_attention_heads, dropout=attention_dropout, self_attention=True, q_noise=q_noise, qn_block_size=qn_block_size, ) # layer norm associated with the self attention layer self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export) self.fc1 = self.build_fc1( self.embedding_dim, ffn_embedding_dim, q_noise=q_noise, qn_block_size=qn_block_size, ) self.fc2 = self.build_fc2( ffn_embedding_dim, self.embedding_dim, q_noise=q_noise, qn_block_size=qn_block_size, ) # layer norm associated with the position wise feed-forward NN self.final_layer_norm = LayerNorm(self.embedding_dim, export=export) def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_self_attention( self, embed_dim, num_attention_heads, dropout, self_attention, q_noise, qn_block_size, ): return MultiheadAttention( embed_dim, num_attention_heads, dropout=dropout, self_attention=True, q_noise=q_noise, qn_block_size=qn_block_size, ) def forward( self, x: torch.Tensor, self_attn_mask: Optional[torch.Tensor] = None, self_attn_padding_mask: Optional[torch.Tensor] = None, ): """ LayerNorm is applied either before or after the self-attention/ffn modules similar to the original Transformer implementation. """ residual = x x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=False, attn_mask=self_attn_mask, ) x = self.dropout_module(x) x = residual + x x = self.self_attn_layer_norm(x) residual = x x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x x = self.final_layer_norm(x) return x, attn
KosmosX-API-main
kosmosX/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py