import torch
import torch.nn as nn

class OMNILITEUnifiedSparseMultimodalTransformer(nn.Module):
    def __init__(self):
        super().__init__()
        # Vision patch embedding for the ViT encoder: 14x14 patches -> 1024-d tokens
        self.patch_embed = nn.Conv2d(3, 1024, kernel_size=14, stride=14)
        # Lightweight Vision Transformer (ViT) backbone, 12 layers
        self.vit = nn.TransformerEncoder(nn.TransformerEncoderLayer(
            d_model=1024, nhead=16, dim_feedforward=4096, batch_first=True), num_layers=12)
        # Perceiver-style resampler: 64 learned latents cross-attend to the visual
        # tokens, mapping them into the 2048-d text latent space
        self.latents = nn.Parameter(torch.randn(64, 2048))
        self.vis_proj = nn.Linear(1024, 2048)
        self.resampler = nn.MultiheadAttention(embed_dim=2048, num_heads=16, batch_first=True)
        # Text token embedding (32000-token vocabulary -> 2048)
        self.tok_embed = nn.Embedding(32000, 2048)
        # Shared backbone: dense stand-in for the specified 24 GQA/MoE layers (16 experts,
        # top-2 routing, 32 query / 8 KV heads, RoPE dim 64, ~480M active params per token)
        self.backbone = nn.TransformerEncoder(nn.TransformerEncoderLayer(
            d_model=2048, nhead=32, dim_feedforward=8192, batch_first=True), num_layers=24)
        # Causal language modeling (CLM) head
        self.lm_head = nn.Linear(2048, 32000)
        # Rectified flow-matching (RFM) head producing 64-d DiT latents
        self.rfm_head = nn.Linear(2048, 64)
        # VQ-VAE-style decoder conv for 8x8 latent reconstruction (separate decode path)
        self.vae_decoder = nn.Conv2d(4, 3, kernel_size=3, stride=1, padding=1)

    def forward(self, pixels, token_ids):
        v = self.patch_embed(pixels).flatten(2).transpose(1, 2)   # (B, N, 1024)
        v = self.vis_proj(self.vit(v))                            # (B, N, 2048)
        q = self.latents.unsqueeze(0).expand(v.size(0), -1, -1)   # (B, 64, 2048)
        v, _ = self.resampler(q, v, v)                            # (B, 64, 2048)
        # Prepend the resampled visual latents to the embedded text, run shared backbone
        h = self.backbone(torch.cat([v, self.tok_embed(token_ids)], dim=1))
        return self.lm_head(h), self.rfm_head(h)
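
# Minimal smoke test for the sketch above. The input shapes here are assumptions
# for illustration (a 224x224 RGB batch and 16-token sequences), not part of the
# original file.
if __name__ == "__main__":
    model = OMNILITEUnifiedSparseMultimodalTransformer()
    pixels = torch.randn(2, 3, 224, 224)          # 2 RGB images -> 16x16 = 256 patches
    token_ids = torch.randint(0, 32000, (2, 16))  # 2 sequences of 16 token ids
    logits, dit_latents = model(pixels, token_ids)
    print(logits.shape)       # torch.Size([2, 80, 32000])  (64 visual latents + 16 tokens)
    print(dit_latents.shape)  # torch.Size([2, 80, 64])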