import logging
import math
import os
from inspect import isfunction
from typing import Optional, Tuple

import einops
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from omegaconf import DictConfig

from .position_embeddings import *
| |
|
| |
|
# Module-level counter used to give every Attention instance a unique id.
_attention_instance_counter = 0

# Per-batch cache of cross-attention statistics, keyed by batch index and then by instance id.
_cross_attn_accumulated_data = {}
| |
|
| | |
| | |
def _get_cross_attn_vis_mode():
    """Read the cross-attention visualization mode ('average' or 'separate') from the CROSS_ATTN_VIS_MODE environment variable."""
    mode = os.environ.get('CROSS_ATTN_VIS_MODE', 'average').lower()
    if mode not in ['average', 'separate']:
        mode = 'average'
    return mode
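
# Usage note (illustrative): the mode is chosen through an environment variable set
# before the process starts, e.g.
#   CROSS_ATTN_VIS_MODE=separate python <your_training_script>.py
# or programmatically (hypothetical call site):
#   os.environ["CROSS_ATTN_VIS_MODE"] = "separate"
# Any unrecognized value falls back to 'average'.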
| |
|
| |
|
def exists(val):
    return val is not None


def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d
| |
|
| |
|
| | class LayerNorm(nn.Module): |
| | """ LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """ |
| |
|
| | def __init__(self, ndim, bias): |
| | super().__init__() |
| | self.weight = nn.Parameter(torch.ones(ndim)) |
| | self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None |
| |
|
| | def forward(self, input): |
| | return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5) |
| |
|
| |
|
| |
|
| | |
| | class RMSNorm(nn.Module): |
| | def __init__(self, dim: int, eps: float = 1e-8) -> None: |
| | super().__init__() |
| | self.scale, self.eps = dim**-0.5, eps |
| | self.g = nn.Parameter(torch.ones(dim)) |
| |
|
| | def forward(self, x: torch.Tensor) -> torch.Tensor: |
| | norm = torch.norm(x, dim=-1, keepdim=True) * self.scale |
| | return x / norm.clamp(min=self.eps) * self.g |
| |
|
| |
|
| | |
| | class SwishGLU(nn.Module): |
| | def __init__(self, in_dim: int, out_dim: int) -> None: |
| | super().__init__() |
| | self.act, self.project = nn.SiLU(), nn.Linear(in_dim, 2 * out_dim) |
| |
|
| | def forward(self, x: torch.Tensor) -> torch.Tensor: |
| | projected, gate = self.project(x).tensor_split(2, dim=-1) |
| | return projected * self.act(gate) |
| |
|
| |
|
| |
|
class Attention(nn.Module):
    """Multi-head self- or cross-attention with optional causal masking and rotary embeddings."""
| |
|
| | def __init__( |
| | self, |
| | n_embd: int, |
| | n_head: int, |
| | attn_pdrop: float, |
| | resid_pdrop: float, |
| | block_size: int, |
| | causal: bool = False, |
| | bias=False, |
| | use_rot_embed: bool = False, |
| | rotary_xpos: bool = False, |
| | rotary_emb_dim = None, |
| | rotary_xpos_scale_base = 512, |
| | rotary_interpolation_factor = 1., |
| | ): |
| | super().__init__() |
| | assert n_embd % n_head == 0 |
| | |
| | self.key = nn.Linear(n_embd, n_embd) |
| | self.query = nn.Linear(n_embd, n_embd) |
| | self.value = nn.Linear(n_embd, n_embd) |
| | |
| | self.c_proj = nn.Linear(n_embd, n_embd, bias=bias) |
| | |
| | self.attn_dropout = nn.Dropout(attn_pdrop) |
| | self.resid_dropout = nn.Dropout(resid_pdrop) |
| | self.n_head = n_head |
| | self.n_embd = n_embd |
| | self.causal = causal |
| | |
| | self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') |
| | if not self.flash: |
| | print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0") |
| | |
| | self.register_buffer("bias", torch.tril(torch.ones(block_size, block_size)) |
| | .view(1, 1, block_size, block_size)) |
        self.use_rot_embed = use_rot_embed
        if self.use_rot_embed:
            # Rotary embeddings only need to cover part of each head's dimension;
            # default to half the number of heads, clamped to at least 32 dims.
            rotary_emb_dim = max(default(rotary_emb_dim, self.n_head // 2), 32)
            self.rotary_pos_emb = RotaryEmbedding(
                rotary_emb_dim,
                use_xpos=rotary_xpos,
                xpos_scale_base=rotary_xpos_scale_base,
                interpolate_factor=rotary_interpolation_factor,
            )
| |
|
| | |
| | |
| | global _attention_instance_counter |
| | self._instance_id = _attention_instance_counter |
| | _attention_instance_counter += 1 |
| | |
| | |
| | self._vis_counter = 0 |
| | |
| | def _visualize_attention(self, att: torch.Tensor, is_cross_attention: bool = False): |
| | """ |
| | 可视化attention权重(softmax后的) |
| | att: (B, nh, T_q, T_k) attention权重矩阵 |
| | is_cross_attention: 是否为cross-attention |
| | """ |
| | B = att.shape[0] |
| | |
| | |
| | if is_cross_attention: |
| | global _cross_attn_accumulated_data |
| | |
| | |
| | vis_dir = "attention_vis" |
| | os.makedirs(vis_dir, exist_ok=True) |
| | |
| | |
| | for b_idx in range(B): |
| | |
| | att_vis = att[b_idx].mean(dim=0).detach().cpu().numpy() |
| | |
| | |
| | att_vis_1d = att_vis.mean(axis=0) |
| | |
| | |
| | |
| | chunk_size = 32 |
| | |
| | att_vis_filtered = att_vis_1d[1:161] |
| | |
| | n_chunks = len(att_vis_filtered) // chunk_size |
| | att_vis_accumulated = [] |
| | x_labels = [] |
| | |
| | for i in range(n_chunks): |
| | start_idx_in_filtered = i * chunk_size |
| | end_idx_in_filtered = (i + 1) * chunk_size |
| | |
| | actual_start_idx = start_idx_in_filtered + 1 |
| | actual_end_idx = end_idx_in_filtered |
| | chunk_sum = att_vis_filtered[start_idx_in_filtered:end_idx_in_filtered].sum() |
| | att_vis_accumulated.append(chunk_sum) |
| | x_labels.append(f'{actual_start_idx}-{actual_end_idx}') |
| | |
| | |
| | if len(att_vis_filtered) % chunk_size != 0: |
| | start_idx_in_filtered = n_chunks * chunk_size |
| | actual_start_idx = start_idx_in_filtered + 1 |
| | chunk_sum = att_vis_filtered[start_idx_in_filtered:].sum() |
| | att_vis_accumulated.append(chunk_sum) |
| | x_labels.append(f'{actual_start_idx}-{len(att_vis_1d)-1}') |
| | |
| | att_vis_accumulated = np.array(att_vis_accumulated) |
| | |
| | |
| | if b_idx not in _cross_attn_accumulated_data: |
| | _cross_attn_accumulated_data[b_idx] = {} |
| | _cross_attn_accumulated_data[b_idx][self._instance_id] = { |
| | 'data': att_vis_accumulated, |
| | 'x_labels': x_labels, |
| | 'step': self._vis_counter, |
| | 'full_map': att_vis |
| | } |
| | |
| | |
| | if b_idx in _cross_attn_accumulated_data and len(_cross_attn_accumulated_data[b_idx]) >= 4: |
| | |
| | self._save_accumulated_cross_attn_to_txt(b_idx) |
| | |
| | self._save_last_layer_heatmap(b_idx) |
| | |
| | self._plot_accumulated_cross_attn(b_idx) |
| | |
| | self._vis_counter += 1 |
| | else: |
| | |
| | pass |
| | |
| | def _save_accumulated_cross_attn_to_txt(self, b_idx): |
| | """保存所有层cross-attention的累计结果到txt文件""" |
| | global _cross_attn_accumulated_data |
| | |
| | if b_idx not in _cross_attn_accumulated_data: |
| | return |
| | |
| | layer_data = _cross_attn_accumulated_data[b_idx] |
| | |
| | |
| | if len(layer_data) < 4: |
| | return |
| | |
| | vis_dir = "attention_vis" |
| | os.makedirs(vis_dir, exist_ok=True) |
| | |
| | |
| | x_labels = None |
| | for layer_id in sorted(layer_data.keys()): |
| | if x_labels is None: |
| | x_labels = layer_data[layer_id]['x_labels'] |
| | break |
| | |
| | sorted_layer_ids = sorted(layer_data.keys())[:4] |
| | step = layer_data[sorted_layer_ids[0]]['step'] |
| | |
| | |
| | all_data = {} |
| | for layer_id in sorted_layer_ids: |
| | all_data[layer_id] = layer_data[layer_id]['data'] |
| | |
| | |
| | all_data_array = np.stack([all_data[layer_id] for layer_id in sorted_layer_ids], axis=0) |
| | averaged_data = all_data_array.mean(axis=0) |
| | |
| | |
| | filename = os.path.join(vis_dir, f'attn_accumulated_step{step:04d}_batch{b_idx}.txt') |
| | with open(filename, 'w') as f: |
| | f.write(f"Cross-Attention Accumulated Data - Batch {b_idx}, Step {step}\n") |
| | f.write("=" * 60 + "\n\n") |
| | |
| | |
| | f.write("Key Position Ranges (每32个累计):\n") |
| | f.write(", ".join(x_labels) + "\n\n") |
| | |
| | |
| | f.write("Separate Mode - Each Layer Data:\n") |
| | f.write("-" * 60 + "\n") |
| | for layer_id in sorted_layer_ids: |
| | data = all_data[layer_id] |
| | f.write(f"Layer {layer_id}:\n") |
| | f.write(", ".join([f"{val:.6f}" for val in data]) + "\n") |
| | f.write(f" Sum: {data.sum():.6f}, Mean: {data.mean():.6f}, Max: {data.max():.6f}, Min: {data.min():.6f}\n\n") |
| | |
| | |
| | f.write("Average Mode - Average of 4 Layers (calculated from separate data):\n") |
| | f.write("-" * 60 + "\n") |
| | f.write(", ".join([f"{val:.6f}" for val in averaged_data]) + "\n") |
| | f.write(f" Sum: {averaged_data.sum():.6f}, Mean: {averaged_data.mean():.6f}, Max: {averaged_data.max():.6f}, Min: {averaged_data.min():.6f}\n") |
| | |
| | print(f"Cross-Attention数据已保存到txt: {filename} (Batch {b_idx})") |
| | print(f" 包含4层separate数据和average数据(从separate计算得出)") |
| | |
| | def _save_last_layer_heatmap(self, b_idx): |
| | """仅保存最后一层cross-attention的热力图(不压缩action维度,但key维度按32分组)""" |
| | global _cross_attn_accumulated_data |
| |
|
| | if b_idx not in _cross_attn_accumulated_data: |
| | return |
| |
|
| | layer_data = _cross_attn_accumulated_data[b_idx] |
| |
|
| | if len(layer_data) < 4: |
| | return |
| |
|
| | |
| | last_layer_id = max(layer_data.keys()) |
| | if 'full_map' not in layer_data[last_layer_id]: |
| | return |
| |
|
| | heatmap = layer_data[last_layer_id]['full_map'] |
| | step = layer_data[last_layer_id]['step'] |
| |
|
| | |
| | chunk_size = 32 |
| | T_q, T_k = heatmap.shape |
| | |
| | |
| | |
| | end_idx_filtered = min(161, T_k) |
| | heatmap_filtered = heatmap[:, 1:end_idx_filtered] |
| | |
| | |
| | n_chunks = heatmap_filtered.shape[1] // chunk_size |
| | heatmap_compressed = [] |
| | |
| | fixed_x_labels = ['-40', '-20', '0', '20', '40'] |
| | |
| | for i in range(n_chunks): |
| | start_idx = i * chunk_size |
| | end_idx = (i + 1) * chunk_size |
| | |
| | chunk_sum = heatmap_filtered[:, start_idx:end_idx].sum(axis=1, keepdims=True) |
| | heatmap_compressed.append(chunk_sum) |
| | |
| | |
| | if heatmap_filtered.shape[1] % chunk_size != 0: |
| | start_idx = n_chunks * chunk_size |
| | chunk_sum = heatmap_filtered[:, start_idx:].sum(axis=1, keepdims=True) |
| | heatmap_compressed.append(chunk_sum) |
| | |
| | |
| | x_labels = fixed_x_labels[:len(heatmap_compressed)] |
| | |
| | |
| | if len(heatmap_compressed) > 0: |
| | heatmap_compressed = np.concatenate(heatmap_compressed, axis=1) |
| | else: |
| | |
| | heatmap_compressed = np.zeros((T_q, 1)) |
| |
|
| | vis_dir = "attention_vis" |
| | os.makedirs(vis_dir, exist_ok=True) |
| |
|
| | |
| | plt.rcParams['font.family'] = 'Times New Roman' |
| | |
| | plt.figure(figsize=(10, 8)) |
| | plt.imshow(heatmap_compressed, cmap='viridis', aspect='auto') |
| | cbar = plt.colorbar(label='Attention Weight') |
| | cbar.set_label('Attention Weight', fontsize=24, fontfamily='Times New Roman') |
| | cbar.ax.tick_params(labelsize=20) |
| | |
| | for label in cbar.ax.get_yticklabels(): |
| | label.set_fontfamily('Times New Roman') |
| | |
| | plt.xlabel('View angle', fontsize=24, fontfamily='Times New Roman') |
| | plt.ylabel('Action', fontsize=24, fontfamily='Times New Roman') |
| | plt.xticks(range(len(x_labels)), x_labels, rotation=0, ha='center', fontsize=20) |
| | plt.yticks(fontsize=20) |
| | |
| | for label in plt.gca().get_xticklabels(): |
| | label.set_fontfamily('Times New Roman') |
| | for label in plt.gca().get_yticklabels(): |
| | label.set_fontfamily('Times New Roman') |
| | plt.tight_layout() |
| |
|
| | filename = os.path.join(vis_dir, f'attn_heatmap_last_layer_step{step:04d}_batch{b_idx}.png') |
| | plt.savefig(filename, dpi=150, bbox_inches='tight') |
| | plt.close() |
| | |
| | print(f"最后一层Cross-Attention热力图已保存: {filename} (Batch {b_idx}, Layer {last_layer_id}, Shape: {heatmap_compressed.shape})") |
| |
|
| | def _plot_accumulated_cross_attn(self, b_idx): |
| | """绘制所有层cross-attention的累计结果""" |
| | global _cross_attn_accumulated_data |
| | |
| | if b_idx not in _cross_attn_accumulated_data: |
| | return |
| | |
| | layer_data = _cross_attn_accumulated_data[b_idx] |
| | |
| | |
| | if len(layer_data) < 4: |
| | return |
| | |
| | vis_mode = _get_cross_attn_vis_mode() |
| | vis_dir = "attention_vis" |
| | |
| | |
| | plt.rcParams['font.family'] = 'Times New Roman' |
| | |
| | |
| | plt.figure(figsize=(12, 6)) |
| | |
| | |
| | x_labels = None |
| | for layer_id in sorted(layer_data.keys()): |
| | if x_labels is None: |
| | x_labels = layer_data[layer_id]['x_labels'] |
| | break |
| | |
| | sorted_layer_ids = sorted(layer_data.keys())[:4] |
| | |
| | if vis_mode == 'average': |
| | |
| | all_data = [] |
| | for layer_id in sorted_layer_ids: |
| | data = layer_data[layer_id]['data'] |
| | all_data.append(data) |
| | |
| | |
| | all_data_array = np.stack(all_data, axis=0) |
| | averaged_data = all_data_array.mean(axis=0) |
| | |
| | |
| | step = layer_data[sorted_layer_ids[0]]['step'] |
| | plt.plot(range(len(averaged_data)), averaged_data, linewidth=2, marker='o', markersize=4, |
| | color='steelblue', alpha=0.8, label='Average (4 Layers)') |
| | |
| | plt.title(f'Average Cross-Attention (All 4 Layers) - Batch {b_idx}', fontsize=18, fontfamily='Times New Roman') |
| | else: |
| | |
| | colors = ['steelblue', 'coral', 'mediumseagreen', 'mediumpurple'] |
| | for i, layer_id in enumerate(sorted_layer_ids): |
| | data = layer_data[layer_id]['data'] |
| | step = layer_data[layer_id]['step'] |
| | color = colors[i % len(colors)] |
| | plt.plot(range(len(data)), data, linewidth=2, marker='o', markersize=4, |
| | color=color, alpha=0.8, label=f'Layer {layer_id}') |
| | |
| | plt.title(f'Accumulated Cross-Attention (All Layers) - Batch {b_idx}', fontsize=18, fontfamily='Times New Roman') |
| | legend = plt.legend(loc='best', fontsize=16) |
| | for text in legend.get_texts(): |
| | text.set_fontfamily('Times New Roman') |
| | |
        plt.xlabel('Key Position (accumulated per 32 keys)', fontsize=18, fontfamily='Times New Roman')
| | plt.ylabel('Cumulative Attention Weight', fontsize=18, fontfamily='Times New Roman') |
| | plt.xticks(range(len(x_labels)), x_labels, rotation=45, ha='right', fontsize=16) |
| | plt.yticks(fontsize=16) |
| | |
| | for label in plt.gca().get_xticklabels(): |
| | label.set_fontfamily('Times New Roman') |
| | for label in plt.gca().get_yticklabels(): |
| | label.set_fontfamily('Times New Roman') |
| | plt.ylim(0.15, 0.25) |
| | plt.grid(alpha=0.3, linestyle='--') |
| | plt.tight_layout() |
| | |
| | |
| | step = layer_data[sorted_layer_ids[0]]['step'] |
| | filename = os.path.join(vis_dir, f'attn_vis_accumulated_step{step:04d}_batch{b_idx}.png') |
| | plt.savefig(filename, dpi=150, bbox_inches='tight') |
| | plt.close() |
| | |
| | print(f"累计Cross-Attention可视化已保存: {filename} (Batch {b_idx}, Mode: {vis_mode})") |
| | |
| | |
| | del _cross_attn_accumulated_data[b_idx] |
| |
|
    def forward(self, x, context=None, custom_attn_mask=None):
        """Self-attention over x, or cross-attention from x (queries) to context (keys/values) when context is provided."""
| | B, T, C = x.size() |
| |
|
| | |
| | |
| | |
| | if context is not None: |
| | k = self.key(context).view(B, -1, self.n_head, C // self.n_head).transpose(1, 2) |
| | q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) |
| | v = self.value(context).view(B, -1, self.n_head, C // self.n_head).transpose(1, 2) |
| | else: |
| | k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) |
| | q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) |
| | v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) |
| |
|
| | |
| | if self.use_rot_embed: |
| | q = self.rotary_pos_emb.rotate_queries_or_keys(q) |
| | k = self.rotary_pos_emb.rotate_queries_or_keys(k) |
| |
|
| | |
| | if self.flash: |
| | |
| | y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=custom_attn_mask, dropout_p=self.attn_dropout.p if self.training else 0, is_causal=self.causal) |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | else: |
| | |
| | att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) |
| | if self.causal: |
| | if custom_attn_mask is not None: |
| | att = att.masked_fill(custom_attn_mask == 0, float('-inf')) |
| | else: |
| | att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf')) |
| | att = F.softmax(att, dim=-1) |
| | att = self.attn_dropout(att) |
| | y = att @ v |
| | y = y.transpose(1, 2).contiguous().view(B, T, C) |
| |
|
| | |
| | y = self.resid_dropout(self.c_proj(y)) |
| | return y |
| | |
| |
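
# A minimal usage sketch for Attention (illustrative only; never called in this module).
# Batch size, sequence lengths, and hyperparameters below are assumptions.
def _example_attention_usage():
    n_embd, n_head, block_size = 64, 4, 16
    self_attn = Attention(n_embd, n_head, attn_pdrop=0.0, resid_pdrop=0.0,
                          block_size=block_size, causal=True)
    cross_attn = Attention(n_embd, n_head, attn_pdrop=0.0, resid_pdrop=0.0,
                           block_size=block_size, causal=False)
    x = torch.randn(2, block_size, n_embd)   # (B, T, C) query tokens
    ctx = torch.randn(2, 8, n_embd)          # (B, T_k, C) context tokens
    y_self = self_attn(x)                    # causal self-attention, (2, 16, 64)
    y_cross = cross_attn(x, context=ctx)     # cross-attention over ctx, (2, 16, 64)
    return y_self, y_cross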
|
| | class MLP(nn.Module): |
| |
|
| | def __init__( |
| | self, |
| | n_embd: int, |
| | bias: bool, |
| | dropout: float = 0 |
| | ): |
| | super().__init__() |
| | self.c_fc = nn.Linear(n_embd, 4 * n_embd, bias=bias) |
| | self.gelu = nn.GELU() |
| | self.c_proj = nn.Linear(4 * n_embd, n_embd, bias=bias) |
| | self.dropout = nn.Dropout(dropout) |
| |
|
| | def forward(self, x): |
| | x = self.c_fc(x) |
| | x = self.gelu(x) |
| | x = self.c_proj(x) |
| | x = self.dropout(x) |
| | return x |
| |
|
| |
|
class Block(nn.Module):
    """Pre-norm transformer block: (optionally causal) self-attention, optional cross-attention, and an MLP, each with a residual connection."""
| |
|
| | def __init__( |
| | self, |
| | n_embd: int, |
| | n_heads: int, |
| | attn_pdrop: float, |
| | resid_pdrop: float, |
| | mlp_pdrop: float, |
| | block_size: int, |
| | causal: bool, |
| | use_cross_attention: bool = False, |
| | use_rot_embed: bool=False, |
| | rotary_xpos: bool = False, |
| | bias: bool = False, |
| | ): |
| | super().__init__() |
| | self.ln_1 = LayerNorm(n_embd, bias=bias) |
| | self.attn = Attention(n_embd, n_heads, attn_pdrop, resid_pdrop, block_size, causal, bias, use_rot_embed, rotary_xpos) |
| | self.use_cross_attention = use_cross_attention |
| | if self.use_cross_attention: |
| | self.cross_att = Attention(n_embd, n_heads, attn_pdrop, resid_pdrop, block_size, causal, bias, use_rot_embed, rotary_xpos) |
| | self.ln3 = nn.LayerNorm(n_embd) |
| | self.ln_2 = LayerNorm(n_embd, bias=bias) |
| | self.mlp = MLP(n_embd, bias, mlp_pdrop) |
| |
|
| | def forward(self, x, context=None, custom_attn_mask=None): |
| | x = x + self.attn(self.ln_1(x), custom_attn_mask=custom_attn_mask) |
| | if self.use_cross_attention and context is not None: |
| | x = x + self.cross_att(self.ln3(x), context, custom_attn_mask=custom_attn_mask) |
| | x = x + self.mlp(self.ln_2(x)) |
| | return x |
| |
|
| |
|
| |
|
| | class CrossAttentionOnlyBlock(nn.Module): |
| |
|
| | def __init__( |
| | self, |
| | n_embd: int, |
| | n_heads: int, |
| | attn_pdrop: float, |
| | resid_pdrop: float, |
| | mlp_pdrop: float, |
| | block_size: int, |
| | causal: bool, |
| | use_rot_embed: bool=False, |
| | rotary_xpos: bool = False, |
| | bias: bool = False, |
| | ): |
| | super().__init__() |
| | self.ln_1 = LayerNorm(n_embd, bias=bias) |
| | self.cross_att = Attention(n_embd, n_heads, attn_pdrop, resid_pdrop, block_size, causal, bias, use_rot_embed, rotary_xpos) |
| | self.ln_2 = LayerNorm(n_embd, bias=bias) |
| | self.mlp = MLP(n_embd, bias, mlp_pdrop) |
| |
|
| | def forward(self, x, context=None, custom_attn_mask=None): |
| | x = x + self.cross_att(self.ln_1(x), context, custom_attn_mask=custom_attn_mask) |
| | x = x + self.mlp(self.ln_2(x)) |
| | return x |
| |
|
| |
|
| | class AdaLNZero(nn.Module): |
| | """ |
| | AdaLN-Zero modulation for conditioning. |
| | """ |
| | def __init__(self, hidden_size): |
| | super().__init__() |
| | self.modulation = nn.Sequential( |
| | nn.SiLU(), |
| | nn.Linear(hidden_size, 6 * hidden_size, bias=True) |
| | ) |
| | |
| | |
| | |
| |
|
| | def forward(self, c): |
| | return self.modulation(c).chunk(6, dim=-1) |
| |
|
def modulate(x, shift, scale):
    # AdaLN-style modulation: element-wise scale followed by shift (note: no +1 on the scale here).
    return x * scale + shift
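
# A shape sketch of how AdaLNZero and modulate combine (illustrative only; the sizes
# and the (B, 1, C) conditioning layout are assumptions mirroring ConditionedBlock below).
# The conditioning vector is projected to six chunks: shift/scale pairs for the attention
# and MLP branches, plus two residual gates.
def _example_adaln_zero_usage():
    hidden = 64
    ada = AdaLNZero(hidden)
    c = torch.randn(2, 1, hidden)     # conditioning, kept (B, 1, C) so it broadcasts over tokens
    shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ada(c)
    x = torch.randn(2, 10, hidden)    # (B, T, C) token activations
    x_mod = modulate(x, shift_msa, scale_msa)   # (2, 10, 64)
    return x_mod, gate_msa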
| |
|
| |
|
| | class ConditionedBlock(Block): |
| | """ |
| | Block with AdaLN-Zero conditioning. |
| | """ |
| | def __init__( |
| | self, |
| | n_embd, |
| | n_heads, |
| | attn_pdrop, |
| | resid_pdrop, |
| | mlp_pdrop, |
| | block_size, |
| | causal, |
| | film_cond_dim, |
| | use_cross_attention=False, |
| | use_rot_embed=False, |
| | rotary_xpos=False, |
| | bias=False |
| | ): |
| | super().__init__(n_embd, n_heads, attn_pdrop, resid_pdrop, mlp_pdrop, block_size, causal, |
| | use_cross_attention=use_cross_attention, |
| | use_rot_embed=use_rot_embed, |
| | rotary_xpos=rotary_xpos, |
| | bias=bias) |
| | self.adaLN_zero = AdaLNZero(film_cond_dim) |
| |
|
| | def forward(self, x, c, context=None, custom_attn_mask=None): |
| | shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_zero(c) |
| | |
| | |
| | x_attn = self.ln_1(x) |
| | x_attn = modulate(x_attn, shift_msa, scale_msa) |
| | x = x + gate_msa * self.attn(x_attn, custom_attn_mask=custom_attn_mask) |
| | |
| | |
| | if self.use_cross_attention and context is not None: |
| | x = x + self.cross_att(self.ln3(x), context, custom_attn_mask=custom_attn_mask) |
| | |
| | |
| | x_mlp = self.ln_2(x) |
| | x_mlp = modulate(x_mlp, shift_mlp, scale_mlp) |
| | x = x + gate_mlp * self.mlp(x_mlp) |
| | |
| | return x |
| |
|
| | class NoiseBlock(Block): |
| | """ |
| | Block with AdaLN-Zero conditioning. |
| | """ |
| | def __init__( |
| | self, |
| | n_embd, |
| | n_heads, |
| | attn_pdrop, |
| | resid_pdrop, |
| | mlp_pdrop, |
| | block_size, |
| | causal, |
| | use_cross_attention=False, |
| | use_rot_embed=False, |
| | rotary_xpos=False, |
| | bias=False |
| | ): |
| | super().__init__(n_embd, n_heads, attn_pdrop, resid_pdrop, mlp_pdrop, block_size, causal, |
| | use_cross_attention=use_cross_attention, |
| | use_rot_embed=use_rot_embed, |
| | rotary_xpos=rotary_xpos, |
| | bias=bias) |
| |
|
| | def forward(self, x, c, context=None, custom_attn_mask=None): |
| | |
| | x = x + self.attn(self.ln_1(x) + c, custom_attn_mask=custom_attn_mask) |
| | if self.use_cross_attention and context is not None: |
| | x = x + self.cross_att(self.ln3(x) + c, context, custom_attn_mask=custom_attn_mask) |
| | x = x + self.mlp(self.ln_2(x)) |
| | return x |
| | |
| |
|
| | class TransformerEncoder(nn.Module): |
| | def __init__( |
| | self, |
| | embed_dim: int, |
| | n_heads: int, |
| | attn_pdrop: float, |
| | resid_pdrop: float, |
| | n_layers: int, |
| | block_size: int, |
| | bias: bool = False, |
| | use_rot_embed: bool = False, |
| | rotary_xpos: bool = False, |
| | mlp_pdrop: float = 0, |
| | ): |
| | super().__init__() |
| | self.blocks = nn.Sequential( |
| | *[Block( |
| | embed_dim, |
| | n_heads, |
| | attn_pdrop, |
| | resid_pdrop, |
| | mlp_pdrop, |
| | block_size, |
| | causal=False, |
| | use_rot_embed=use_rot_embed, |
| | rotary_xpos=rotary_xpos, |
| | bias=bias |
| | ) |
| | for _ in range(n_layers)] |
| | ) |
| | self.ln = LayerNorm(embed_dim, bias) |
| |
|
| | def forward(self, x, custom_attn_mask=None): |
| | for layer in self.blocks: |
| | x = layer(x, custom_attn_mask=custom_attn_mask) |
| | x = self.ln(x) |
| | return x |
| | |
| | |
| | class TransformerEncoderInterleaved(nn.Module): |
| | def __init__( |
| | self, |
| | embed_dim: int, |
| | n_heads: int, |
| | attn_pdrop: float, |
| | resid_pdrop: float, |
| | n_layers: int, |
| | block_size: int, |
| | bias: bool = False, |
| | use_rot_embed: bool = False, |
| | rotary_xpos: bool = False, |
| | mlp_pdrop: float = 0, |
| | ): |
| | super().__init__() |
| | self.blocks = nn.Sequential( |
| | *[Block( |
| | embed_dim, |
| | n_heads, |
| | attn_pdrop, |
| | resid_pdrop, |
| | mlp_pdrop, |
| | block_size, |
| | causal=False, |
| | use_rot_embed=use_rot_embed, |
| | rotary_xpos=rotary_xpos, |
| | bias=bias |
| | ) |
| | for _ in range(n_layers)] |
| | ) |
| | self.ln = LayerNorm(embed_dim, bias) |
| |
|
| | def forward(self, x): |
| | outputs = [] |
| | for layer in self.blocks: |
| | x = layer(x) |
| | outputs.append(x) |
        x = self.ln(x)
        # Replace the last layer's raw output with its final-norm version.
        outputs.pop(-1)
        outputs.append(x)
| | return outputs |
| | |
| |
|
| | class TransformerFiLMEncoder(nn.Module): |
| | def __init__( |
| | self, |
| | embed_dim: int, |
| | n_heads: int, |
| | attn_pdrop: float, |
| | resid_pdrop: float, |
| | n_layers: int, |
| | block_size: int, |
| | film_cond_dim: int, |
| | bias: bool = False, |
| | use_rot_embed: bool = False, |
| | rotary_xpos: bool = False, |
| | mlp_pdrop: float = 0, |
| | ): |
| | super().__init__() |
| | self.blocks = nn.Sequential( |
| | *[ConditionedBlock( |
| | embed_dim, |
| | n_heads, |
| | attn_pdrop, |
| | resid_pdrop, |
| | mlp_pdrop, |
| | block_size, |
| | causal=False, |
| | use_rot_embed=use_rot_embed, |
| | rotary_xpos=rotary_xpos, |
| | bias=bias, |
| | film_cond_dim=film_cond_dim |
| | ) |
| | for _ in range(n_layers)] |
| | ) |
| | self.ln = LayerNorm(embed_dim, bias) |
| |
|
| | def forward(self, x, c): |
| | for layer in self.blocks: |
| | x = layer(x, c) |
| | x = self.ln(x) |
| | return x |
| |
|
| |
|
| | class TransformerDecoder(nn.Module): |
| | def __init__( |
| | self, |
| | embed_dim: int, |
| | n_heads: int, |
| | attn_pdrop: float, |
| | resid_pdrop: float, |
| | n_layers: int, |
| | block_size: int, |
| | bias: bool = False, |
| | use_rot_embed: bool = False, |
| | rotary_xpos: bool = False, |
| | mlp_pdrop: float = 0, |
| | use_cross_attention: bool = True, |
| | ): |
| | super().__init__() |
| | self.blocks = nn.Sequential( |
| | *[Block( |
| | embed_dim, |
| | n_heads, |
| | attn_pdrop, |
| | resid_pdrop, |
| | mlp_pdrop, |
| | block_size, |
| | causal=True, |
| | use_cross_attention=use_cross_attention, |
| | use_rot_embed=use_rot_embed, |
| | rotary_xpos=rotary_xpos, |
| | bias=bias |
| | ) |
| | for _ in range(n_layers)] |
| | ) |
| | self.ln = LayerNorm(embed_dim, bias) |
| |
|
| | def forward(self, x, cond=None, custom_attn_mask=None): |
| | for layer in self.blocks: |
| | x = layer(x, cond, custom_attn_mask=custom_attn_mask) |
| | x = self.ln(x) |
| | return x |
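
# A minimal usage sketch for TransformerDecoder (illustrative only; sizes are assumptions).
def _example_transformer_decoder_usage():
    embed_dim, block_size = 64, 16
    decoder = TransformerDecoder(embed_dim, n_heads=4, attn_pdrop=0.0, resid_pdrop=0.0,
                                 n_layers=2, block_size=block_size)
    x = torch.randn(2, block_size, embed_dim)     # target tokens (B, T, C)
    cond = torch.randn(2, block_size, embed_dim)  # context attended to via cross-attention
    return decoder(x, cond)                       # (2, 16, 64)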
| |
|
| |
|
| |
|
| | class TransformerFiLMDecoder(nn.Module): |
| | def __init__( |
| | self, |
| | embed_dim: int, |
| | n_heads: int, |
| | attn_pdrop: float, |
| | resid_pdrop: float, |
| | n_layers: int, |
| | block_size: int, |
| | film_cond_dim: int, |
| | bias: bool = False, |
| | use_rot_embed: bool = False, |
| | rotary_xpos: bool = False, |
| | mlp_pdrop: float = 0, |
| | use_cross_attention: bool = True, |
| | use_noise_encoder: bool = False, |
| | kwargs: Optional[DictConfig] = None, |
| | ): |
| | super().__init__() |
| | if use_noise_encoder: |
| | self.blocks = nn.Sequential( |
| | *[NoiseBlock( |
| | embed_dim, |
| | n_heads, |
| | attn_pdrop, |
| | resid_pdrop, |
| | mlp_pdrop, |
| | block_size, |
| | causal=True, |
| | use_cross_attention=use_cross_attention, |
| | use_rot_embed=use_rot_embed, |
| | rotary_xpos=rotary_xpos, |
| | bias=bias, |
| | ) |
| | for _ in range(n_layers)] |
| | ) |
| | else: |
| | self.blocks = nn.Sequential( |
| | *[ConditionedBlock( |
| | embed_dim, |
| | n_heads, |
| | attn_pdrop, |
| | resid_pdrop, |
| | mlp_pdrop, |
| | block_size, |
| | causal=True, |
| | use_cross_attention=use_cross_attention, |
| | use_rot_embed=use_rot_embed, |
| | rotary_xpos=rotary_xpos, |
| | bias=bias, |
| | film_cond_dim=film_cond_dim, |
| | ) |
| | for _ in range(n_layers)] |
| | ) |
| | self.ln = LayerNorm(embed_dim, bias) |
| |
|
| | def forward(self, x, c, cond=None, custom_attn_mask=None): |
| | for layer in self.blocks: |
| | x = layer(x, c, cond, custom_attn_mask=custom_attn_mask) |
| | x = self.ln(x) |
| | return x |
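
# A minimal usage sketch for TransformerFiLMDecoder (illustrative only; sizes are assumptions).
# `c` is the AdaLN-Zero conditioning (e.g. a timestep embedding) and `cond` is the
# cross-attention context.
def _example_film_decoder_usage():
    embed_dim, block_size = 64, 16
    decoder = TransformerFiLMDecoder(embed_dim, n_heads=4, attn_pdrop=0.0, resid_pdrop=0.0,
                                     n_layers=2, block_size=block_size, film_cond_dim=embed_dim)
    x = torch.randn(2, block_size, embed_dim)     # e.g. noisy action tokens (B, T, C)
    c = torch.randn(2, 1, embed_dim)              # conditioning, broadcast over the token dimension
    cond = torch.randn(2, block_size, embed_dim)  # cross-attention context
    return decoder(x, c, cond)                    # (2, 16, 64)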
| |
|
| |
|
| | class TransformerFiLMDecoderInterleaved(nn.Module): |
| | def __init__( |
| | self, |
| | embed_dim: int, |
| | n_heads: int, |
| | attn_pdrop: float, |
| | resid_pdrop: float, |
| | n_layers: int, |
| | block_size: int, |
| | film_cond_dim: int, |
| | bias: bool = False, |
| | use_rot_embed: bool = False, |
| | rotary_xpos: bool = False, |
| | mlp_pdrop: float = 0, |
| | use_cross_attention: bool = True, |
| | use_noise_encoder: bool = False, |
| | kwargs: Optional[DictConfig] = None, |
| | ): |
| | super().__init__() |
| | if use_noise_encoder: |
| | self.blocks = nn.Sequential( |
| | *[NoiseBlock( |
| | embed_dim, |
| | n_heads, |
| | attn_pdrop, |
| | resid_pdrop, |
| | mlp_pdrop, |
| | block_size, |
| | causal=True, |
| | use_cross_attention=use_cross_attention, |
| | use_rot_embed=use_rot_embed, |
| | rotary_xpos=rotary_xpos, |
| | bias=bias, |
| | ) |
| | for _ in range(n_layers)] |
| | ) |
| | else: |
| | self.blocks = nn.Sequential( |
| | *[ConditionedBlock( |
| | embed_dim, |
| | n_heads, |
| | attn_pdrop, |
| | resid_pdrop, |
| | mlp_pdrop, |
| | block_size, |
| | causal=True, |
| | use_cross_attention=use_cross_attention, |
| | use_rot_embed=use_rot_embed, |
| | rotary_xpos=rotary_xpos, |
| | bias=bias, |
| | film_cond_dim=film_cond_dim, |
| | ) |
| | for _ in range(n_layers)] |
| | ) |
| | self.ln = LayerNorm(embed_dim, bias) |
| |
|
| | def forward(self, x, c, cond=None, custom_attn_mask=None): |
| | for idx, layer in enumerate(self.blocks): |
            cond_tokens = cond[idx]
| | x = layer(x, c, cond_tokens, custom_attn_mask=custom_attn_mask) |
| | x = self.ln(x) |
| | return x |
| |
|
| |
|
| | class TransformerCrossAttentionEncoder(nn.Module): |
| | def __init__( |
| | self, |
| | embed_dim: int, |
| | n_heads: int, |
| | attn_pdrop: float, |
| | resid_pdrop: float, |
| | n_layers: int, |
| | block_size: int, |
| | bias: bool = False, |
| | use_rot_embed: bool = False, |
| | rotary_xpos: bool = False, |
| | mlp_pdrop: float = 0, |
| | use_cross_attention: bool = True, |
| | ): |
| | super().__init__() |
| | self.blocks = nn.Sequential( |
| | *[Block( |
| | embed_dim, |
| | n_heads, |
| | attn_pdrop, |
| | resid_pdrop, |
| | mlp_pdrop, |
| | block_size, |
| | causal=False, |
| | use_cross_attention=use_cross_attention, |
| | use_rot_embed=use_rot_embed, |
| | rotary_xpos=rotary_xpos, |
| | bias=bias |
| | ) |
| | for _ in range(n_layers)] |
| | ) |
| | self.ln = LayerNorm(embed_dim, bias) |
| |
|
| | def forward(self, x, cond=None, custom_attn_mask=None): |
| | for layer in self.blocks: |
| | x = layer(x, cond, custom_attn_mask=custom_attn_mask) |
| | x = self.ln(x) |
| | return x |
| | |
| |
|
| | class TransformerCrossAttentionOnlyEncoder(nn.Module): |
| | def __init__( |
| | self, |
| | embed_dim: int, |
| | n_heads: int, |
| | attn_pdrop: float, |
| | resid_pdrop: float, |
| | n_layers: int, |
| | block_size: int, |
| | bias: bool = False, |
| | use_rot_embed: bool = False, |
| | rotary_xpos: bool = False, |
| | mlp_pdrop: float = 0, |
| | use_cross_attention: bool = True, |
| | ): |
| | super().__init__() |
| | self.blocks = nn.Sequential( |
| | *[CrossAttentionOnlyBlock( |
| | embed_dim, |
| | n_heads, |
| | attn_pdrop, |
| | resid_pdrop, |
| | mlp_pdrop, |
| | block_size, |
| | causal=False, |
| | use_rot_embed=use_rot_embed, |
| | rotary_xpos=rotary_xpos, |
| | bias=bias |
| | ) |
| | for _ in range(n_layers)] |
| | ) |
| | self.ln = LayerNorm(embed_dim, bias) |
| |
|
| | def forward(self, x, cond=None, custom_attn_mask=None): |
| | for layer in self.blocks: |
| | x = layer(x, cond, custom_attn_mask=custom_attn_mask) |
| | x = self.ln(x) |
| | return x |
| |
|
| | |
| | |
| | class MAPAttention(nn.Module): |
| | def __init__(self, embed_dim: int, n_heads: int) -> None: |
| | """Multi-Input Multi-Headed Attention Operation""" |
| | super().__init__() |
| | assert embed_dim % n_heads == 0, "`embed_dim` must be divisible by `n_heads`!" |
| | self.n_heads, self.scale = n_heads, (embed_dim // n_heads) ** -0.5 |
| |
|
| | |
| | self.q, self.kv = nn.Linear(embed_dim, embed_dim, bias=False), nn.Linear(embed_dim, 2 * embed_dim, bias=False) |
| | self.proj = nn.Linear(embed_dim, embed_dim) |
| |
|
| | def forward(self, seed: torch.Tensor, x: torch.Tensor) -> torch.Tensor: |
| | (B_s, K, C_s), (B_x, N, C_x) = seed.shape, x.shape |
| | assert C_s == C_x, "Seed vectors and pool inputs must have the same embedding dimensionality!" |
| |
|
| | |
| | q = self.q(seed).reshape(B_s, K, self.n_heads, C_s // self.n_heads).permute(0, 2, 1, 3) |
| | kv = self.kv(x).reshape(B_x, N, 2, self.n_heads, C_x // self.n_heads).permute(2, 0, 3, 1, 4) |
| | k, v = kv.unbind(0) |
| |
|
| | |
| | scores = q @ (k.transpose(-2, -1) * self.scale) |
| | attn = scores.softmax(dim=-1) |
| | vals = (attn @ v).transpose(1, 2).reshape(B_s, K, C_s) |
| |
|
| | |
| | return self.proj(vals) |
| |
|
| |
|
| | class MAPBlock(nn.Module): |
| | def __init__( |
| | self, |
| | n_latents: int, |
| | embed_dim: int, |
| | n_heads: int, |
        output_dim: Optional[int] = None,
| | mlp_ratio: float = 4.0, |
| | do_rms_norm: bool = True, |
| | do_swish_glu: bool = True, |
| | ) -> None: |
| | """Multiheaded Attention Pooling Block -- note that for MAP, we adopt earlier post-norm conventions.""" |
| | super().__init__() |
        self.n_latents, self.n_heads = n_latents, 2 * n_heads

        # Pool into `output_dim`-sized latents; fall back to the input embedding size if no output_dim is given.
        self.embed_dim = output_dim if output_dim is not None else embed_dim
        self.projection = nn.Linear(embed_dim, self.embed_dim)
| |
|
| | |
| | self.latents = nn.Parameter(torch.zeros(self.n_latents, self.embed_dim)) |
| | nn.init.normal_(self.latents, std=0.02) |
| |
|
| | |
| | self.attn_norm = RMSNorm(self.embed_dim) if do_rms_norm else nn.LayerNorm(self.embed_dim, eps=1e-6) |
| | self.attn = MAPAttention(self.embed_dim, n_heads=self.n_heads) |
| | self.mlp_norm = RMSNorm(self.embed_dim) if do_rms_norm else nn.LayerNorm(self.embed_dim, eps=1e-6) |
| | self.mlp = nn.Sequential( |
| | |
| | ( |
| | SwishGLU(self.embed_dim, int(mlp_ratio * self.embed_dim)) |
| | if do_swish_glu |
| | else nn.Sequential(nn.Linear(self.embed_dim, int(mlp_ratio * self.embed_dim)), nn.GELU()) |
| | ), |
| | nn.Linear(int(mlp_ratio * self.embed_dim), self.embed_dim), |
| | ) |
| |
|
| | def forward(self, x: torch.Tensor) -> torch.Tensor: |
        latents = einops.repeat(self.latents, "n_latents d -> bsz n_latents d", bsz=x.shape[0])
| | latents = self.attn_norm(latents + self.attn(latents, self.projection(x))) |
| | latents = self.mlp_norm(latents + self.mlp(latents)) |
| | return latents.squeeze(dim=1) |
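
# A minimal usage sketch for MAPBlock attention pooling (illustrative only; the token
# count and dimensions are assumptions).
def _example_map_block_usage():
    tokens = torch.randn(2, 10, 384)   # (B, N, D) token sequence to pool
    pool = MAPBlock(n_latents=1, embed_dim=384, n_heads=8, output_dim=384)
    return pool(tokens)                # (2, 384): one pooled vector per sequence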
| |
|
| |
|
| | class SiamneseDecoder(nn.Module): |
| | def __init__( |
| | self, |
| | embed_dim: int, |
| | n_heads: int, |
| | attn_pdrop: float, |
| | resid_pdrop: float, |
| | n_layers: int, |
| | block_size: int, |
| | bias: bool = False, |
| | use_rot_embed: bool = False, |
| | rotary_xpos: bool = False, |
| | mlp_pdrop: float = 0, |
| | use_cross_attention: bool = True, |
| | ): |
| | super().__init__() |
| | self.blocks = nn.Sequential( |
| | *[Block( |
| | embed_dim, |
| | n_heads, |
| | attn_pdrop, |
| | resid_pdrop, |
| | mlp_pdrop, |
| | block_size, |
| | causal=False, |
| | use_cross_attention=use_cross_attention, |
| | use_rot_embed=use_rot_embed, |
| | rotary_xpos=rotary_xpos, |
| | bias=bias |
| | ) |
| | for _ in range(n_layers)] |
| | ) |
| | self.ln = LayerNorm(embed_dim, bias) |
| |
|
| | def forward(self, x, cond=None, custom_attn_mask=None): |
| | for layer in self.blocks: |
| | x = layer(x, cond, custom_attn_mask=custom_attn_mask) |
| | x = self.ln(x) |
| | return x |
| | |
| |
|
| | class ClipStyleProjection(nn.Module): |
| | |
| | def __init__(self, clip_style, token_dim=384, clip_token_index=0, num_token=4): |
| | super(ClipStyleProjection, self).__init__() |
| | self.clip_style = clip_style |
| | self.clip_token_index = clip_token_index |
| | if clip_style == 'map' or clip_style == 'map_state_only': |
| | self.latent_proj = MAPBlock(1, token_dim, 8, output_dim=token_dim) |
| | elif clip_style == 'mean_pooling' or clip_style == 'mean_pool_state_only': |
| | self.latent_proj = MeanPooling(token_dim) |
| | elif clip_style == 'mlp': |
| | self.latent_proj = nn.Sequential( |
| | nn.Linear(num_token * token_dim, token_dim), |
| | nn.LayerNorm(token_dim), |
| | nn.Tanh() |
| | ) |
| | elif clip_style == 'single_token': |
| | self.latent_proj = nn.Identity() |
| | elif clip_style == 'multihead': |
| | self.latent_proj = nn.Identity() |
| | else: |
| | raise ValueError("Invalid clip_style. Expected 'map', 'mean_pooling', or 'single_token' or 'multihead'.") |
| | |
| |
|
| | def forward(self, x): |
| | |
| | |
| | if self.clip_style == 'single_token': |
| | x = x[:, self.clip_token_index, :] |
| | elif self.clip_style == 'map_state_only' or self.clip_style == 'mean_pool_state_only': |
| | x = x[:, 1:] |
| | elif self.clip_style == 'mlp': |
| | |
| | x = einops.rearrange(x, 'b t d -> b (t d)') |
| | |
| | return self.latent_proj(x) |
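
# A minimal usage sketch for ClipStyleProjection (illustrative only; the token count
# and dimension follow the defaults above).
def _example_clip_style_projection_usage():
    tokens = torch.randn(2, 4, 384)    # (B, num_token, token_dim)
    proj_map = ClipStyleProjection('map', token_dim=384, num_token=4)
    proj_mean = ClipStyleProjection('mean_pooling', token_dim=384, num_token=4)
    return proj_map(tokens), proj_mean(tokens)   # each (2, 384)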
| |
|
| |
|
| | class MeanPooling(nn.Module): |
| | def __init__(self, token_dim): |
| | super(MeanPooling, self).__init__() |
| | self.token_dim = token_dim |
| |
|
| | def forward(self, x): |
| | return x.mean(dim=1).view(-1, self.token_dim) |
| |
|
| |
|