# fjwwjf151's picture
# Upload folder using huggingface_hub
# b506011 verified
from matplotlib.pyplot import cla
import torch
import torch.nn as nn
import torch.nn.functional as F
import einops
from inspect import isfunction
from typing import Optional, Tuple
import logging
import math
from typing import Optional
import torch
import torch.nn as nn
from torch.nn import functional as F
from omegaconf import DictConfig
import einops
import matplotlib.pyplot as plt
import os
import numpy as np
from .position_embeddings import *
# Class-level counter used to assign a unique ID to every Attention instance.
_attention_instance_counter = 0
# Global dict accumulating per-layer cross-attention data.
_cross_attn_accumulated_data = {}  # {batch_idx: {layer_id: accumulated_data}}
# Visualization mode: 'average' (mean over layers) or 'separate' (per-layer curves).
# Configurable via the CROSS_ATTN_VIS_MODE environment variable; defaults to 'average'.
def _get_cross_attn_vis_mode():
mode = os.environ.get('CROSS_ATTN_VIS_MODE', 'average').lower()
if mode not in ['average', 'separate']:
mode = 'average'
return mode
def default(val, d):
    """Return `val` when it exists, otherwise the fallback `d`.

    A callable fallback is invoked lazily, so expensive defaults are only
    computed when actually needed.
    """
    if not exists(val):
        return d() if isfunction(d) else d
    return val
class LayerNorm(nn.Module):
    """LayerNorm with an optional bias (PyTorch's own doesn't support bias=False)."""

    def __init__(self, ndim, bias):
        super().__init__()
        # Learnable gain is always present; the additive bias term is optional.
        self.weight = nn.Parameter(torch.ones(ndim))
        self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None

    def forward(self, input):
        normalized_shape = self.weight.shape
        return F.layer_norm(input, normalized_shape, self.weight, self.bias, 1e-5)
# RMSNorm -- Better, simpler alternative to LayerNorm
class RMSNorm(nn.Module):
    """Root-mean-square normalization -- a simpler, effective alternative to LayerNorm."""

    def __init__(self, dim: int, eps: float = 1e-8) -> None:
        super().__init__()
        self.scale = dim ** -0.5
        self.eps = eps
        self.g = nn.Parameter(torch.ones(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # ||x||_2 * dim**-0.5 equals the RMS of x over the last dimension.
        rms = x.norm(dim=-1, keepdim=True) * self.scale
        # eps-clamp prevents division by zero for all-zero rows.
        return x / rms.clamp(min=self.eps) * self.g
# SwishGLU -- A Gated Linear Unit (GLU) with the Swish activation; always better than GELU MLP!
class SwishGLU(nn.Module):
    """Gated Linear Unit with a Swish (SiLU) gate -- empirically stronger than a GELU MLP."""

    def __init__(self, in_dim: int, out_dim: int) -> None:
        super().__init__()
        self.act = nn.SiLU()
        # A single projection produces both the value half and the gate half.
        self.project = nn.Linear(in_dim, 2 * out_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        value, gate = self.project(x).tensor_split(2, dim=-1)
        return self.act(gate) * value
class Attention(nn.Module):
    """Multi-head attention supporting both self- and cross-attention.

    If ``context`` is passed to :meth:`forward`, queries come from ``x`` while
    keys/values come from ``context`` (cross-attention); otherwise Q/K/V all
    come from ``x`` (self-attention). Optionally causal, optionally with rotary
    position embeddings. Also carries (currently disabled) hooks for dumping
    cross-attention visualizations to disk.
    """
    def __init__(
        self,
        n_embd: int,
        n_head: int,
        attn_pdrop: float,
        resid_pdrop: float,
        block_size: int,
        causal: bool = False,
        bias=False,
        use_rot_embed: bool = False,
        rotary_xpos: bool = False,
        rotary_emb_dim = None,
        rotary_xpos_scale_base = 512,
        rotary_interpolation_factor = 1.,
    ):
        super().__init__()
        assert n_embd % n_head == 0
        # key, query, value projections for all heads, but in a batch
        self.key = nn.Linear(n_embd, n_embd)
        self.query = nn.Linear(n_embd, n_embd)
        self.value = nn.Linear(n_embd, n_embd)
        # output projection
        self.c_proj = nn.Linear(n_embd, n_embd, bias=bias)
        # regularization
        self.attn_dropout = nn.Dropout(attn_pdrop)
        self.resid_dropout = nn.Dropout(resid_pdrop)
        self.n_head = n_head
        self.n_embd = n_embd
        self.causal = causal
        # flash attention make GPU go brrrrr but support is only in PyTorch >= 2.0
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
        if not self.flash:
            print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
            # causal mask to ensure that attention is only applied to the left in the input sequence
            self.register_buffer("bias", torch.tril(torch.ones(block_size, block_size))
                                         .view(1, 1, block_size, block_size))
        self.use_rot_embed = use_rot_embed
        if self.use_rot_embed:
            # Update (12/2022): Rotary embedding has since been hugely successful, widely adopted in many large language models, including the largest in the world, PaLM.
            # However, it has been uncovered in the ALiBi paper that rotary embeddings cannot length extrapolate well.
            # This was recently addressed in <a href="https://arxiv.org/abs/2212.10554v1">a Microsoft research paper</a>.
            # They propose a way to unobtrusively add the same decay as in ALiBi, and found that this resolves the extrapolation problem.
            # You can use it in this repository by setting `rotary_xpos = True`. Like ALiBi, it would enforce the attention to be local. You can set the receptive field with `rotary_xpos_scale_base` value, which defaults to `512`
            rotary_emb_dim = max(default(rotary_emb_dim, self.n_head // 2), 32)
            self.rotary_pos_emb = RotaryEmbedding(
                rotary_emb_dim,
                use_xpos = rotary_xpos,
                xpos_scale_base = rotary_xpos_scale_base,
                interpolate_factor = rotary_interpolation_factor,
            )
        # Assign a unique ID to every Attention instance, used to tell layers
        # apart when accumulating cross-attention statistics for visualization.
        global _attention_instance_counter
        self._instance_id = _attention_instance_counter
        _attention_instance_counter += 1
        # Counter of completed visualization rounds ("step" in output filenames).
        self._vis_counter = 0
    def _visualize_attention(self, att: torch.Tensor, is_cross_attention: bool = False):
        """Accumulate and visualize (post-softmax) attention weights.

        att: (B, nh, T_q, T_k) attention-weight matrix.
        is_cross_attention: whether the weights come from cross-attention;
            self-attention calls are currently a no-op.
        """
        B = att.shape[0]  # batch size
        # Only cross-attention maps are accumulated and visualized.
        if is_cross_attention:
            global _cross_attn_accumulated_data
            # Create the output directory.
            vis_dir = "attention_vis"
            os.makedirs(vis_dir, exist_ok=True)
            # Process every sample in the batch.
            for b_idx in range(B):
                # Average over heads to get this sample's attention weights.
                att_vis = att[b_idx].mean(dim=0).detach().cpu().numpy()  # (T_q, T_k)
                # Collapse the query axis: the mean over T_q gives the average
                # attention each key position receives.
                att_vis_1d = att_vis.mean(axis=0)  # (T_k,)
                # Skip idx=0 and process the next 160 positions (idx 1-160),
                # accumulating every 32 positions into one bucket.
                chunk_size = 32
                att_vis_filtered = att_vis_1d[1:161]  # idx 1-160, 160 positions total
                n_chunks = len(att_vis_filtered) // chunk_size  # 160 / 32 = 5 chunks
                att_vis_accumulated = []
                x_labels = []
                for i in range(n_chunks):
                    start_idx_in_filtered = i * chunk_size  # index within the filtered array
                    end_idx_in_filtered = (i + 1) * chunk_size
                    # Actual key indices start at 1, hence the +1 offset.
                    actual_start_idx = start_idx_in_filtered + 1
                    actual_end_idx = end_idx_in_filtered  # filtered end 32 maps to actual idx 32
                    chunk_sum = att_vis_filtered[start_idx_in_filtered:end_idx_in_filtered].sum()
                    att_vis_accumulated.append(chunk_sum)
                    x_labels.append(f'{actual_start_idx}-{actual_end_idx}')
                # Handle any remainder (160 splits evenly into 5 chunks of 32,
                # so normally there is none).
                if len(att_vis_filtered) % chunk_size != 0:
                    start_idx_in_filtered = n_chunks * chunk_size
                    actual_start_idx = start_idx_in_filtered + 1
                    chunk_sum = att_vis_filtered[start_idx_in_filtered:].sum()
                    att_vis_accumulated.append(chunk_sum)
                    x_labels.append(f'{actual_start_idx}-{len(att_vis_1d)-1}')
                att_vis_accumulated = np.array(att_vis_accumulated)
                # Store into the global dict (make sure the b_idx slot exists).
                if b_idx not in _cross_attn_accumulated_data:
                    _cross_attn_accumulated_data[b_idx] = {}
                _cross_attn_accumulated_data[b_idx][self._instance_id] = {
                    'data': att_vis_accumulated,
                    'x_labels': x_labels,
                    'step': self._vis_counter,
                    'full_map': att_vis
                }
                # Once 4 cross-attention layers have reported for this sample,
                # dump the raw data and render the figures.
                if b_idx in _cross_attn_accumulated_data and len(_cross_attn_accumulated_data[b_idx]) >= 4:
                    # Save the accumulated data to a txt file.
                    self._save_accumulated_cross_attn_to_txt(b_idx)
                    # Save a heatmap of the last layer.
                    self._save_last_layer_heatmap(b_idx)
                    # Plot the accumulated per-layer curves.
                    self._plot_accumulated_cross_attn(b_idx)
            self._vis_counter += 1
        else:
            # Self-attention: nothing to visualize (hook kept for future use).
            pass
    def _save_accumulated_cross_attn_to_txt(self, b_idx):
        """Write the accumulated cross-attention of all layers for sample `b_idx` to a txt file."""
        global _cross_attn_accumulated_data
        if b_idx not in _cross_attn_accumulated_data:
            return
        layer_data = _cross_attn_accumulated_data[b_idx]
        # Require data from all 4 layers before writing anything.
        if len(layer_data) < 4:
            return
        vis_dir = "attention_vis"
        os.makedirs(vis_dir, exist_ok=True)
        # Grab x_labels (they should be identical across layers).
        x_labels = None
        for layer_id in sorted(layer_data.keys()):
            if x_labels is None:
                x_labels = layer_data[layer_id]['x_labels']
                break
        sorted_layer_ids = sorted(layer_data.keys())[:4]
        step = layer_data[sorted_layer_ids[0]]['step']
        # Collect per-layer data ("separate" mode).
        all_data = {}
        for layer_id in sorted_layer_ids:
            all_data[layer_id] = layer_data[layer_id]['data']
        # Compute the mean across layers (derived from the separate data).
        all_data_array = np.stack([all_data[layer_id] for layer_id in sorted_layer_ids], axis=0)  # (4, n_chunks)
        averaged_data = all_data_array.mean(axis=0)  # (n_chunks,)
        # Write everything to the txt file.
        filename = os.path.join(vis_dir, f'attn_accumulated_step{step:04d}_batch{b_idx}.txt')
        with open(filename, 'w') as f:
            f.write(f"Cross-Attention Accumulated Data - Batch {b_idx}, Step {step}\n")
            f.write("=" * 60 + "\n\n")
            # Key position ranges (the 32-wide buckets).
            f.write("Key Position Ranges (每32个累计):\n")
            f.write(", ".join(x_labels) + "\n\n")
            # Per-layer data ("separate" mode).
            f.write("Separate Mode - Each Layer Data:\n")
            f.write("-" * 60 + "\n")
            for layer_id in sorted_layer_ids:
                data = all_data[layer_id]
                f.write(f"Layer {layer_id}:\n")
                f.write(", ".join([f"{val:.6f}" for val in data]) + "\n")
                f.write(f" Sum: {data.sum():.6f}, Mean: {data.mean():.6f}, Max: {data.max():.6f}, Min: {data.min():.6f}\n\n")
            # Averaged data (computed from the separate data above).
            f.write("Average Mode - Average of 4 Layers (calculated from separate data):\n")
            f.write("-" * 60 + "\n")
            f.write(", ".join([f"{val:.6f}" for val in averaged_data]) + "\n")
            f.write(f" Sum: {averaged_data.sum():.6f}, Mean: {averaged_data.mean():.6f}, Max: {averaged_data.max():.6f}, Min: {averaged_data.min():.6f}\n")
        print(f"Cross-Attention数据已保存到txt: (unknown) (Batch {b_idx})")
        print(f" 包含4层separate数据和average数据(从separate计算得出)")
    def _save_last_layer_heatmap(self, b_idx):
        """Save a heatmap for the last cross-attention layer only.

        The query (action) axis is kept at full resolution; key positions are
        bucketed in groups of 32 (key idx 0 is skipped).
        """
        global _cross_attn_accumulated_data
        if b_idx not in _cross_attn_accumulated_data:
            return
        layer_data = _cross_attn_accumulated_data[b_idx]
        if len(layer_data) < 4:
            return
        # Treat the largest layer_id as the last layer.
        last_layer_id = max(layer_data.keys())
        if 'full_map' not in layer_data[last_layer_id]:
            return
        heatmap = layer_data[last_layer_id]['full_map']  # (T_q, T_k)
        step = layer_data[last_layer_id]['step']
        # Compress the key axis: skip idx 0, group every 32 positions.
        chunk_size = 32
        T_q, T_k = heatmap.shape
        # Skip idx=0; take at most positions 1-160 (fewer if T_k is smaller).
        end_idx_filtered = min(161, T_k)
        heatmap_filtered = heatmap[:, 1:end_idx_filtered]  # (T_q, min(160, T_k-1))
        # Bucket key positions in groups of 32.
        n_chunks = heatmap_filtered.shape[1] // chunk_size
        heatmap_compressed = []
        # Fixed x-axis labels: "-40 -20 0 20 40".
        fixed_x_labels = ['-40', '-20', '0', '20', '40']
        for i in range(n_chunks):
            start_idx = i * chunk_size
            end_idx = (i + 1) * chunk_size
            # For each query position, sum the attention over this 32-key bucket.
            chunk_sum = heatmap_filtered[:, start_idx:end_idx].sum(axis=1, keepdims=True)  # (T_q, 1)
            heatmap_compressed.append(chunk_sum)
        # Handle any leftover positions.
        if heatmap_filtered.shape[1] % chunk_size != 0:
            start_idx = n_chunks * chunk_size
            chunk_sum = heatmap_filtered[:, start_idx:].sum(axis=1, keepdims=True)
            heatmap_compressed.append(chunk_sum)
        # Use the fixed x-axis labels.
        x_labels = fixed_x_labels[:len(heatmap_compressed)]
        # Concatenate buckets into the compressed heatmap (T_q, n_groups).
        if len(heatmap_compressed) > 0:
            heatmap_compressed = np.concatenate(heatmap_compressed, axis=1)  # (T_q, n_groups)
        else:
            # No data at all: fall back to an empty heatmap.
            heatmap_compressed = np.zeros((T_q, 1))
        vis_dir = "attention_vis"
        os.makedirs(vis_dir, exist_ok=True)
        # Use Times New Roman everywhere in the figure.
        plt.rcParams['font.family'] = 'Times New Roman'
        plt.figure(figsize=(10, 8))
        plt.imshow(heatmap_compressed, cmap='viridis', aspect='auto')
        cbar = plt.colorbar(label='Attention Weight')
        cbar.set_label('Attention Weight', fontsize=24, fontfamily='Times New Roman')
        cbar.ax.tick_params(labelsize=20)
        # Colorbar tick-label fonts.
        for label in cbar.ax.get_yticklabels():
            label.set_fontfamily('Times New Roman')
        # plt.title(f'Last Layer Cross-Attention Heatmap (Layer {last_layer_id})\nBatch {b_idx}, Step {step}', fontsize=13)
        plt.xlabel('View angle', fontsize=24, fontfamily='Times New Roman')
        plt.ylabel('Action', fontsize=24, fontfamily='Times New Roman')
        plt.xticks(range(len(x_labels)), x_labels, rotation=0, ha='center', fontsize=20)
        plt.yticks(fontsize=20)
        # Axis tick-label fonts.
        for label in plt.gca().get_xticklabels():
            label.set_fontfamily('Times New Roman')
        for label in plt.gca().get_yticklabels():
            label.set_fontfamily('Times New Roman')
        plt.tight_layout()
        filename = os.path.join(vis_dir, f'attn_heatmap_last_layer_step{step:04d}_batch{b_idx}.png')
        plt.savefig(filename, dpi=150, bbox_inches='tight')
        plt.close()
        print(f"最后一层Cross-Attention热力图已保存: (unknown) (Batch {b_idx}, Layer {last_layer_id}, Shape: {heatmap_compressed.shape})")
    def _plot_accumulated_cross_attn(self, b_idx):
        """Plot the accumulated cross-attention curves of all layers for sample `b_idx`."""
        global _cross_attn_accumulated_data
        if b_idx not in _cross_attn_accumulated_data:
            return
        layer_data = _cross_attn_accumulated_data[b_idx]
        # Require data from all 4 layers.
        if len(layer_data) < 4:
            return
        vis_mode = _get_cross_attn_vis_mode()
        vis_dir = "attention_vis"
        # Use Times New Roman everywhere in the figure.
        plt.rcParams['font.family'] = 'Times New Roman'
        # Create the figure.
        plt.figure(figsize=(12, 6))
        # Grab x_labels (they should be identical across layers).
        x_labels = None
        for layer_id in sorted(layer_data.keys()):
            if x_labels is None:
                x_labels = layer_data[layer_id]['x_labels']
                break
        sorted_layer_ids = sorted(layer_data.keys())[:4]
        if vis_mode == 'average':
            # Average mode: draw one curve averaging the four layers.
            all_data = []
            for layer_id in sorted_layer_ids:
                data = layer_data[layer_id]['data']
                all_data.append(data)
            # Stack and average across layers.
            all_data_array = np.stack(all_data, axis=0)  # (4, n_chunks)
            averaged_data = all_data_array.mean(axis=0)  # (n_chunks,)
            # Plot the averaged curve.
            step = layer_data[sorted_layer_ids[0]]['step']
            plt.plot(range(len(averaged_data)), averaged_data, linewidth=2, marker='o', markersize=4,
                     color='steelblue', alpha=0.8, label='Average (4 Layers)')
            plt.title(f'Average Cross-Attention (All 4 Layers) - Batch {b_idx}', fontsize=18, fontfamily='Times New Roman')
        else:
            # Separate mode: one curve per layer.
            colors = ['steelblue', 'coral', 'mediumseagreen', 'mediumpurple']  # one color per layer
            for i, layer_id in enumerate(sorted_layer_ids):
                data = layer_data[layer_id]['data']
                step = layer_data[layer_id]['step']
                color = colors[i % len(colors)]
                plt.plot(range(len(data)), data, linewidth=2, marker='o', markersize=4,
                         color=color, alpha=0.8, label=f'Layer {layer_id}')
            plt.title(f'Accumulated Cross-Attention (All Layers) - Batch {b_idx}', fontsize=18, fontfamily='Times New Roman')
        legend = plt.legend(loc='best', fontsize=16)
        for text in legend.get_texts():
            text.set_fontfamily('Times New Roman')
        plt.xlabel('Key Position (每32个累计)', fontsize=18, fontfamily='Times New Roman')
        plt.ylabel('Cumulative Attention Weight', fontsize=18, fontfamily='Times New Roman')
        plt.xticks(range(len(x_labels)), x_labels, rotation=45, ha='right', fontsize=16)
        plt.yticks(fontsize=16)
        # Axis tick-label fonts.
        for label in plt.gca().get_xticklabels():
            label.set_fontfamily('Times New Roman')
        for label in plt.gca().get_yticklabels():
            label.set_fontfamily('Times New Roman')
        plt.ylim(0.15, 0.25)  # shared y-range across figures: 0.15-0.25
        plt.grid(alpha=0.3, linestyle='--')
        plt.tight_layout()
        # Save the accumulated-visualization image.
        step = layer_data[sorted_layer_ids[0]]['step']
        filename = os.path.join(vis_dir, f'attn_vis_accumulated_step{step:04d}_batch{b_idx}.png')
        plt.savefig(filename, dpi=150, bbox_inches='tight')
        plt.close()
        print(f"累计Cross-Attention可视化已保存: (unknown) (Batch {b_idx}, Mode: {vis_mode})")
        # Drop this batch sample's data now that it has been rendered.
        del _cross_attn_accumulated_data[b_idx]
    def forward(self, x, context=None, custom_attn_mask=None):
        """Attend over `x` (self-attention) or over `context` (cross-attention).

        x: (B, T, C); context (optional): keys/values source for cross-attention;
        custom_attn_mask: optional mask forwarded to the attention computation.
        Returns a (B, T, C) tensor after output projection and residual dropout.
        """
        B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        # if the context is not None we do cross-attention otherwise self-attention;
        # cross attention computes the query from x and the keys and values from the context
        if context is not None:
            k = self.key(context).view(B, -1, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
            q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
            v = self.value(context).view(B, -1, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        else:
            k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
            q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
            v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        # apply rotary position embeddings to queries and keys if enabled
        if self.use_rot_embed:
            q = self.rotary_pos_emb.rotate_queries_or_keys(q)
            k = self.rotary_pos_emb.rotate_queries_or_keys(k)
        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        if self.flash:
            # efficient attention using Flash Attention CUDA kernels
            y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=custom_attn_mask, dropout_p=self.attn_dropout.p if self.training else 0, is_causal=self.causal)
            # if context is not None:
            #     # For visualization: recompute the (post-softmax) attention weights by hand
            #     att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
            #     if custom_attn_mask is not None:
            #         att = att.masked_fill(custom_attn_mask == 0, float('-inf'))
            #     att = F.softmax(att, dim=-1)  # post-softmax attention weights
            #     # visualize the (post-softmax) attention
            #     self._visualize_attention(att, context is not None)
        else:
            # manual implementation of attention
            att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
            if self.causal:
                if custom_attn_mask is not None:
                    att = att.masked_fill(custom_attn_mask == 0, float('-inf'))
                else:
                    att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
            att = F.softmax(att, dim=-1)
            att = self.attn_dropout(att)
            y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
        # output projection
        y = self.resid_dropout(self.c_proj(y))
        return y
class MLP(nn.Module):
    """Standard transformer feed-forward block: Linear -> GELU -> Linear -> Dropout."""

    def __init__(
        self,
        n_embd: int,
        bias: bool,
        dropout: float = 0
    ):
        super().__init__()
        # Hidden layer uses the conventional 4x expansion factor.
        self.c_fc = nn.Linear(n_embd, 4 * n_embd, bias=bias)
        self.gelu = nn.GELU()
        self.c_proj = nn.Linear(4 * n_embd, n_embd, bias=bias)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = self.gelu(self.c_fc(x))
        return self.dropout(self.c_proj(hidden))
class Block(nn.Module):
    """Pre-norm transformer block: self-attention, optional cross-attention, then MLP."""

    def __init__(
        self,
        n_embd: int,
        n_heads: int,
        attn_pdrop: float,
        resid_pdrop: float,
        mlp_pdrop: float,
        block_size: int,
        causal: bool,
        use_cross_attention: bool = False,
        use_rot_embed: bool = False,
        rotary_xpos: bool = False,
        bias: bool = False,  # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
    ):
        super().__init__()
        self.ln_1 = LayerNorm(n_embd, bias=bias)
        self.attn = Attention(n_embd, n_heads, attn_pdrop, resid_pdrop, block_size, causal, bias, use_rot_embed, rotary_xpos)
        self.use_cross_attention = use_cross_attention
        if self.use_cross_attention:
            # Dedicated attention module + norm for attending over the context.
            self.cross_att = Attention(n_embd, n_heads, attn_pdrop, resid_pdrop, block_size, causal, bias, use_rot_embed, rotary_xpos)
            self.ln3 = nn.LayerNorm(n_embd)
        self.ln_2 = LayerNorm(n_embd, bias=bias)
        self.mlp = MLP(n_embd, bias, mlp_pdrop)

    def forward(self, x, context=None, custom_attn_mask=None):
        """Residual self-attention, then (if enabled) cross-attention over `context`, then MLP."""
        x = x + self.attn(self.ln_1(x), custom_attn_mask=custom_attn_mask)
        if self.use_cross_attention and context is not None:
            x = x + self.cross_att(self.ln3(x), context, custom_attn_mask=custom_attn_mask)
        return x + self.mlp(self.ln_2(x))
class CrossAttentionOnlyBlock(nn.Module):
    """Transformer block with only cross-attention and an MLP (no self-attention)."""

    def __init__(
        self,
        n_embd: int,
        n_heads: int,
        attn_pdrop: float,
        resid_pdrop: float,
        mlp_pdrop: float,
        block_size: int,
        causal: bool,
        use_rot_embed: bool = False,
        rotary_xpos: bool = False,
        bias: bool = False,  # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
    ):
        super().__init__()
        self.ln_1 = LayerNorm(n_embd, bias=bias)
        self.cross_att = Attention(n_embd, n_heads, attn_pdrop, resid_pdrop, block_size, causal, bias, use_rot_embed, rotary_xpos)
        self.ln_2 = LayerNorm(n_embd, bias=bias)
        self.mlp = MLP(n_embd, bias, mlp_pdrop)

    def forward(self, x, context=None, custom_attn_mask=None):
        """Residual cross-attention over `context`, then a residual MLP."""
        x = x + self.cross_att(self.ln_1(x), context, custom_attn_mask=custom_attn_mask)
        return x + self.mlp(self.ln_2(x))
class AdaLNZero(nn.Module):
    """
    AdaLN-Zero conditioning head: maps a conditioning vector to six modulation
    tensors (shift/scale/gate for the attention branch and for the MLP branch).
    """

    def __init__(self, hidden_size):
        super().__init__()
        self.modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 6 * hidden_size, bias=True)
        )
        # NOTE(review): DiT zero-initializes this projection; the zero-init
        # lines were deliberately left disabled here.
        # nn.init.zeros_(self.modulation[1].weight)
        # nn.init.zeros_(self.modulation[1].bias)

    def forward(self, c):
        modulation_params = self.modulation(c)
        return modulation_params.chunk(6, dim=-1)
def modulate(x, shift, scale):
    """Affine modulation used by the AdaLN blocks: scale `x`, then add `shift`."""
    return x * scale + shift
class ConditionedBlock(Block):
    """
    Transformer block conditioned via AdaLN-Zero: the conditioning vector `c`
    yields shift/scale/gate terms that modulate the attention and MLP branches.
    """

    def __init__(
        self,
        n_embd,
        n_heads,
        attn_pdrop,
        resid_pdrop,
        mlp_pdrop,
        block_size,
        causal,
        film_cond_dim,
        use_cross_attention=False,
        use_rot_embed=False,
        rotary_xpos=False,
        bias=False  # and any other arguments from the Block class
    ):
        super().__init__(n_embd, n_heads, attn_pdrop, resid_pdrop, mlp_pdrop, block_size, causal,
                         use_cross_attention=use_cross_attention,
                         use_rot_embed=use_rot_embed,
                         rotary_xpos=rotary_xpos,
                         bias=bias)
        self.adaLN_zero = AdaLNZero(film_cond_dim)

    def forward(self, x, c, context=None, custom_attn_mask=None):
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_zero(c)
        # Self-attention branch: modulated input, gated residual.
        attn_in = modulate(self.ln_1(x), shift_msa, scale_msa)
        x = x + gate_msa * self.attn(attn_in, custom_attn_mask=custom_attn_mask)
        # Plain (unmodulated) cross-attention over the context, when enabled.
        if self.use_cross_attention and context is not None:
            x = x + self.cross_att(self.ln3(x), context, custom_attn_mask=custom_attn_mask)
        # MLP branch: modulated input, gated residual.
        mlp_in = modulate(self.ln_2(x), shift_mlp, scale_mlp)
        return x + gate_mlp * self.mlp(mlp_in)
class NoiseBlock(Block):
    """
    Transformer block that conditions by *adding* the conditioning embedding `c`
    to the normalized inputs of both attention branches (no AdaLN modulation).
    """

    def __init__(
        self,
        n_embd,
        n_heads,
        attn_pdrop,
        resid_pdrop,
        mlp_pdrop,
        block_size,
        causal,
        use_cross_attention=False,
        use_rot_embed=False,
        rotary_xpos=False,
        bias=False  # and any other arguments from the Block class
    ):
        super().__init__(n_embd, n_heads, attn_pdrop, resid_pdrop, mlp_pdrop, block_size, causal,
                         use_cross_attention=use_cross_attention,
                         use_rot_embed=use_rot_embed,
                         rotary_xpos=rotary_xpos,
                         bias=bias)

    def forward(self, x, c, context=None, custom_attn_mask=None):
        # Conditioning is injected additively before each attention call.
        x = x + self.attn(self.ln_1(x) + c, custom_attn_mask=custom_attn_mask)
        if self.use_cross_attention and context is not None:
            x = x + self.cross_att(self.ln3(x) + c, context, custom_attn_mask=custom_attn_mask)
        return x + self.mlp(self.ln_2(x))
class TransformerEncoder(nn.Module):
    """Stack of non-causal self-attention Blocks followed by a final LayerNorm."""

    def __init__(
        self,
        embed_dim: int,
        n_heads: int,
        attn_pdrop: float,
        resid_pdrop: float,
        n_layers: int,
        block_size: int,
        bias: bool = False,
        use_rot_embed: bool = False,
        rotary_xpos: bool = False,
        mlp_pdrop: float = 0,
    ):
        super().__init__()
        layers = [
            Block(
                embed_dim,
                n_heads,
                attn_pdrop,
                resid_pdrop,
                mlp_pdrop,
                block_size,
                causal=False,
                use_rot_embed=use_rot_embed,
                rotary_xpos=rotary_xpos,
                bias=bias,
            )
            for _ in range(n_layers)
        ]
        self.blocks = nn.Sequential(*layers)
        self.ln = LayerNorm(embed_dim, bias)

    def forward(self, x, custom_attn_mask=None):
        """Run `x` through every block, then apply the final LayerNorm."""
        for block in self.blocks:
            x = block(x, custom_attn_mask=custom_attn_mask)
        return self.ln(x)
class TransformerEncoderInterleaved(nn.Module):
    """Non-causal Block stack that returns the output of every layer.

    The final entry of the returned list is the LayerNorm-ed last-layer output.
    """

    def __init__(
        self,
        embed_dim: int,
        n_heads: int,
        attn_pdrop: float,
        resid_pdrop: float,
        n_layers: int,
        block_size: int,
        bias: bool = False,
        use_rot_embed: bool = False,
        rotary_xpos: bool = False,
        mlp_pdrop: float = 0,
    ):
        super().__init__()
        layers = [
            Block(
                embed_dim,
                n_heads,
                attn_pdrop,
                resid_pdrop,
                mlp_pdrop,
                block_size,
                causal=False,
                use_rot_embed=use_rot_embed,
                rotary_xpos=rotary_xpos,
                bias=bias,
            )
            for _ in range(n_layers)
        ]
        self.blocks = nn.Sequential(*layers)
        self.ln = LayerNorm(embed_dim, bias)

    def forward(self, x):
        per_layer = []
        for block in self.blocks:
            x = block(x)
            per_layer.append(x)
        # Replace the raw last-layer output with its normalized version.
        per_layer[-1] = self.ln(x)
        return per_layer
class TransformerFiLMEncoder(nn.Module):
    """Stack of non-causal AdaLN-conditioned blocks followed by a final LayerNorm."""

    def __init__(
        self,
        embed_dim: int,
        n_heads: int,
        attn_pdrop: float,
        resid_pdrop: float,
        n_layers: int,
        block_size: int,
        film_cond_dim: int,
        bias: bool = False,
        use_rot_embed: bool = False,
        rotary_xpos: bool = False,
        mlp_pdrop: float = 0,
    ):
        super().__init__()
        layers = [
            ConditionedBlock(
                embed_dim,
                n_heads,
                attn_pdrop,
                resid_pdrop,
                mlp_pdrop,
                block_size,
                causal=False,
                use_rot_embed=use_rot_embed,
                rotary_xpos=rotary_xpos,
                bias=bias,
                film_cond_dim=film_cond_dim,
            )
            for _ in range(n_layers)
        ]
        self.blocks = nn.Sequential(*layers)
        self.ln = LayerNorm(embed_dim, bias)

    def forward(self, x, c):
        """Run `x` through every block conditioned on `c`, then normalize."""
        for block in self.blocks:
            x = block(x, c)
        return self.ln(x)
class TransformerDecoder(nn.Module):
    """Stack of causal Blocks (optionally with cross-attention) plus a final LayerNorm."""

    def __init__(
        self,
        embed_dim: int,
        n_heads: int,
        attn_pdrop: float,
        resid_pdrop: float,
        n_layers: int,
        block_size: int,
        bias: bool = False,
        use_rot_embed: bool = False,
        rotary_xpos: bool = False,
        mlp_pdrop: float = 0,
        use_cross_attention: bool = True,
    ):
        super().__init__()
        layers = [
            Block(
                embed_dim,
                n_heads,
                attn_pdrop,
                resid_pdrop,
                mlp_pdrop,
                block_size,
                causal=True,
                use_cross_attention=use_cross_attention,
                use_rot_embed=use_rot_embed,
                rotary_xpos=rotary_xpos,
                bias=bias,
            )
            for _ in range(n_layers)
        ]
        self.blocks = nn.Sequential(*layers)
        self.ln = LayerNorm(embed_dim, bias)

    def forward(self, x, cond=None, custom_attn_mask=None):
        """Run `x` through every block (cross-attending to `cond` when given), then normalize."""
        for block in self.blocks:
            x = block(x, cond, custom_attn_mask=custom_attn_mask)
        return self.ln(x)
class TransformerFiLMDecoder(nn.Module):
    """Causal decoder stack conditioned either via AdaLN-Zero (ConditionedBlock)
    or by additive noise embeddings (NoiseBlock), plus a final LayerNorm."""

    def __init__(
        self,
        embed_dim: int,
        n_heads: int,
        attn_pdrop: float,
        resid_pdrop: float,
        n_layers: int,
        block_size: int,
        film_cond_dim: int,
        bias: bool = False,
        use_rot_embed: bool = False,
        rotary_xpos: bool = False,
        mlp_pdrop: float = 0,
        use_cross_attention: bool = True,
        use_noise_encoder: bool = False,
        kwargs: Optional[DictConfig] = None,
    ):
        super().__init__()

        def _make_block():
            # Both block types share everything except the FiLM conditioning width.
            shared = dict(
                causal=True,
                use_cross_attention=use_cross_attention,
                use_rot_embed=use_rot_embed,
                rotary_xpos=rotary_xpos,
                bias=bias,
            )
            if use_noise_encoder:
                return NoiseBlock(embed_dim, n_heads, attn_pdrop, resid_pdrop,
                                  mlp_pdrop, block_size, **shared)
            return ConditionedBlock(embed_dim, n_heads, attn_pdrop, resid_pdrop,
                                    mlp_pdrop, block_size,
                                    film_cond_dim=film_cond_dim, **shared)

        self.blocks = nn.Sequential(*[_make_block() for _ in range(n_layers)])
        self.ln = LayerNorm(embed_dim, bias)

    def forward(self, x, c, cond=None, custom_attn_mask=None):
        """Run `x` through every conditioned block, then apply the final LayerNorm."""
        for block in self.blocks:
            x = block(x, c, cond, custom_attn_mask=custom_attn_mask)
        return self.ln(x)
class TransformerFiLMDecoderInterleaved(nn.Module):
    """Like TransformerFiLMDecoder, but each layer cross-attends to its own
    entry of `cond` (one context tensor per layer)."""

    def __init__(
        self,
        embed_dim: int,
        n_heads: int,
        attn_pdrop: float,
        resid_pdrop: float,
        n_layers: int,
        block_size: int,
        film_cond_dim: int,
        bias: bool = False,
        use_rot_embed: bool = False,
        rotary_xpos: bool = False,
        mlp_pdrop: float = 0,
        use_cross_attention: bool = True,
        use_noise_encoder: bool = False,
        kwargs: Optional[DictConfig] = None,
    ):
        super().__init__()

        def _make_block():
            # Both block types share everything except the FiLM conditioning width.
            shared = dict(
                causal=True,
                use_cross_attention=use_cross_attention,
                use_rot_embed=use_rot_embed,
                rotary_xpos=rotary_xpos,
                bias=bias,
            )
            if use_noise_encoder:
                return NoiseBlock(embed_dim, n_heads, attn_pdrop, resid_pdrop,
                                  mlp_pdrop, block_size, **shared)
            return ConditionedBlock(embed_dim, n_heads, attn_pdrop, resid_pdrop,
                                    mlp_pdrop, block_size,
                                    film_cond_dim=film_cond_dim, **shared)

        self.blocks = nn.Sequential(*[_make_block() for _ in range(n_layers)])
        self.ln = LayerNorm(embed_dim, bias)

    def forward(self, x, c, cond=None, custom_attn_mask=None):
        """Run `x` through the stack; layer `i` uses `cond[i]` as its context."""
        for idx, block in enumerate(self.blocks):
            x = block(x, c, cond[idx], custom_attn_mask=custom_attn_mask)
        return self.ln(x)
class TransformerCrossAttentionEncoder(nn.Module):
    """Non-causal Block stack with cross-attention over `cond`, plus a final LayerNorm."""

    def __init__(
        self,
        embed_dim: int,
        n_heads: int,
        attn_pdrop: float,
        resid_pdrop: float,
        n_layers: int,
        block_size: int,
        bias: bool = False,
        use_rot_embed: bool = False,
        rotary_xpos: bool = False,
        mlp_pdrop: float = 0,
        use_cross_attention: bool = True,
    ):
        super().__init__()
        layers = [
            Block(
                embed_dim,
                n_heads,
                attn_pdrop,
                resid_pdrop,
                mlp_pdrop,
                block_size,
                causal=False,
                use_cross_attention=use_cross_attention,
                use_rot_embed=use_rot_embed,
                rotary_xpos=rotary_xpos,
                bias=bias,
            )
            for _ in range(n_layers)
        ]
        self.blocks = nn.Sequential(*layers)
        self.ln = LayerNorm(embed_dim, bias)

    def forward(self, x, cond=None, custom_attn_mask=None):
        """Run `x` through every block (cross-attending to `cond` when given), then normalize."""
        for block in self.blocks:
            x = block(x, cond, custom_attn_mask=custom_attn_mask)
        return self.ln(x)
class TransformerCrossAttentionOnlyEncoder(nn.Module):
    """Stack of cross-attention-only blocks plus a final LayerNorm.

    `use_cross_attention` is accepted for signature parity with the other
    encoders but is unused -- every block here always cross-attends.
    """

    def __init__(
        self,
        embed_dim: int,
        n_heads: int,
        attn_pdrop: float,
        resid_pdrop: float,
        n_layers: int,
        block_size: int,
        bias: bool = False,
        use_rot_embed: bool = False,
        rotary_xpos: bool = False,
        mlp_pdrop: float = 0,
        use_cross_attention: bool = True,
    ):
        super().__init__()
        layers = [
            CrossAttentionOnlyBlock(
                embed_dim,
                n_heads,
                attn_pdrop,
                resid_pdrop,
                mlp_pdrop,
                block_size,
                causal=False,
                use_rot_embed=use_rot_embed,
                rotary_xpos=rotary_xpos,
                bias=bias,
            )
            for _ in range(n_layers)
        ]
        self.blocks = nn.Sequential(*layers)
        self.ln = LayerNorm(embed_dim, bias)

    def forward(self, x, cond=None, custom_attn_mask=None):
        """Run `x` through every cross-attention block over `cond`, then normalize."""
        for block in self.blocks:
            x = block(x, cond, custom_attn_mask=custom_attn_mask)
        return self.ln(x)
# As defined in Set Transformers () -- basically the above, additionally taking in
# a set of $k$ learned "seed vectors" that are used to "pool" information.
class MAPAttention(nn.Module):
    """Multi-headed attention pooling: learned seed queries attend over a set of inputs."""

    def __init__(self, embed_dim: int, n_heads: int) -> None:
        super().__init__()
        assert embed_dim % n_heads == 0, "`embed_dim` must be divisible by `n_heads`!"
        self.n_heads = n_heads
        self.scale = (embed_dim // n_heads) ** -0.5
        # Separate projections: Q from the seed vectors, K/V from the pooled inputs.
        self.q = nn.Linear(embed_dim, embed_dim, bias=False)
        self.kv = nn.Linear(embed_dim, 2 * embed_dim, bias=False)
        self.proj = nn.Linear(embed_dim, embed_dim)

    def forward(self, seed: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
        B_s, K, C_s = seed.shape
        B_x, N, C_x = x.shape
        assert C_s == C_x, "Seed vectors and pool inputs must have the same embedding dimensionality!"
        head_dim = C_s // self.n_heads
        # Project seed vectors to per-head queries.
        q = self.q(seed).reshape(B_s, K, self.n_heads, head_dim).permute(0, 2, 1, 3)
        kv = self.kv(x).reshape(B_x, N, 2, self.n_heads, C_x // self.n_heads).permute(2, 0, 3, 1, 4)
        k, v = kv.unbind(0)
        # Scaled dot-product attention; the scale factor is folded into the key side.
        attn = (q @ (k.transpose(-2, -1) * self.scale)).softmax(dim=-1)
        pooled = (attn @ v).transpose(1, 2).reshape(B_s, K, C_s)
        # Project back to `embed_dim`.
        return self.proj(pooled)
class MAPBlock(nn.Module):
    def __init__(
        self,
        n_latents: int,
        embed_dim: int,
        n_heads: int,
        output_dim: Optional[int] = None,
        mlp_ratio: float = 4.0,
        do_rms_norm: bool = True,
        do_swish_glu: bool = True,
    ) -> None:
        """Multiheaded Attention Pooling Block -- note that for MAP, we adopt earlier post-norm conventions.

        :param n_latents: number of learned seed ("latent") vectors used for pooling.
        :param embed_dim: dimensionality of the incoming tokens.
        :param n_heads: base head count; the attention module runs with 2 * n_heads heads.
        :param output_dim: working/output width of the block; defaults to `embed_dim` when None.
        :param mlp_ratio: hidden-layer expansion factor of the feed-forward component.
        :param do_rms_norm: use RMSNorm instead of LayerNorm.
        :param do_swish_glu: use a SwishGLU feed-forward instead of Linear+GELU.
        """
        super().__init__()
        self.n_latents, self.n_heads = n_latents, 2 * n_heads
        # Fix: the original assigned `self.embed_dim = output_dim` and only
        # checked for None *after* using it, crashing on output_dim=None.
        # Resolve the effective width up front (non-None behavior unchanged).
        self.embed_dim = embed_dim if output_dim is None else output_dim

        # Projection Operator -- maps the pooled inputs to the working width.
        self.projection = nn.Linear(embed_dim, self.embed_dim)

        # Initialize Latents (the learned seed vectors).
        self.latents = nn.Parameter(torch.zeros(self.n_latents, self.embed_dim))
        nn.init.normal_(self.latents, std=0.02)

        # Custom MAP Attention (seed, encoder outputs) -> seed
        self.attn_norm = RMSNorm(self.embed_dim) if do_rms_norm else nn.LayerNorm(self.embed_dim, eps=1e-6)
        self.attn = MAPAttention(self.embed_dim, n_heads=self.n_heads)

        # Position-wise Feed-Forward Components
        self.mlp_norm = RMSNorm(self.embed_dim) if do_rms_norm else nn.LayerNorm(self.embed_dim, eps=1e-6)
        self.mlp = nn.Sequential(
            # Handle SwishGLU vs. GELU MLP...
            (
                SwishGLU(self.embed_dim, int(mlp_ratio * self.embed_dim))
                if do_swish_glu
                else nn.Sequential(nn.Linear(self.embed_dim, int(mlp_ratio * self.embed_dim)), nn.GELU())
            ),
            nn.Linear(int(mlp_ratio * self.embed_dim), self.embed_dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Pool `x` (bsz, n_tokens, embed_dim) down via the latent seed vectors.

        Returns (bsz, output_dim) when n_latents == 1 (the latent axis is
        squeezed), otherwise (bsz, n_latents, output_dim).
        """
        # Fix: `repeat` was called unqualified, but only the `einops` module is
        # imported at file level -- call through the module to avoid a NameError.
        latents = einops.repeat(self.latents, "n_latents d -> bsz n_latents d", bsz=x.shape[0])
        latents = self.attn_norm(latents + self.attn(latents, self.projection(x)))
        latents = self.mlp_norm(latents + self.mlp(latents))
        # squeeze only collapses the latent axis when n_latents == 1.
        return latents.squeeze(dim=1)
class SiamneseDecoder(nn.Module):
    """Non-causal Block stack with cross-attention and a final LayerNorm.

    NOTE(review): the class name is misspelled ("Siamnese" vs. "Siamese") but
    is kept as-is because external callers reference it.
    """

    def __init__(
        self,
        embed_dim: int,
        n_heads: int,
        attn_pdrop: float,
        resid_pdrop: float,
        n_layers: int,
        block_size: int,
        bias: bool = False,
        use_rot_embed: bool = False,
        rotary_xpos: bool = False,
        mlp_pdrop: float = 0,
        use_cross_attention: bool = True,
    ):
        super().__init__()
        layers = [
            Block(
                embed_dim,
                n_heads,
                attn_pdrop,
                resid_pdrop,
                mlp_pdrop,
                block_size,
                causal=False,
                use_cross_attention=use_cross_attention,
                use_rot_embed=use_rot_embed,
                rotary_xpos=rotary_xpos,
                bias=bias,
            )
            for _ in range(n_layers)
        ]
        self.blocks = nn.Sequential(*layers)
        self.ln = LayerNorm(embed_dim, bias)

    def forward(self, x, cond=None, custom_attn_mask=None):
        """Run `x` through every block (cross-attending to `cond` when given), then normalize."""
        for block in self.blocks:
            x = block(x, cond, custom_attn_mask=custom_attn_mask)
        return self.ln(x)
class ClipStyleProjection(nn.Module):
    """Projects a token sequence into a single CLIP-style latent using one of
    several pooling strategies selected by `clip_style`."""

    def __init__(self, clip_style, token_dim=384, clip_token_index=0, num_token=4):
        super(ClipStyleProjection, self).__init__()
        self.clip_style = clip_style
        self.clip_token_index = clip_token_index
        if clip_style in ('map', 'map_state_only'):
            # Multiheaded attention pooling with a single latent.
            self.latent_proj = MAPBlock(1, token_dim, 8, output_dim=token_dim)
        elif clip_style in ('mean_pooling', 'mean_pool_state_only'):
            self.latent_proj = MeanPooling(token_dim)
        elif clip_style == 'mlp':
            # Flattened tokens go through a small MLP head.
            self.latent_proj = nn.Sequential(
                nn.Linear(num_token * token_dim, token_dim),
                nn.LayerNorm(token_dim),
                nn.Tanh()
            )
        elif clip_style in ('single_token', 'multihead'):
            # Neither style needs a projection.
            self.latent_proj = nn.Identity()
        else:
            raise ValueError("Invalid clip_style. Expected 'map', 'mean_pooling', or 'single_token' or 'multihead'.")

    def forward(self, x):
        # Slice / reshape the tokens as each style expects before projecting.
        if self.clip_style == 'single_token':
            x = x[:, self.clip_token_index, :]
        elif self.clip_style in ('map_state_only', 'mean_pool_state_only'):
            # Drop the first token and pool only over the state tokens.
            x = x[:, 1:]
        elif self.clip_style == 'mlp':
            x = einops.rearrange(x, 'b t d -> b (t d)')
        return self.latent_proj(x)
class MeanPooling(nn.Module):
    """Averages tokens over the sequence dimension, returning (batch, token_dim)."""

    def __init__(self, token_dim):
        super(MeanPooling, self).__init__()
        self.token_dim = token_dim

    def forward(self, x):
        pooled = x.mean(dim=1)
        return pooled.view(-1, self.token_dim)