import math
import os
from dataclasses import dataclass

import torch
from einops import rearrange
from torch import Tensor, nn

from src.flux.math import adaptive_attention, attention, rope

class EmbedND(nn.Module):
    def __init__(self, dim: int, theta: int, axes_dim: list[int]):
        super().__init__()
        self.dim = dim
        self.theta = theta
        self.axes_dim = axes_dim

    def forward(self, ids: Tensor) -> Tensor:
        n_axes = ids.shape[-1]
        emb = torch.cat(
            [rope(ids[..., i], self.axes_dim[i], self.theta) for i in range(n_axes)],
            dim=-3,
        )
        return emb.unsqueeze(1)

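# Usage sketch (hypothetical values; assumes rope() from src.flux.math returns a
# tensor of shape (..., dim // 2, 2, 2), as in the reference Flux code):
#
#     pe_embedder = EmbedND(dim=128, theta=10_000, axes_dim=[16, 56, 56])
#     ids = torch.zeros(1, 4096, 3)  # (batch, seq_len, n_axes) position ids
#     pe = pe_embedder(ids)          # (1, 1, 4096, 64, 2, 2), 64 == sum(axes_dim) // 2
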
def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 1000.0):
    """
    Create sinusoidal timestep embeddings.

    :param t: a 1-D Tensor of N indices, one per batch element.
        These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :param time_factor: multiplier applied to t before computing the embedding.
    :return: an (N, D) Tensor of positional embeddings.
    """
    t = time_factor * t
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(
        t.device
    )

    args = t[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    if torch.is_floating_point(t):
        embedding = embedding.to(t)
    return embedding

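# Worked example (hypothetical values): the first `dim // 2` channels are cosines,
# the rest sines, over frequencies max_period ** (-i / half).
#
#     t = torch.tensor([0.25, 1.0])          # fractional timesteps
#     emb = timestep_embedding(t, dim=256)   # (2, 256)
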
class MLPEmbedder(nn.Module):
    def __init__(self, in_dim: int, hidden_dim: int):
        super().__init__()
        self.in_layer = nn.Linear(in_dim, hidden_dim, bias=True)
        self.silu = nn.SiLU()
        self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=True)

    def forward(self, x: Tensor) -> Tensor:
        return self.out_layer(self.silu(self.in_layer(x)))

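# Usage sketch (hypothetical sizes): lifts the sinusoidal timestep embedding
# into the transformer width.
#
#     embedder = MLPEmbedder(in_dim=256, hidden_dim=3072)
#     vec = embedder(timestep_embedding(torch.tensor([0.5]), 256))  # (1, 3072)
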
class RMSNorm(torch.nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.scale = nn.Parameter(torch.ones(dim))

    def forward(self, x: Tensor):
        x_dtype = x.dtype
        x = x.float()
        rrms = torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + 1e-6)
        return (x * rrms).to(dtype=x_dtype) * self.scale

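# Shape sketch (hypothetical values): normalizes over the last dim in float32,
# i.e. y = x / sqrt(mean(x ** 2) + 1e-6) * scale, then casts back before scaling.
#
#     norm = RMSNorm(dim=128)
#     q = torch.randn(2, 24, 4096, 128)
#     q = norm(q)  # same shape; normalized over the last dim
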
class QKNorm(torch.nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.query_norm = RMSNorm(dim)
        self.key_norm = RMSNorm(dim)

    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> tuple[Tensor, Tensor]:
        q = self.query_norm(q)
        k = self.key_norm(k)
        return q.to(v), k.to(v)

class SelfAttention(nn.Module):
    def __init__(self, dim: int, num_heads: int = 8, qkv_bias: bool = False):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.norm = QKNorm(head_dim)
        self.proj = nn.Linear(dim, dim)

    def forward(self, x: Tensor, pe: Tensor) -> Tensor:
        qkv = self.qkv(x)
        q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        q, k = self.norm(q, k, v)
        x = attention(q, k, v, pe=pe)
        x = self.proj(x)
        return x

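# Usage sketch (hypothetical sizes; pe built by EmbedND over the same 4096 tokens):
#
#     attn = SelfAttention(dim=3072, num_heads=24)
#     x = torch.randn(1, 4096, 3072)
#     out = attn(x, pe)  # (1, 4096, 3072)
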
@dataclass
class ModulationOut:
    shift: Tensor
    scale: Tensor
    gate: Tensor

class Modulation(nn.Module):
    def __init__(self, dim: int, double: bool):
        super().__init__()
        self.is_double = double
        self.multiplier = 6 if double else 3
        self.lin = nn.Linear(dim, self.multiplier * dim, bias=True)

    def forward(self, vec: Tensor) -> tuple[ModulationOut, ModulationOut | None]:
        out = self.lin(nn.functional.silu(vec))[:, None, :].chunk(self.multiplier, dim=-1)

        return (
            ModulationOut(*out[:3]),
            ModulationOut(*out[3:]) if self.is_double else None,
        )

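# Usage sketch (hypothetical sizes): vec is the pooled conditioning vector
# (timestep plus pooled text embedding).
#
#     mod = Modulation(dim=3072, double=True)
#     mod1, mod2 = mod(torch.randn(1, 3072))
#     # mod1.shift / mod1.scale / mod1.gate are each (1, 1, 3072);
#     # mod2 is the second set when double=True, else None
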
class DoubleStreamBlock(nn.Module):
    def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, cur_block: int, qkv_bias: bool = False):
        super().__init__()

        mlp_hidden_dim = int(hidden_size * mlp_ratio)
        self.num_heads = num_heads
        self.hidden_size = hidden_size
        self.img_mod = Modulation(hidden_size, double=True)
        self.img_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)

        self.img_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.img_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
            nn.GELU(approximate="tanh"),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
        )

        self.txt_mod = Modulation(hidden_size, double=True)
        self.txt_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)

        self.txt_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
            nn.GELU(approximate="tanh"),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
        )
        self.cur_block = cur_block

    def forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor, cur_step: int, info) -> tuple[Tensor, Tensor]:
        img_mod1, img_mod2 = self.img_mod(vec)
        txt_mod1, txt_mod2 = self.txt_mod(vec)

        # prepare the image stream for attention
        img_modulated = self.img_norm1(img)
        img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift
        img_qkv = self.img_attn.qkv(img_modulated)
        img_q, img_k, img_v = rearrange(img_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        img_q, img_k = self.img_attn.norm(img_q, img_k, img_v)

        # prepare the text stream for attention
        txt_modulated = self.txt_norm1(txt)
        txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift
        txt_qkv = self.txt_attn.qkv(txt_modulated)
        txt_q, txt_k, txt_v = rearrange(txt_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)

        # run joint attention over the concatenated text and image tokens
        q = torch.cat((txt_q, img_q), dim=2)
        k = torch.cat((txt_k, img_k), dim=2)
        v = torch.cat((txt_v, img_v), dim=2)

        if not info['inverse'] and 'attn_guidance' in info['editing_strategy']:
            attn = adaptive_attention(q, k, v, pe=pe, txt_shape=txt.shape[1], img_shape=img.shape[1], cur_step=cur_step, cur_block=self.cur_block, info=info)
        else:
            attn = attention(q, k, v, pe=pe)

        txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]

        # update the image stream
        img = img + img_mod1.gate * self.img_attn.proj(img_attn)
        img = img + img_mod2.gate * self.img_mlp((1 + img_mod2.scale) * self.img_norm2(img) + img_mod2.shift)

        # update the text stream
        txt = txt + txt_mod1.gate * self.txt_attn.proj(txt_attn)
        txt = txt + txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift)

        return img, txt

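# Usage sketch (hypothetical shapes and keys; `info` is the dict threaded through
# the editing pipeline, see the sketch after SingleStreamBlock for the full key set):
#
#     block = DoubleStreamBlock(hidden_size=3072, num_heads=24, mlp_ratio=4.0, cur_block=0)
#     info = {"inverse": True, "editing_strategy": []}
#     img, txt = block(img, txt, vec, pe, cur_step=0, info=info)  # shapes preserved
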
class SingleStreamBlock(nn.Module):
    """
    A DiT block with parallel linear layers as described in
    https://arxiv.org/abs/2302.05442 and adapted modulation interface.
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qk_scale: float | None = None,
    ):
        super().__init__()
        self.hidden_dim = hidden_size
        self.num_heads = num_heads
        head_dim = hidden_size // num_heads
        self.scale = qk_scale or head_dim**-0.5

        self.mlp_hidden_dim = int(hidden_size * mlp_ratio)
        # qkv and mlp_in
        self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim)
        # proj and mlp_out
        self.linear2 = nn.Linear(hidden_size + self.mlp_hidden_dim, hidden_size)

        self.norm = QKNorm(head_dim)

        self.hidden_size = hidden_size
        self.pre_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)

        self.mlp_act = nn.GELU(approximate="tanh")
        self.modulation = Modulation(hidden_size, double=False)

    def forward(self, x: Tensor, vec: Tensor, pe: Tensor, info) -> tuple[Tensor, dict]:
        mod, _ = self.modulation(vec)
        x_mod = (1 + mod.scale) * self.pre_norm(x) + mod.shift
        qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)

        q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        q, k = self.norm(q, k, v)

        # Q/K/V feature caching (inversion) and injection (editing) for this block.
        if info['inject'] and info['start_layer_index'] <= info['id'] <= info['end_layer_index']:
            # Features are keyed by timestep, solver order, block id, and stream type.
            feature_name = f"{info['t']}_{info['second_order']}_{info['id']}_{info['type']}"
            q_feature_name = feature_name + '_Q'
            k_feature_name = feature_name + '_K'
            v_feature_name = feature_name + '_V'
            if info['inverse']:
                # Inversion pass: cache the selected features on the CPU.
                if info['reuse_v']:
                    info['feature'][v_feature_name] = v.cpu()
                else:
                    editing_strategy = info['editing_strategy']
                    qkv_ratio = info['qkv_ratio']
                    if 'q' in editing_strategy:
                        info['feature'][q_feature_name] = (q * qkv_ratio[0]).cpu()
                    if 'k' in editing_strategy:
                        info['feature'][k_feature_name] = (k * qkv_ratio[1]).cpu()
                    if 'v' in editing_strategy:
                        info['feature'][v_feature_name] = (v * qkv_ratio[2]).cpu()
            else:
                # Denoising pass: replace or add back the cached features.
                if info['reuse_v']:
                    if v_feature_name in info['feature']:
                        v = info['feature'][v_feature_name].cuda()
                else:
                    editing_strategy = info['editing_strategy']
                    if 'replace_v' in editing_strategy and v_feature_name in info['feature']:
                        v = info['feature'][v_feature_name].cuda()
                    if 'add_v' in editing_strategy and v_feature_name in info['feature']:
                        v += info['feature'][v_feature_name].cuda()
                    if 'replace_k' in editing_strategy and k_feature_name in info['feature']:
                        k = info['feature'][k_feature_name].cuda()
                    if 'add_k' in editing_strategy and k_feature_name in info['feature']:
                        k += info['feature'][k_feature_name].cuda()
                    if 'replace_q' in editing_strategy and q_feature_name in info['feature']:
                        q = info['feature'][q_feature_name].cuda()
                    if 'add_q' in editing_strategy and q_feature_name in info['feature']:
                        q += info['feature'][q_feature_name].cuda()

        # compute attention, concatenate with the parallel MLP stream, and fuse
        attn = attention(q, k, v, pe=pe)
        output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
        return x + mod.gate * output, info

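# Sketch of the `info` dict consumed above (keys taken from this forward pass;
# the values shown are hypothetical):
#
#     info = {
#         "inject": True,                 # enable caching / injection at all
#         "id": 10,                       # index of the current block
#         "start_layer_index": 0,         # first block that participates
#         "end_layer_index": 37,          # last block that participates
#         "t": 0.5,                       # current timestep (part of the cache key)
#         "second_order": False,          # solver sub-step flag (part of the cache key)
#         "type": "single",               # stream type tag (part of the cache key)
#         "inverse": True,                # True: cache features; False: inject them
#         "reuse_v": True,                # shortcut: cache and reuse V only
#         "editing_strategy": "replace_v",
#         "qkv_ratio": [1.0, 1.0, 1.0],   # per-feature scaling applied when caching
#         "feature": {},                  # CPU-side feature store
#     }
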
class LastLayer(nn.Module):
    def __init__(self, hidden_size: int, patch_size: int, out_channels: int):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
        self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True))

    def forward(self, x: Tensor, vec: Tensor) -> Tensor:
        shift, scale = self.adaLN_modulation(vec).chunk(2, dim=1)
        x = (1 + scale[:, None, :]) * self.norm_final(x) + shift[:, None, :]
        x = self.linear(x)
        return x

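# Usage sketch (hypothetical sizes): projects tokens back to patch pixels.
#
#     last = LastLayer(hidden_size=3072, patch_size=1, out_channels=64)
#     out = last(x, vec)  # (B, L, patch_size ** 2 * out_channels)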