from typing import Any, Dict, Optional, Tuple, Union

import torch
from torch import nn

from diffusers.models.activations import get_activation
from diffusers.models.attention_processor import Attention
from diffusers.models.dual_transformer_2d import DualTransformer2DModel
from diffusers.models.resnet import Downsample2D, ResnetBlock2D, Upsample2D
from diffusers.utils import is_torch_version, logging
from diffusers.utils.torch_utils import apply_freeu

from .transformer_2d import Transformer2DModel

logger = logging.get_logger(__name__)

def get_down_block(
    down_block_type: str,
    num_layers: int,
    in_channels: int,
    out_channels: int,
    temb_channels: int,
    add_downsample: bool,
    resnet_eps: float,
    resnet_act_fn: str,
    transformer_layers_per_block: int = 1,
    num_attention_heads: Optional[int] = None,
    resnet_groups: Optional[int] = None,
    cross_attention_dim: Optional[int] = None,
    downsample_padding: Optional[int] = None,
    dual_cross_attention: bool = False,
    use_linear_projection: bool = False,
    only_cross_attention: bool = False,
    upcast_attention: bool = False,
    resnet_time_scale_shift: str = "default",
    attention_type: str = "default",
    resnet_skip_time_act: bool = False,
    resnet_out_scale_factor: float = 1.0,
    cross_attention_norm: Optional[str] = None,
    attention_head_dim: Optional[int] = None,
    downsample_type: Optional[str] = None,
    dropout: float = 0.0,
) -> nn.Module:
    if attention_head_dim is None:
        logger.warning(
            f"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}."
        )
        attention_head_dim = num_attention_heads

    # Strip the legacy "UNetRes" prefix so both old and new block names
    # resolve to the same classes.
    down_block_type = (
        down_block_type[7:]
        if down_block_type.startswith("UNetRes")
        else down_block_type
    )
    if down_block_type == "DownBlock2D":
        return DownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            dropout=dropout,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif down_block_type == "CrossAttnDownBlock2D":
        if cross_attention_dim is None:
            raise ValueError(
                "cross_attention_dim must be specified for CrossAttnDownBlock2D"
            )
        return CrossAttnDownBlock2D(
            num_layers=num_layers,
            transformer_layers_per_block=transformer_layers_per_block,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            dropout=dropout,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
            attention_type=attention_type,
        )
    raise ValueError(f"{down_block_type} does not exist.")

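
# Illustrative usage sketch (not part of the original module): building a plain
# `DownBlock2D` through the factory above. All channel counts and the 8x8 input
# resolution are arbitrary assumptions for demonstration.
def _demo_get_down_block():
    block = get_down_block(
        "DownBlock2D",
        num_layers=2,
        in_channels=32,
        out_channels=64,
        temb_channels=128,
        add_downsample=True,
        resnet_eps=1e-6,
        resnet_act_fn="swish",
        resnet_groups=32,
        downsample_padding=1,
        attention_head_dim=8,  # provided explicitly to avoid the warning above
    )
    sample = torch.randn(1, 32, 8, 8)
    temb = torch.randn(1, 128)
    hidden_states, output_states = block(sample, temb)
    # Two resnet outputs plus the downsampled output come back as residuals.
    assert hidden_states.shape == (1, 64, 4, 4)
    assert len(output_states) == 3
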
def get_up_block(
    up_block_type: str,
    num_layers: int,
    in_channels: int,
    out_channels: int,
    prev_output_channel: int,
    temb_channels: int,
    add_upsample: bool,
    resnet_eps: float,
    resnet_act_fn: str,
    resolution_idx: Optional[int] = None,
    transformer_layers_per_block: int = 1,
    num_attention_heads: Optional[int] = None,
    resnet_groups: Optional[int] = None,
    cross_attention_dim: Optional[int] = None,
    dual_cross_attention: bool = False,
    use_linear_projection: bool = False,
    only_cross_attention: bool = False,
    upcast_attention: bool = False,
    resnet_time_scale_shift: str = "default",
    attention_type: str = "default",
    resnet_skip_time_act: bool = False,
    resnet_out_scale_factor: float = 1.0,
    cross_attention_norm: Optional[str] = None,
    attention_head_dim: Optional[int] = None,
    upsample_type: Optional[str] = None,
    dropout: float = 0.0,
) -> nn.Module:
    if attention_head_dim is None:
        logger.warning(
            f"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}."
        )
        attention_head_dim = num_attention_heads

    up_block_type = (
        up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
    )
    if up_block_type == "UpBlock2D":
        return UpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            resolution_idx=resolution_idx,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "CrossAttnUpBlock2D":
        if cross_attention_dim is None:
            raise ValueError(
                "cross_attention_dim must be specified for CrossAttnUpBlock2D"
            )
        return CrossAttnUpBlock2D(
            num_layers=num_layers,
            transformer_layers_per_block=transformer_layers_per_block,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            resolution_idx=resolution_idx,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
            attention_type=attention_type,
        )

    raise ValueError(f"{up_block_type} does not exist.")

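
# Illustrative usage sketch (not part of the original module): the up-block
# factory mirrors `get_down_block` but additionally takes the channel count of
# the previous up block's output. All shapes below are assumptions for the demo.
def _demo_get_up_block():
    block = get_up_block(
        "UpBlock2D",
        num_layers=1,
        in_channels=32,
        out_channels=32,
        prev_output_channel=64,
        temb_channels=128,
        add_upsample=True,
        resnet_eps=1e-6,
        resnet_act_fn="swish",
        resnet_groups=32,
        attention_head_dim=8,
    )
    sample = torch.randn(1, 64, 4, 4)  # output of the previous up block
    skip = (torch.randn(1, 32, 4, 4),)  # matching residual from the down path
    temb = torch.randn(1, 128)
    out = block(sample, skip, temb)
    assert out.shape == (1, 32, 8, 8)
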
class AutoencoderTinyBlock(nn.Module):
    """
    Tiny Autoencoder block used in [`AutoencoderTiny`]. It is a mini residual module consisting of plain conv + ReLU
    blocks.

    Args:
        in_channels (`int`): The number of input channels.
        out_channels (`int`): The number of output channels.
        act_fn (`str`):
            The activation function to use. Supported values are `"swish"`, `"mish"`, `"gelu"`, and `"relu"`.

    Returns:
        `torch.FloatTensor`: A tensor with the same shape as the input tensor, but with the number of channels equal to
        `out_channels`.
    """

    def __init__(self, in_channels: int, out_channels: int, act_fn: str):
        super().__init__()
        act_fn = get_activation(act_fn)
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            act_fn,
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            act_fn,
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
        )
        # Project the skip path only when the channel counts differ.
        self.skip = (
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
            if in_channels != out_channels
            else nn.Identity()
        )
        self.fuse = nn.ReLU()

    def forward(self, x: torch.FloatTensor) -> torch.FloatTensor:
        return self.fuse(self.conv(x) + self.skip(x))

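
# Illustrative usage sketch (not part of the original module): the block keeps
# the spatial size and only changes the channel count; values are example choices.
def _demo_autoencoder_tiny_block():
    block = AutoencoderTinyBlock(in_channels=3, out_channels=64, act_fn="relu")
    x = torch.randn(1, 3, 32, 32)
    assert block(x).shape == (1, 64, 32, 32)
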
class UNetMidBlock2D(nn.Module):
    """
    A 2D UNet mid-block [`UNetMidBlock2D`] with multiple residual blocks and optional attention blocks.

    Args:
        in_channels (`int`): The number of input channels.
        temb_channels (`int`): The number of temporal embedding channels.
        dropout (`float`, *optional*, defaults to 0.0): The dropout rate.
        num_layers (`int`, *optional*, defaults to 1): The number of residual blocks.
        resnet_eps (`float`, *optional*, defaults to 1e-6): The epsilon value for the resnet blocks.
        resnet_time_scale_shift (`str`, *optional*, defaults to `default`):
            The type of normalization to apply to the time embeddings. This can help to improve the performance of the
            model on tasks with long-range temporal dependencies.
        resnet_act_fn (`str`, *optional*, defaults to `swish`): The activation function for the resnet blocks.
        resnet_groups (`int`, *optional*, defaults to 32):
            The number of groups to use in the group normalization layers of the resnet blocks.
        attn_groups (`Optional[int]`, *optional*, defaults to None): The number of groups for the attention blocks.
        resnet_pre_norm (`bool`, *optional*, defaults to `True`):
            Whether to use pre-normalization for the resnet blocks.
        add_attention (`bool`, *optional*, defaults to `True`): Whether to add attention blocks.
        attention_head_dim (`int`, *optional*, defaults to 1):
            Dimension of a single attention head. The number of attention heads is determined based on this value and
            the number of input channels.
        output_scale_factor (`float`, *optional*, defaults to 1.0): The output scale factor.

    Returns:
        `torch.FloatTensor`: The output of the last residual block, which is a tensor of shape `(batch_size,
        in_channels, height, width)`.
    """

    def __init__(
        self,
        in_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        attn_groups: Optional[int] = None,
        resnet_pre_norm: bool = True,
        add_attention: bool = True,
        attention_head_dim: int = 1,
        output_scale_factor: float = 1.0,
    ):
        super().__init__()
        resnet_groups = (
            resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
        )
        self.add_attention = add_attention

        if attn_groups is None:
            attn_groups = (
                resnet_groups if resnet_time_scale_shift == "default" else None
            )

        # There is always one more resnet than attention block: the block is
        # resnet -> (attn -> resnet) * num_layers.
        resnets = [
            ResnetBlock2D(
                in_channels=in_channels,
                out_channels=in_channels,
                temb_channels=temb_channels,
                eps=resnet_eps,
                groups=resnet_groups,
                dropout=dropout,
                time_embedding_norm=resnet_time_scale_shift,
                non_linearity=resnet_act_fn,
                output_scale_factor=output_scale_factor,
                pre_norm=resnet_pre_norm,
            )
        ]
        attentions = []

        if attention_head_dim is None:
            logger.warning(
                f"It is not recommended to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}."
            )
            attention_head_dim = in_channels

        for _ in range(num_layers):
            if self.add_attention:
                attentions.append(
                    Attention(
                        in_channels,
                        heads=in_channels // attention_head_dim,
                        dim_head=attention_head_dim,
                        rescale_output_factor=output_scale_factor,
                        eps=resnet_eps,
                        norm_num_groups=attn_groups,
                        spatial_norm_dim=temb_channels
                        if resnet_time_scale_shift == "spatial"
                        else None,
                        residual_connection=True,
                        bias=True,
                        upcast_softmax=True,
                        _from_deprecated_attn_block=True,
                    )
                )
            else:
                attentions.append(None)

            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

    def forward(
        self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None
    ) -> torch.FloatTensor:
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            if attn is not None:
                hidden_states = attn(hidden_states, temb=temb)
            hidden_states = resnet(hidden_states, temb)

        return hidden_states

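
# Illustrative usage sketch (not part of the original module): a mid block
# preserves the input shape. `attention_head_dim=32` gives 64 // 32 = 2
# attention heads; all sizes are assumptions for the demo.
def _demo_unet_mid_block_2d():
    block = UNetMidBlock2D(in_channels=64, temb_channels=128, attention_head_dim=32)
    sample = torch.randn(1, 64, 8, 8)
    temb = torch.randn(1, 128)
    assert block(sample, temb).shape == (1, 64, 8, 8)
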
class UNetMidBlock2DCrossAttn(nn.Module):
    def __init__(
        self,
        in_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        transformer_layers_per_block: Union[int, Tuple[int]] = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads: int = 1,
        output_scale_factor: float = 1.0,
        cross_attention_dim: int = 1280,
        dual_cross_attention: bool = False,
        use_linear_projection: bool = False,
        upcast_attention: bool = False,
        attention_type: str = "default",
    ):
        super().__init__()

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads
        resnet_groups = (
            resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
        )

        # Support both a single int and a per-layer tuple.
        if isinstance(transformer_layers_per_block, int):
            transformer_layers_per_block = [transformer_layers_per_block] * num_layers

        # There is always one more resnet than attention block.
        resnets = [
            ResnetBlock2D(
                in_channels=in_channels,
                out_channels=in_channels,
                temb_channels=temb_channels,
                eps=resnet_eps,
                groups=resnet_groups,
                dropout=dropout,
                time_embedding_norm=resnet_time_scale_shift,
                non_linearity=resnet_act_fn,
                output_scale_factor=output_scale_factor,
                pre_norm=resnet_pre_norm,
            )
        ]
        attentions = []

        for i in range(num_layers):
            if not dual_cross_attention:
                attentions.append(
                    Transformer2DModel(
                        num_attention_heads,
                        in_channels // num_attention_heads,
                        in_channels=in_channels,
                        num_layers=transformer_layers_per_block[i],
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        upcast_attention=upcast_attention,
                        attention_type=attention_type,
                    )
                )
            else:
                attentions.append(
                    DualTransformer2DModel(
                        num_attention_heads,
                        in_channels // num_attention_heads,
                        in_channels=in_channels,
                        num_layers=1,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                    )
                )
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        lora_scale = (
            cross_attention_kwargs.get("scale", 1.0)
            if cross_attention_kwargs is not None
            else 1.0
        )
        hidden_states = self.resnets[0](hidden_states, temb, scale=lora_scale)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = (
                    {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                )
                # `attn` is the repo-local Transformer2DModel, which also
                # returns intermediate reference features; only the hidden
                # states are used here.
                hidden_states, ref_feature = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
            else:
                hidden_states, ref_feature = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )
                hidden_states = resnet(hidden_states, temb, scale=lora_scale)

        return hidden_states

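
# Illustrative usage sketch (not part of the original module). This block
# relies on the repo-local `Transformer2DModel`; the 77x768 text-embedding
# shape and all channel counts are assumptions for the demo.
def _demo_unet_mid_block_2d_cross_attn():
    block = UNetMidBlock2DCrossAttn(
        in_channels=64,
        temb_channels=128,
        num_attention_heads=8,
        cross_attention_dim=768,
    )
    sample = torch.randn(1, 64, 8, 8)
    temb = torch.randn(1, 128)
    context = torch.randn(1, 77, 768)  # e.g. text-encoder hidden states
    out = block(sample, temb, encoder_hidden_states=context)
    assert out.shape == (1, 64, 8, 8)
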
class CrossAttnDownBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        transformer_layers_per_block: Union[int, Tuple[int]] = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads: int = 1,
        cross_attention_dim: int = 1280,
        output_scale_factor: float = 1.0,
        downsample_padding: int = 1,
        add_downsample: bool = True,
        dual_cross_attention: bool = False,
        use_linear_projection: bool = False,
        only_cross_attention: bool = False,
        upcast_attention: bool = False,
        attention_type: str = "default",
    ):
        super().__init__()
        resnets = []
        attentions = []

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads
        if isinstance(transformer_layers_per_block, int):
            transformer_layers_per_block = [transformer_layers_per_block] * num_layers

        for i in range(num_layers):
            # Only the first resnet changes the channel count.
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            if not dual_cross_attention:
                attentions.append(
                    Transformer2DModel(
                        num_attention_heads,
                        out_channels // num_attention_heads,
                        in_channels=out_channels,
                        num_layers=transformer_layers_per_block[i],
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        only_cross_attention=only_cross_attention,
                        upcast_attention=upcast_attention,
                        attention_type=attention_type,
                    )
                )
            else:
                attentions.append(
                    DualTransformer2DModel(
                        num_attention_heads,
                        out_channels // num_attention_heads,
                        in_channels=out_channels,
                        num_layers=1,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                    )
                )
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels,
                        use_conv=True,
                        out_channels=out_channels,
                        padding=downsample_padding,
                        name="op",
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        additional_residuals: Optional[torch.FloatTensor] = None,
    ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]:
        output_states = ()

        lora_scale = (
            cross_attention_kwargs.get("scale", 1.0)
            if cross_attention_kwargs is not None
            else 1.0
        )

        blocks = list(zip(self.resnets, self.attentions))

        for i, (resnet, attn) in enumerate(blocks):
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = (
                    {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                )
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
                hidden_states, ref_feature = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )
            else:
                hidden_states = resnet(hidden_states, temb, scale=lora_scale)
                hidden_states, ref_feature = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )

            # Apply additional residuals to the output of the last pair of
            # resnet and attention blocks.
            if i == len(blocks) - 1 and additional_residuals is not None:
                hidden_states = hidden_states + additional_residuals

            output_states = output_states + (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states, scale=lora_scale)

            output_states = output_states + (hidden_states,)

        return hidden_states, output_states

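
# Illustrative usage sketch (not part of the original module): shapes are
# assumptions; `output_states` collects one residual per layer plus the
# downsampled output for the corresponding up block.
def _demo_cross_attn_down_block_2d():
    block = CrossAttnDownBlock2D(
        in_channels=32,
        out_channels=64,
        temb_channels=128,
        num_layers=2,
        num_attention_heads=8,
        cross_attention_dim=768,
    )
    sample = torch.randn(1, 32, 8, 8)
    temb = torch.randn(1, 128)
    context = torch.randn(1, 77, 768)
    hidden_states, output_states = block(sample, temb, encoder_hidden_states=context)
    assert hidden_states.shape == (1, 64, 4, 4)
    assert len(output_states) == 3
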
class DownBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor: float = 1.0,
        add_downsample: bool = True,
        downsample_padding: int = 1,
    ):
        super().__init__()
        resnets = []

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels,
                        use_conv=True,
                        out_channels=out_channels,
                        padding=downsample_padding,
                        name="op",
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        temb: Optional[torch.FloatTensor] = None,
        scale: float = 1.0,
    ) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]:
        output_states = ()

        for resnet in self.resnets:
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                if is_torch_version(">=", "1.11.0"):
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet),
                        hidden_states,
                        temb,
                        use_reentrant=False,
                    )
                else:
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb
                    )
            else:
                hidden_states = resnet(hidden_states, temb, scale=scale)

            output_states = output_states + (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states, scale=scale)

            output_states = output_states + (hidden_states,)

        return hidden_states, output_states

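
# Illustrative usage sketch (not part of the original module): gradient
# checkpointing is opt-in via the `gradient_checkpointing` flag and only
# active in training mode. Sizes are assumptions for the demo.
def _demo_down_block_2d_checkpointing():
    block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128, num_layers=2)
    block.gradient_checkpointing = True
    block.train()
    sample = torch.randn(1, 32, 8, 8, requires_grad=True)
    temb = torch.randn(1, 128)
    hidden_states, output_states = block(sample, temb)
    hidden_states.mean().backward()  # recomputes resnet activations on the fly
    assert sample.grad is not None
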
class CrossAttnUpBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        prev_output_channel: int,
        temb_channels: int,
        resolution_idx: Optional[int] = None,
        dropout: float = 0.0,
        num_layers: int = 1,
        transformer_layers_per_block: Union[int, Tuple[int]] = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads: int = 1,
        cross_attention_dim: int = 1280,
        output_scale_factor: float = 1.0,
        add_upsample: bool = True,
        dual_cross_attention: bool = False,
        use_linear_projection: bool = False,
        only_cross_attention: bool = False,
        upcast_attention: bool = False,
        attention_type: str = "default",
    ):
        super().__init__()
        resnets = []
        attentions = []

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads

        if isinstance(transformer_layers_per_block, int):
            transformer_layers_per_block = [transformer_layers_per_block] * num_layers

        for i in range(num_layers):
            # Each resnet consumes the previous output concatenated with the
            # matching skip connection from the down path.
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            if not dual_cross_attention:
                attentions.append(
                    Transformer2DModel(
                        num_attention_heads,
                        out_channels // num_attention_heads,
                        in_channels=out_channels,
                        num_layers=transformer_layers_per_block[i],
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        only_cross_attention=only_cross_attention,
                        upcast_attention=upcast_attention,
                        attention_type=attention_type,
                    )
                )
            else:
                attentions.append(
                    DualTransformer2DModel(
                        num_attention_heads,
                        out_channels // num_attention_heads,
                        in_channels=out_channels,
                        num_layers=1,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                    )
                )
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList(
                [Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]
            )
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False
        self.resolution_idx = resolution_idx

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        upsample_size: Optional[int] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        lora_scale = (
            cross_attention_kwargs.get("scale", 1.0)
            if cross_attention_kwargs is not None
            else 1.0
        )
        # FreeU is only active when the UNet has set the s1/s2/b1/b2 scales
        # on this block.
        is_freeu_enabled = (
            getattr(self, "s1", None)
            and getattr(self, "s2", None)
            and getattr(self, "b1", None)
            and getattr(self, "b2", None)
        )

        for resnet, attn in zip(self.resnets, self.attentions):
            # Pop the matching skip connection produced by the down path.
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]

            if is_freeu_enabled:
                hidden_states, res_hidden_states = apply_freeu(
                    self.resolution_idx,
                    hidden_states,
                    res_hidden_states,
                    s1=self.s1,
                    s2=self.s2,
                    b1=self.b1,
                    b2=self.b2,
                )

            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = (
                    {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                )
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
                hidden_states, ref_feature = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )
            else:
                hidden_states = resnet(hidden_states, temb, scale=lora_scale)
                hidden_states, ref_feature = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(
                    hidden_states, upsample_size, scale=lora_scale
                )

        return hidden_states

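
# Illustrative usage sketch (not part of the original module): the up block
# consumes skip connections from the end of `res_hidden_states_tuple`. With
# num_layers=1 the single skip must carry `in_channels` channels; all shapes
# are assumptions for the demo.
def _demo_cross_attn_up_block_2d():
    block = CrossAttnUpBlock2D(
        in_channels=32,
        out_channels=64,
        prev_output_channel=64,
        temb_channels=128,
        num_layers=1,
        num_attention_heads=8,
        cross_attention_dim=768,
    )
    sample = torch.randn(1, 64, 4, 4)
    skip = (torch.randn(1, 32, 4, 4),)
    temb = torch.randn(1, 128)
    context = torch.randn(1, 77, 768)
    out = block(sample, skip, temb, encoder_hidden_states=context)
    assert out.shape == (1, 64, 8, 8)
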
class UpBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        prev_output_channel: int,
        out_channels: int,
        temb_channels: int,
        resolution_idx: Optional[int] = None,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor: float = 1.0,
        add_upsample: bool = True,
    ):
        super().__init__()
        resnets = []

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList(
                [Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]
            )
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False
        self.resolution_idx = resolution_idx

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
        temb: Optional[torch.FloatTensor] = None,
        upsample_size: Optional[int] = None,
        scale: float = 1.0,
    ) -> torch.FloatTensor:
        is_freeu_enabled = (
            getattr(self, "s1", None)
            and getattr(self, "s2", None)
            and getattr(self, "b1", None)
            and getattr(self, "b2", None)
        )

        for resnet in self.resnets:
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]

            if is_freeu_enabled:
                hidden_states, res_hidden_states = apply_freeu(
                    self.resolution_idx,
                    hidden_states,
                    res_hidden_states,
                    s1=self.s1,
                    s2=self.s2,
                    b1=self.b1,
                    b2=self.b2,
                )

            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                if is_torch_version(">=", "1.11.0"):
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet),
                        hidden_states,
                        temb,
                        use_reentrant=False,
                    )
                else:
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb
                    )
            else:
                hidden_states = resnet(hidden_states, temb, scale=scale)

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size, scale=scale)

        return hidden_states