from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

import paddle
import paddle.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..loaders import UNet2DConditionLoadersMixin
from ..modeling_utils import ModelMixin
from ..utils import BaseOutput, logging
from .cross_attention import AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .unet_2d_blocks import (
    CrossAttnDownBlock2D,
    CrossAttnUpBlock2D,
    DownBlock2D,
    UNetMidBlock2DCrossAttn,
    UNetMidBlock2DSimpleCrossAttn,
    UpBlock2D,
    get_down_block,
    get_up_block,
)

logger = logging.get_logger(__name__)


@dataclass
class UNet2DConditionOutput(BaseOutput):
    """
    Args:
        sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Hidden states conditioned on the `encoder_hidden_states` input. Output of the last layer of the model.
    """

    sample: paddle.Tensor


class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
    r"""
    UNet2DConditionModel is a conditional 2D UNet model that takes a noisy sample, a conditional state, and a
    timestep, and returns a sample-shaped output.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
    implements for all models (such as downloading or saving).

    Parameters:
        sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
            Height and width of the input/output sample.
        in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
        out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
        center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip the sin to cos in the time embedding.
        freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
            The tuple of downsample blocks to use.
        mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
            The mid block type. Choose from `UNetMidBlock2DCrossAttn` or `UNetMidBlock2DSimpleCrossAttn`.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
            The tuple of upsample blocks to use.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
            The tuple of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
        downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
        mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
        norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
        cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.
        attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
        resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
            for resnet blocks, see [`~models.resnet.ResnetBlock2D`]. Choose from `default` or `scale_shift`.
        class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use, which is
            ultimately summed with the time embeddings. Choose from `None`, `"timestep"`, or `"identity"`.
    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        sample_size: Optional[int] = None,
        in_channels: int = 4,
        out_channels: int = 4,
        center_input_sample: bool = False,
        flip_sin_to_cos: bool = True,
        freq_shift: int = 0,
        down_block_types: Tuple[str] = (
            "CrossAttnDownBlock2D",
            "CrossAttnDownBlock2D",
            "CrossAttnDownBlock2D",
            "DownBlock2D",
        ),
        mid_block_type: str = "UNetMidBlock2DCrossAttn",
        up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
        only_cross_attention: Union[bool, Tuple[bool]] = False,
        block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
        layers_per_block: int = 2,
        downsample_padding: int = 1,
        mid_block_scale_factor: float = 1,
        act_fn: str = "silu",
        norm_num_groups: int = 32,
        norm_eps: float = 1e-5,
        cross_attention_dim: int = 1280,
        attention_head_dim: Union[int, Tuple[int]] = 8,
        dual_cross_attention: bool = False,
        use_linear_projection: bool = False,
        class_embed_type: Optional[str] = None,
        num_class_embeds: Optional[int] = None,
        upcast_attention: bool = False,
        resnet_time_scale_shift: str = "default",
    ):
        super().__init__()

        self.sample_size = sample_size
        time_embed_dim = block_out_channels[0] * 4

        # input
        self.conv_in = nn.Conv2D(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))

        # time
        self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
        timestep_input_dim = block_out_channels[0]

        self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)

        # class embedding
        if class_embed_type is None and num_class_embeds is not None:
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif class_embed_type == "timestep":
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        elif class_embed_type == "identity":
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        else:
            self.class_embedding = None

        self.down_blocks = nn.LayerList([])
        self.mid_block = None
        self.up_blocks = nn.LayerList([])

        if isinstance(only_cross_attention, bool):
            only_cross_attention = [only_cross_attention] * len(down_block_types)

        if isinstance(attention_head_dim, int):
            attention_head_dim = (attention_head_dim,) * len(down_block_types)
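
        # For example, with the default four down block types and
        # `attention_head_dim=8`, the line above yields `(8, 8, 8, 8)`; a
        # per-block tuple can be passed instead to vary head dims by level.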

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=attention_head_dim[i],
                downsample_padding=downsample_padding,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
            )
            self.down_blocks.append(down_block)

        # mid
        if mid_block_type == "UNetMidBlock2DCrossAttn":
            self.mid_block = UNetMidBlock2DCrossAttn(
                in_channels=block_out_channels[-1],
                temb_channels=time_embed_dim,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                output_scale_factor=mid_block_scale_factor,
                resnet_time_scale_shift=resnet_time_scale_shift,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=attention_head_dim[-1],
                resnet_groups=norm_num_groups,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                upcast_attention=upcast_attention,
            )
        elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn":
            self.mid_block = UNetMidBlock2DSimpleCrossAttn(
                in_channels=block_out_channels[-1],
                temb_channels=time_embed_dim,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                output_scale_factor=mid_block_scale_factor,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=attention_head_dim[-1],
                resnet_groups=norm_num_groups,
                resnet_time_scale_shift=resnet_time_scale_shift,
            )
        else:
            raise ValueError(f"unknown mid_block_type : {mid_block_type}")

        # count how many layers upsample the images
        self.num_upsamplers = 0

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_attention_head_dim = list(reversed(attention_head_dim))
        reversed_only_cross_attention = list(reversed(only_cross_attention))

        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            is_final_block = i == len(block_out_channels) - 1

            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            # add upsample block for all BUT final block
            if not is_final_block:
                add_upsample = True
                self.num_upsamplers += 1
            else:
                add_upsample = False

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block + 1,
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=time_embed_dim,
                add_upsample=add_upsample,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=reversed_attention_head_dim[i],
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=reversed_only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        self.conv_norm_out = nn.GroupNorm(
            num_channels=block_out_channels[0], num_groups=norm_num_groups, epsilon=norm_eps
        )
        self.conv_act = nn.Silu()
        self.conv_out = nn.Conv2D(block_out_channels[0], out_channels, 3, padding=1)
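
        # With the default config, the three upsampling blocks give an overall
        # factor of 2**self.num_upsamplers == 8, so inputs whose spatial size
        # is a multiple of 8 need no forced `upsample_size` in `forward`.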

    @property
    def attn_processors(self) -> Dict[str, AttnProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: nn.Layer, processors: Dict[str, AttnProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
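
    # Keys follow the module path, e.g. a down block's first attention layer
    # would show up as something like
    # "down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor"
    # (illustrative path, not taken from this file).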

    def set_attn_processor(self, processor: Union[AttnProcessor, Dict[str, AttnProcessor]]):
        r"""
        Parameters:
            processor (`dict` of `AttnProcessor` or `AttnProcessor`):
                The instantiated processor class, or a dictionary of processor classes, that will be set as the
                processor of **all** `CrossAttention` layers. If `processor` is a dict, each key must name the path
                to the corresponding cross attention processor. Passing a dict is strongly recommended when setting
                trainable attention processors.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: nn.Layer, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
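
    # Usage sketch (hedged: `CrossAttnProcessor` is the assumed default
    # processor class from `.cross_attention`; substitute whichever processor
    # class you actually use):
    #
    #     unet.set_attn_processor(CrossAttnProcessor())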

    def set_attention_slice(self, slice_size):
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module splits the input tensor into slices to compute attention
        in several steps. This is useful for saving some memory in exchange for a small decrease in speed.

        Args:
            slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
                When `"auto"`, halves the input to the attention heads, so attention is computed in two steps. If
                `"max"`, the maximum amount of memory is saved by running only one slice at a time. If a number is
                provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
                `attention_head_dim` must be a multiple of `slice_size`.
        """
        sliceable_head_dims = []

        def fn_recursive_retrieve_sliceable_dims(module: nn.Layer):
            if hasattr(module, "set_attention_slice"):
                sliceable_head_dims.append(module.sliceable_head_dim)

            for child in module.children():
                fn_recursive_retrieve_sliceable_dims(child)

        # retrieve number of attention layers
        for module in self.children():
            fn_recursive_retrieve_sliceable_dims(module)

        num_sliceable_layers = len(sliceable_head_dims)

        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = [dim // 2 for dim in sliceable_head_dims]
        elif slice_size == "max":
            # make smallest slice possible
            slice_size = num_sliceable_layers * [1]

        slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size

        if len(slice_size) != len(sliceable_head_dims):
            raise ValueError(
                f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
                f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
            )

        for i in range(len(slice_size)):
            size = slice_size[i]
            dim = sliceable_head_dims[i]
            if size is not None and size > dim:
                raise ValueError(f"size {size} has to be smaller or equal to {dim}.")

        # Recursively walk through all the children.
        # Any child which exposes the `set_attention_slice` method
        # gets the message.
        def fn_recursive_set_attention_slice(module: nn.Layer, slice_size: List[int]):
            if hasattr(module, "set_attention_slice"):
                module.set_attention_slice(slice_size.pop())

            for child in module.children():
                fn_recursive_set_attention_slice(child, slice_size)

        reversed_slice_size = list(reversed(slice_size))
        for module in self.children():
            fn_recursive_set_attention_slice(module, reversed_slice_size)
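
    # Typical calls: `unet.set_attention_slice("auto")` to halve each head
    # dimension, or `unet.set_attention_slice("max")` to run one slice at a
    # time for the lowest memory footprint.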

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D, CrossAttnUpBlock2D, UpBlock2D)):
            module.gradient_checkpointing = value
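
    # Note: this hook is presumably invoked through `ModelMixin` (e.g. an
    # `enable_gradient_checkpointing()` helper applying it to every sublayer);
    # that wiring lives in the superclass, not in this file.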

    def forward(
        self,
        sample: paddle.Tensor,
        timestep: Union[paddle.Tensor, float, int],
        encoder_hidden_states: paddle.Tensor,
        class_labels: Optional[paddle.Tensor] = None,
        attention_mask: Optional[paddle.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        return_dict: bool = True,
    ):
        r"""
        Args:
            sample (`paddle.Tensor`): (batch, channel, height, width) noisy inputs tensor
            timestep (`paddle.Tensor` or `float` or `int`): (batch) timesteps
            encoder_hidden_states (`paddle.Tensor`): (batch, sequence_length, feature_dim) encoder hidden states
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
                tuple.

        Returns:
            [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
            [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
            returning a tuple, the first element is the sample tensor.
        """
        # By default, samples have to be at least a multiple of the overall upsampling factor.
        # The overall upsampling factor is equal to 2 ** (number of upsampling layers).
        # However, the upsampling interpolation output size can be forced to fit any upsampling
        # size on the fly if necessary.
        default_overall_up_factor = 2**self.num_upsamplers

        # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
        forward_upsample_size = False
        upsample_size = None

        if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
            logger.info("Forward upsample size to force interpolation output size.")
            forward_upsample_size = True

        # prepare attention_mask
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.cast(sample.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)
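        # The line above turns a {0, 1} keep/ignore mask into an additive bias:
        # kept positions become 0.0 and masked positions become -10000.0, which
        # drives their softmax attention weights to (effectively) zero.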

        # 0. center input if necessary
        if self.config.center_input_sample:
            sample = 2 * sample - 1.0

        # 1. time
        timesteps = timestep
        if not paddle.is_tensor(timesteps):
            # wrap a Python int/float into a 1-element int64 tensor
            timesteps = paddle.to_tensor([timesteps], dtype="int64")
        elif paddle.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None]

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps.expand(
            [
                sample.shape[0],
            ]
        )

        t_emb = self.time_proj(timesteps)

        # `Timesteps` does not contain any weights and always returns f32 tensors,
        # but the time embedding might be running in fp16, so we need to cast here.
        t_emb = t_emb.cast(self.dtype)
        emb = self.time_embedding(t_emb)
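        # Shapes at this point: `t_emb` is a (batch, block_out_channels[0])
        # sinusoidal embedding, and `emb` is its (batch, time_embed_dim) MLP
        # projection, where time_embed_dim = block_out_channels[0] * 4.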

        if self.class_embedding is not None:
            if class_labels is None:
                raise ValueError("class_labels should be provided when num_class_embeds > 0")

            if self.config.class_embed_type == "timestep":
                class_labels = self.time_proj(class_labels)

            class_emb = self.class_embedding(class_labels).cast(self.dtype)
            emb = emb + class_emb

        # 2. pre-process
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
                sample, res_samples = downsample_block(
                    hidden_states=sample,
                    temb=emb,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(
            sample,
            emb,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=attention_mask,
            cross_attention_kwargs=cross_attention_kwargs,
        )

        # 5. up
        for i, upsample_block in enumerate(self.up_blocks):
            is_final_block = i == len(self.up_blocks) - 1

            # each up block consumes as many skip tensors as it has resnets
            res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
            down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]

            # if we have not reached the final block and need to forward the
            # upsample size, we do it here
            if not is_final_block and forward_upsample_size:
                upsample_size = down_block_res_samples[-1].shape[2:]

            if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
                sample = upsample_block(
                    hidden_states=sample,
                    temb=emb,
                    res_hidden_states_tuple=res_samples,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    upsample_size=upsample_size,
                    attention_mask=attention_mask,
                )
            else:
                sample = upsample_block(
                    hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
                )

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        if not return_dict:
            return (sample,)

        return UNet2DConditionOutput(sample=sample)
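
# An end-to-end sketch (hedged: shapes assume the default config above, where
# `encoder_hidden_states` must match `cross_attention_dim`, i.e. 1280):
#
#     unet = UNet2DConditionModel()
#     latents = paddle.randn([2, 4, 64, 64])
#     states = paddle.randn([2, 77, 1280])
#     out = unet(latents, timestep=10, encoder_hidden_states=states).sample
#     # out.shape == [2, 4, 64, 64]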