from collections import OrderedDict
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import FromOriginalModelMixin
from diffusers.models.attention_processor import (
    ADDED_KV_ATTENTION_PROCESSORS,
    CROSS_ATTENTION_PROCESSORS,
    AttentionProcessor,
    AttnAddedKVProcessor,
    AttnProcessor,
)
from diffusers.models.embeddings import (
    TextImageProjection,
    TextImageTimeEmbedding,
    TextTimeEmbedding,
    TimestepEmbedding,
    Timesteps,
)
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.unets.unet_2d_blocks import (
    CrossAttnDownBlock2D,
    DownBlock2D,
    UNetMidBlock2DCrossAttn,
    get_down_block,
)
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from diffusers.utils import BaseOutput, logging
from torch import nn
from torch.nn import functional as F

logger = logging.get_logger(__name__)


class QuickGELU(nn.Module):
    # Sigmoid-based GELU approximation, x * sigmoid(1.702 * x), as used in CLIP.
    def forward(self, x: torch.Tensor):
        return x * torch.sigmoid(1.702 * x)


class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: torch.Tensor):
        orig_type = x.dtype
        ret = super().forward(x)
        return ret.type(orig_type)


class ResidualAttentionBlock(nn.Module):
    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()

        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(
            OrderedDict(
                [
                    ("c_fc", nn.Linear(d_model, d_model * 4)),
                    ("gelu", QuickGELU()),
                    ("c_proj", nn.Linear(d_model * 4, d_model)),
                ]
            )
        )
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor):
        self.attn_mask = (
            self.attn_mask.to(dtype=x.dtype, device=x.device)
            if self.attn_mask is not None
            else None
        )
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: torch.Tensor):
        # Pre-LayerNorm residual block: attention, then the MLP, each with a skip connection.
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


@dataclass
class ControlNetOutput(BaseOutput):
    """
    The output of [`ControlNetModel_Union`].

    Args:
        down_block_res_samples (`tuple[torch.Tensor]`):
            A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should
            be of shape `(batch_size, channel * resolution, height // resolution, width // resolution)`. Output can be
            used to condition the original UNet's downsampling activations.
        mid_block_res_sample (`torch.Tensor`):
            The activation of the middle block (the lowest sample resolution). Each tensor should be of shape
            `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`.
            Output can be used to condition the original UNet's middle block activation.
    """

    down_block_res_samples: Tuple[torch.Tensor]
    mid_block_res_sample: torch.Tensor


class ControlNetConditioningEmbedding(nn.Module):
    """
    Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
    [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
    training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
    convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
    (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
    model) to encode image-space conditions ... into feature maps ..."
    """

    def __init__(
        self,
        conditioning_embedding_channels: int,
        conditioning_channels: int = 3,
        block_out_channels: Tuple[int] = (48, 96, 192, 384),
    ):
        super().__init__()

        self.conv_in = nn.Conv2d(
            conditioning_channels, block_out_channels[0], kernel_size=3, padding=1
        )

        self.blocks = nn.ModuleList([])

        for i in range(len(block_out_channels) - 1):
            channel_in = block_out_channels[i]
            channel_out = block_out_channels[i + 1]
            self.blocks.append(
                nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1)
            )
            self.blocks.append(
                nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2)
            )

        self.conv_out = zero_module(
            nn.Conv2d(
                block_out_channels[-1],
                conditioning_embedding_channels,
                kernel_size=3,
                padding=1,
            )
        )

    def forward(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = F.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = F.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
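

# Shape sketch (illustrative only, not executed at import time): with the default
# `block_out_channels=(48, 96, 192, 384)` the three stride-2 convolutions above reduce the
# spatial resolution by a factor of 8, mapping a full-resolution control image onto the UNet's
# latent grid. For example:
#
#     cond_embed = ControlNetConditioningEmbedding(conditioning_embedding_channels=320)
#     control_image = torch.randn(1, 3, 1024, 1024)  # SDXL-sized control image
#     features = cond_embed(control_image)
#     assert features.shape == (1, 320, 128, 128)    # 128 x 128 matches the SDXL latent grid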


class ControlNetModel_Union(ModelMixin, ConfigMixin, FromOriginalModelMixin):
    """
    A ControlNet-Union model: a single ControlNet that handles several control tasks, selected at runtime through a
    `control_type` vector.

    Args:
        in_channels (`int`, defaults to 4):
            The number of channels in the input sample.
        flip_sin_to_cos (`bool`, defaults to `True`):
            Whether to flip the sin to cos in the time embedding.
        freq_shift (`int`, defaults to 0):
            The frequency shift to apply to the time embedding.
        down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
            The tuple of downsample blocks to use.
        only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`):
            Whether the corresponding downsample blocks should use only cross-attention.
        block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`):
            The tuple of output channels for each block.
        layers_per_block (`int`, defaults to 2):
            The number of layers per block.
        downsample_padding (`int`, defaults to 1):
            The padding to use for the downsampling convolution.
        mid_block_scale_factor (`float`, defaults to 1):
            The scale factor to use for the mid block.
        act_fn (`str`, defaults to "silu"):
            The activation function to use.
        norm_num_groups (`int`, *optional*, defaults to 32):
            The number of groups to use for the normalization. If None, normalization and activation layers are
            skipped in post-processing.
        norm_eps (`float`, defaults to 1e-5):
            The epsilon to use for the normalization.
        cross_attention_dim (`int`, defaults to 1280):
            The dimension of the cross attention features.
        transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
            The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
            [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
            [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
        encoder_hid_dim (`int`, *optional*, defaults to None):
            If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
            dimension to `cross_attention_dim`.
        encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
            If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
            embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
        attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8):
            The dimension of the attention heads.
        use_linear_projection (`bool`, defaults to `False`):
            Whether to use a linear projection instead of a 1x1 convolution in the transformer blocks.
        class_embed_type (`str`, *optional*, defaults to `None`):
            The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None,
            `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
        addition_embed_type (`str`, *optional*, defaults to `None`):
            Configures an optional embedding which will be summed with the time embeddings. Choose from `None`,
            `"text"`, `"text_image"`, or `"text_time"`. `"text"` will use the `TextTimeEmbedding` layer.
        num_class_embeds (`int`, *optional*, defaults to `None`):
            Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
            class conditioning with `class_embed_type` equal to `None`.
        upcast_attention (`bool`, defaults to `False`):
            Whether to upcast the attention computation to float32.
        resnet_time_scale_shift (`str`, defaults to `"default"`):
            Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`.
        projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`):
            The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when
            `class_embed_type="projection"`.
        controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
            The channel order of the conditioning image; this implementation only supports `"rgb"`.
        conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`):
            The tuple of output channels for each block in the `conditioning_embedding` layer.
        global_pool_conditions (`bool`, defaults to `False`):
            Whether to globally average-pool the ControlNet residuals over their spatial dimensions before returning
            them.
        num_control_type (`int`, defaults to 6):
            The number of control tasks the model can distinguish via the `control_type` vector.
    """
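
    # Configuration note (illustrative): the signature defaults below mirror the SD-style ControlNet
    # defaults from diffusers. For SDXL-based Union checkpoints the matching values (e.g.
    # `cross_attention_dim=2048`, `addition_embed_type="text_time"`, `addition_time_embed_dim=256`)
    # are normally filled in by `from_unet` from an existing `UNet2DConditionModel` config rather
    # than passed by hand; the union-specific knob is `num_control_type`, the length of the
    # `control_type` vector supplied via `added_cond_kwargs` at inference time.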

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 4,
        conditioning_channels: int = 3,
        flip_sin_to_cos: bool = True,
        freq_shift: int = 0,
        down_block_types: Tuple[str] = (
            "CrossAttnDownBlock2D",
            "CrossAttnDownBlock2D",
            "CrossAttnDownBlock2D",
            "DownBlock2D",
        ),
        only_cross_attention: Union[bool, Tuple[bool]] = False,
        block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
        layers_per_block: int = 2,
        downsample_padding: int = 1,
        mid_block_scale_factor: float = 1,
        act_fn: str = "silu",
        norm_num_groups: Optional[int] = 32,
        norm_eps: float = 1e-5,
        cross_attention_dim: int = 1280,
        transformer_layers_per_block: Union[int, Tuple[int]] = 1,
        encoder_hid_dim: Optional[int] = None,
        encoder_hid_dim_type: Optional[str] = None,
        attention_head_dim: Union[int, Tuple[int]] = 8,
        num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
        use_linear_projection: bool = False,
        class_embed_type: Optional[str] = None,
        addition_embed_type: Optional[str] = None,
        addition_time_embed_dim: Optional[int] = None,
        num_class_embeds: Optional[int] = None,
        upcast_attention: bool = False,
        resnet_time_scale_shift: str = "default",
        projection_class_embeddings_input_dim: Optional[int] = None,
        controlnet_conditioning_channel_order: str = "rgb",
        conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
        global_pool_conditions: bool = False,
        addition_embed_type_num_heads: int = 64,
        num_control_type: int = 6,
    ):
        super().__init__()

        # If `num_attention_heads` is not given, fall back to `attention_head_dim`. This mirrors
        # the (historically misnamed) behavior of the diffusers UNet/ControlNet configs.
        num_attention_heads = num_attention_heads or attention_head_dim
        # Check inputs.
        if len(block_out_channels) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(only_cross_attention, bool) and len(
            only_cross_attention
        ) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(
            down_block_types
        ):
            raise ValueError(
                f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
            )

        if isinstance(transformer_layers_per_block, int):
            transformer_layers_per_block = [transformer_layers_per_block] * len(
                down_block_types
            )

        # Input convolution.
        conv_in_kernel = 3
        conv_in_padding = (conv_in_kernel - 1) // 2
        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=conv_in_kernel,
            padding=conv_in_padding,
        )

        # Time embedding.
        time_embed_dim = block_out_channels[0] * 4
        self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
        timestep_input_dim = block_out_channels[0]
        self.time_embedding = TimestepEmbedding(
            timestep_input_dim,
            time_embed_dim,
            act_fn=act_fn,
        )

        if encoder_hid_dim_type is None and encoder_hid_dim is not None:
            encoder_hid_dim_type = "text_proj"
            self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
            logger.info(
                "encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined."
            )

        if encoder_hid_dim is None and encoder_hid_dim_type is not None:
            raise ValueError(
                f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
            )

        if encoder_hid_dim_type == "text_proj":
            self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
        elif encoder_hid_dim_type == "text_image_proj":
            # Project combined text and image embeddings to the cross-attention dimension.
            self.encoder_hid_proj = TextImageProjection(
                text_embed_dim=encoder_hid_dim,
                image_embed_dim=cross_attention_dim,
                cross_attention_dim=cross_attention_dim,
            )

        elif encoder_hid_dim_type is not None:
            raise ValueError(
                f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
            )
        else:
            self.encoder_hid_proj = None

        # Class embedding.
        if class_embed_type is None and num_class_embeds is not None:
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif class_embed_type == "timestep":
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        elif class_embed_type == "identity":
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        elif class_embed_type == "projection":
            if projection_class_embeddings_input_dim is None:
                raise ValueError(
                    "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
                )
            # `TimestepEmbedding` is used here as a plain two-layer MLP that projects the
            # already-embedded `class_labels` to `time_embed_dim`.
            self.class_embedding = TimestepEmbedding(
                projection_class_embeddings_input_dim, time_embed_dim
            )
        else:
            self.class_embedding = None
        if addition_embed_type == "text":
            if encoder_hid_dim is not None:
                text_time_embedding_from_dim = encoder_hid_dim
            else:
                text_time_embedding_from_dim = cross_attention_dim

            self.add_embedding = TextTimeEmbedding(
                text_time_embedding_from_dim,
                time_embed_dim,
                num_heads=addition_embed_type_num_heads,
            )
        elif addition_embed_type == "text_image":
            # Both embedding dimensions are set to `cross_attention_dim` here, matching the only
            # current use case for this embedding type.
            self.add_embedding = TextImageTimeEmbedding(
                text_embed_dim=cross_attention_dim,
                image_embed_dim=cross_attention_dim,
                time_embed_dim=time_embed_dim,
            )
        elif addition_embed_type == "text_time":
            self.add_time_proj = Timesteps(
                addition_time_embed_dim, flip_sin_to_cos, freq_shift
            )
            self.add_embedding = TimestepEmbedding(
                projection_class_embeddings_input_dim, time_embed_dim
            )

        elif addition_embed_type is not None:
            raise ValueError(
                f"addition_embed_type: {addition_embed_type} must be None, 'text', 'text_image' or 'text_time'."
            )

        # ControlNet conditioning embedding (encodes the control image into latent-resolution features).
        self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=conditioning_embedding_out_channels,
            conditioning_channels=conditioning_channels,
        )

        # Condition transformer: fuses the active control embeddings with the input feature map.
        # Pooled features of each condition (plus a learned per-task embedding) are mixed by a small
        # residual attention block, and the result is projected to a per-condition channel-wise bias.
        num_trans_channel = 320
        num_trans_head = 8
        num_trans_layer = 1
        num_proj_channel = 320
        task_scale_factor = num_trans_channel**0.5

        self.task_embedding = nn.Parameter(
            task_scale_factor * torch.randn(num_control_type, num_trans_channel)
        )
        # NOTE: the attribute name `transformer_layes` (sic) is kept unchanged so that checkpoints
        # saved from this class continue to load.
        self.transformer_layes = nn.Sequential(
            *[
                ResidualAttentionBlock(num_trans_channel, num_trans_head)
                for _ in range(num_trans_layer)
            ]
        )
        self.spatial_ch_projs = zero_module(
            nn.Linear(num_trans_channel, num_proj_channel)
        )

        # Control-type time embedding: encodes the `control_type` vector the same way timesteps are
        # encoded, so different control tasks can be distinguished in the time embedding.
        self.control_type_proj = Timesteps(
            addition_time_embed_dim, flip_sin_to_cos, freq_shift
        )
        self.control_add_embedding = TimestepEmbedding(
            addition_time_embed_dim * num_control_type, time_embed_dim
        )
        # Down blocks and the zero-initialized ControlNet output convolutions.
        self.down_blocks = nn.ModuleList([])
        self.controlnet_down_blocks = nn.ModuleList([])

        if isinstance(only_cross_attention, bool):
            only_cross_attention = [only_cross_attention] * len(down_block_types)

        if isinstance(attention_head_dim, int):
            attention_head_dim = (attention_head_dim,) * len(down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(down_block_types)

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
        controlnet_block = zero_module(controlnet_block)
        self.controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                transformer_layers_per_block=transformer_layers_per_block[i],
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim,
                num_attention_heads=num_attention_heads[i],
                attention_head_dim=attention_head_dim[i]
                if attention_head_dim[i] is not None
                else output_channel,
                downsample_padding=downsample_padding,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
            )
            self.down_blocks.append(down_block)

            for _ in range(layers_per_block):
                controlnet_block = nn.Conv2d(
                    output_channel, output_channel, kernel_size=1
                )
                controlnet_block = zero_module(controlnet_block)
                self.controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv2d(
                    output_channel, output_channel, kernel_size=1
                )
                controlnet_block = zero_module(controlnet_block)
                self.controlnet_down_blocks.append(controlnet_block)

        # Mid block.
        mid_block_channel = block_out_channels[-1]

        controlnet_block = nn.Conv2d(
            mid_block_channel, mid_block_channel, kernel_size=1
        )
        controlnet_block = zero_module(controlnet_block)
        self.controlnet_mid_block = controlnet_block

        self.mid_block = UNetMidBlock2DCrossAttn(
            transformer_layers_per_block=transformer_layers_per_block[-1],
            in_channels=mid_block_channel,
            temb_channels=time_embed_dim,
            resnet_eps=norm_eps,
            resnet_act_fn=act_fn,
            output_scale_factor=mid_block_scale_factor,
            resnet_time_scale_shift=resnet_time_scale_shift,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads[-1],
            resnet_groups=norm_num_groups,
            use_linear_projection=use_linear_projection,
            upcast_attention=upcast_attention,
        )

    @classmethod
    def from_unet(
        cls,
        unet: UNet2DConditionModel,
        controlnet_conditioning_channel_order: str = "rgb",
        conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
        load_weights_from_unet: bool = True,
    ):
        r"""
        Instantiate a [`ControlNetModel_Union`] from a [`UNet2DConditionModel`].

        Parameters:
            unet (`UNet2DConditionModel`):
                The UNet model weights to copy to the [`ControlNetModel_Union`]. All configuration options are also
                copied where applicable.
        """
        transformer_layers_per_block = (
            unet.config.transformer_layers_per_block
            if "transformer_layers_per_block" in unet.config
            else 1
        )
        encoder_hid_dim = (
            unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None
        )
        encoder_hid_dim_type = (
            unet.config.encoder_hid_dim_type
            if "encoder_hid_dim_type" in unet.config
            else None
        )
        addition_embed_type = (
            unet.config.addition_embed_type
            if "addition_embed_type" in unet.config
            else None
        )
        addition_time_embed_dim = (
            unet.config.addition_time_embed_dim
            if "addition_time_embed_dim" in unet.config
            else None
        )

        controlnet = cls(
            encoder_hid_dim=encoder_hid_dim,
            encoder_hid_dim_type=encoder_hid_dim_type,
            addition_embed_type=addition_embed_type,
            addition_time_embed_dim=addition_time_embed_dim,
            transformer_layers_per_block=transformer_layers_per_block,
            in_channels=unet.config.in_channels,
            flip_sin_to_cos=unet.config.flip_sin_to_cos,
            freq_shift=unet.config.freq_shift,
            down_block_types=unet.config.down_block_types,
            only_cross_attention=unet.config.only_cross_attention,
            block_out_channels=unet.config.block_out_channels,
            layers_per_block=unet.config.layers_per_block,
            downsample_padding=unet.config.downsample_padding,
            mid_block_scale_factor=unet.config.mid_block_scale_factor,
            act_fn=unet.config.act_fn,
            norm_num_groups=unet.config.norm_num_groups,
            norm_eps=unet.config.norm_eps,
            cross_attention_dim=unet.config.cross_attention_dim,
            attention_head_dim=unet.config.attention_head_dim,
            num_attention_heads=unet.config.num_attention_heads,
            use_linear_projection=unet.config.use_linear_projection,
            class_embed_type=unet.config.class_embed_type,
            num_class_embeds=unet.config.num_class_embeds,
            upcast_attention=unet.config.upcast_attention,
            resnet_time_scale_shift=unet.config.resnet_time_scale_shift,
            projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,
            controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
            conditioning_embedding_out_channels=conditioning_embedding_out_channels,
        )

        if load_weights_from_unet:
            controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())
            controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())
            controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())

            if controlnet.class_embedding:
                controlnet.class_embedding.load_state_dict(
                    unet.class_embedding.state_dict()
                )

            controlnet.down_blocks.load_state_dict(
                unet.down_blocks.state_dict(), strict=False
            )
            controlnet.mid_block.load_state_dict(
                unet.mid_block.state_dict(), strict=False
            )

        return controlnet

    @property
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
        """
        # Collect processors recursively.
        processors = {}

        def fn_recursive_add_processors(
            name: str,
            module: torch.nn.Module,
            processors: Dict[str, AttentionProcessor],
        ):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor(
                    return_deprecated_lora=True
                )

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(
        self,
        processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]],
        _remove_lora=False,
    ):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the
                processor for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.

        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor, _remove_lora=_remove_lora)
                else:
                    module.set_processor(
                        processor.pop(f"{name}.processor"), _remove_lora=_remove_lora
                    )

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        if all(
            proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS
            for proc in self.attn_processors.values()
        ):
            processor = AttnAddedKVProcessor()
        elif all(
            proc.__class__ in CROSS_ATTENTION_PROCESSORS
            for proc in self.attn_processors.values()
        ):
            processor = AttnProcessor()
        else:
            raise ValueError(
                f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
            )

        self.set_attn_processor(processor, _remove_lora=True)

    def set_attention_slice(self, slice_size):
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module splits the input tensor in slices to compute attention in
        several steps. This is useful for saving some memory in exchange for a small decrease in speed.

        Args:
            slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
                When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
                `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
                provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
                must be a multiple of `slice_size`.
        """
        sliceable_head_dims = []

        def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
            if hasattr(module, "set_attention_slice"):
                sliceable_head_dims.append(module.sliceable_head_dim)

            for child in module.children():
                fn_recursive_retrieve_sliceable_dims(child)

        # Retrieve the number of sliceable attention layers.
        for module in self.children():
            fn_recursive_retrieve_sliceable_dims(module)

        num_sliceable_layers = len(sliceable_head_dims)

        if slice_size == "auto":
            # Half the attention head size is usually a good trade-off between speed and memory.
            slice_size = [dim // 2 for dim in sliceable_head_dims]
        elif slice_size == "max":
            # Make the smallest slice possible.
            slice_size = num_sliceable_layers * [1]

        slice_size = (
            num_sliceable_layers * [slice_size]
            if not isinstance(slice_size, list)
            else slice_size
        )

        if len(slice_size) != len(sliceable_head_dims):
            raise ValueError(
                f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
                f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
            )

        for i in range(len(slice_size)):
            size = slice_size[i]
            dim = sliceable_head_dims[i]
            if size is not None and size > dim:
                raise ValueError(f"size {size} has to be smaller or equal to {dim}.")

        # Recursively walk through all children: any module that exposes `set_attention_slice`
        # receives its slice size, popped from the reversed list so the order matches retrieval.
        def fn_recursive_set_attention_slice(
            module: torch.nn.Module, slice_size: List[int]
        ):
            if hasattr(module, "set_attention_slice"):
                module.set_attention_slice(slice_size.pop())

            for child in module.children():
                fn_recursive_set_attention_slice(child, slice_size)

        reversed_slice_size = list(reversed(slice_size))
        for module in self.children():
            fn_recursive_set_attention_slice(module, reversed_slice_size)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):
            module.gradient_checkpointing = value

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond_list: List[torch.FloatTensor],
        conditioning_scale: float = 1.0,
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        """
        The [`ControlNetModel_Union`] forward method.

        Args:
            sample (`torch.FloatTensor`):
                The noisy input tensor.
            timestep (`Union[torch.Tensor, float, int]`):
                The denoising timestep.
            encoder_hidden_states (`torch.Tensor`):
                The encoder hidden states.
            controlnet_cond_list (`list` of `torch.FloatTensor`):
                The conditioning images, one tensor of shape `(batch_size, channels, height, width)` per control
                type; only the entries flagged as active in `added_cond_kwargs["control_type"]` are used.
            conditioning_scale (`float`, defaults to `1.0`):
                The scale factor for ControlNet outputs.
            class_labels (`torch.Tensor`, *optional*, defaults to `None`):
                Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
            timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
                Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
                timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
                embeddings.
            attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
                An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
                is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
                negative values to the attention scores corresponding to "discard" tokens.
            added_cond_kwargs (`dict`):
                Additional conditions for the Stable Diffusion XL UNet; must also contain a `control_type` vector
                indicating which control tasks are active.
            cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
                A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
            guess_mode (`bool`, defaults to `False`):
                In this mode, the ControlNet encoder tries its best to recognize the content of the input even if
                you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
            return_dict (`bool`, defaults to `True`):
                Whether or not to return a [`ControlNetOutput`] instead of a plain tuple.

        Returns:
            [`ControlNetOutput`] **or** `tuple`:
                If `return_dict` is `True`, a [`ControlNetOutput`] is returned, otherwise a tuple is returned where
                the first element is the sample tensor.
        """

        # Check channel order.
        channel_order = self.config.controlnet_conditioning_channel_order

        if channel_order == "rgb":
            # Conditioning images are expected in RGB order by default; nothing to do.
            ...
        else:
            raise ValueError(
                f"unknown `controlnet_conditioning_channel_order`: {channel_order}"
            )

        # Prepare attention_mask.
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)

        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            # Creating a tensor from a Python scalar requires a CPU/GPU sync, so prefer passing
            # `timestep` as a tensor when possible.
            is_mps = sample.device.type == "mps"
            if isinstance(timestep, float):
                dtype = torch.float32 if is_mps else torch.float64
            else:
                dtype = torch.int32 if is_mps else torch.int64
            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
        elif len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # Broadcast to the batch dimension in a way that is compatible with ONNX/Core ML.
        timesteps = timesteps.expand(sample.shape[0])

        t_emb = self.time_proj(timesteps)

        # `Timesteps` does not contain any weights and always returns fp32 tensors, but the time
        # embedding might be running in fp16, so cast here.
        t_emb = t_emb.to(dtype=sample.dtype)

        emb = self.time_embedding(t_emb, timestep_cond)
        aug_emb = None

        if self.class_embedding is not None:
            if class_labels is None:
                raise ValueError(
                    "class_labels should be provided when num_class_embeds > 0"
                )

            if self.config.class_embed_type == "timestep":
                class_labels = self.time_proj(class_labels)

            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
            emb = emb + class_emb

        if self.config.addition_embed_type is not None:
            if self.config.addition_embed_type == "text":
                aug_emb = self.add_embedding(encoder_hidden_states)

            elif self.config.addition_embed_type == "text_time":
                if "text_embeds" not in added_cond_kwargs:
                    raise ValueError(
                        f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
                    )
                text_embeds = added_cond_kwargs.get("text_embeds")
                if "time_ids" not in added_cond_kwargs:
                    raise ValueError(
                        f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
                    )
                time_ids = added_cond_kwargs.get("time_ids")
                time_embeds = self.add_time_proj(time_ids.flatten())
                time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))

                add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
                add_embeds = add_embeds.to(emb.dtype)
                aug_emb = self.add_embedding(add_embeds)

        # Inject the control-type information into the time embedding so the model can distinguish
        # which control condition(s) it is being asked to follow.
        control_type = added_cond_kwargs.get("control_type")
        control_embeds = self.control_type_proj(control_type.flatten())
        control_embeds = control_embeds.reshape((t_emb.shape[0], -1))
        control_embeds = control_embeds.to(emb.dtype)
        control_emb = self.control_add_embedding(control_embeds)
        emb = emb + control_emb

        emb = emb + aug_emb if aug_emb is not None else emb

        # 2. pre-process
        sample = self.conv_in(sample)
        indices = torch.nonzero(control_type[0])

        # Condition transformer: pool each active condition embedding (and the input feature map)
        # into a single feature vector, mix these vectors with a small residual attention block, and
        # turn the result into a per-condition channel-wise bias added back onto `sample`.
        inputs = []
        condition_list = []

        for idx in range(indices.shape[0] + 1):
            if idx == indices.shape[0]:
                controlnet_cond = sample
                feat_seq = torch.mean(controlnet_cond, dim=(2, 3))
            else:
                controlnet_cond = self.controlnet_cond_embedding(
                    controlnet_cond_list[indices[idx][0]]
                )
                feat_seq = torch.mean(controlnet_cond, dim=(2, 3))
                feat_seq = feat_seq + self.task_embedding[indices[idx][0]]

            inputs.append(feat_seq.unsqueeze(1))
            condition_list.append(controlnet_cond)

        x = torch.cat(inputs, dim=1)
        x = self.transformer_layes(x)

        controlnet_cond_fuser = sample * 0.0
        for idx in range(indices.shape[0]):
            alpha = self.spatial_ch_projs(x[:, idx])
            alpha = alpha.unsqueeze(-1).unsqueeze(-1)
            controlnet_cond_fuser += condition_list[idx] + alpha

        sample = sample + controlnet_cond_fuser

        # 3. down
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if (
                hasattr(downsample_block, "has_cross_attention")
                and downsample_block.has_cross_attention
            ):
                sample, res_samples = downsample_block(
                    hidden_states=sample,
                    temb=emb,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        # 4. mid
        if self.mid_block is not None:
            sample = self.mid_block(
                sample,
                emb,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
            )

        # 5. ControlNet blocks (zero-initialized 1x1 convolutions applied to each residual).
        controlnet_down_block_res_samples = ()

        for down_block_res_sample, controlnet_block in zip(
            down_block_res_samples, self.controlnet_down_blocks
        ):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples = controlnet_down_block_res_samples + (
                down_block_res_sample,
            )

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        if guess_mode and not self.config.global_pool_conditions:
            # Scale the residuals from weakest (shallowest) to strongest (deepest) in guess mode.
            scales = torch.logspace(
                -1, 0, len(down_block_res_samples) + 1, device=sample.device
            )
            scales = scales * conditioning_scale
            down_block_res_samples = [
                sample * scale for sample, scale in zip(down_block_res_samples, scales)
            ]
            mid_block_res_sample = mid_block_res_sample * scales[-1]
        else:
            down_block_res_samples = [
                sample * conditioning_scale for sample in down_block_res_samples
            ]
            mid_block_res_sample = mid_block_res_sample * conditioning_scale

        if self.config.global_pool_conditions:
            down_block_res_samples = [
                torch.mean(sample, dim=(2, 3), keepdim=True)
                for sample in down_block_res_samples
            ]
            mid_block_res_sample = torch.mean(
                mid_block_res_sample, dim=(2, 3), keepdim=True
            )

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return ControlNetOutput(
            down_block_res_samples=down_block_res_samples,
            mid_block_res_sample=mid_block_res_sample,
        )


def zero_module(module):
    for p in module.parameters():
        nn.init.zeros_(p)
    return module
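

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only). It assumes the SDXL base checkpoint
    # "stabilityai/stable-diffusion-xl-base-1.0" is available locally or downloadable;
    # swap in your own UNet, devices, and dtypes as needed.
    unet = UNet2DConditionModel.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet"
    )
    controlnet = ControlNetModel_Union.from_unet(unet)

    # Dummy SDXL-shaped inputs: one latent sample, one active control image in slot 0.
    # (The slot-to-task mapping, e.g. openpose/depth/..., is checkpoint-specific.)
    sample = torch.randn(1, 4, 128, 128)
    encoder_hidden_states = torch.randn(1, 77, 2048)
    controlnet_cond_list = [torch.zeros(1, 3, 1024, 1024) for _ in range(6)]
    control_type = torch.zeros(1, 6)
    control_type[:, 0] = 1.0

    added_cond_kwargs = {
        "text_embeds": torch.randn(1, 1280),
        "time_ids": torch.randn(1, 6),
        "control_type": control_type,
    }

    with torch.no_grad():
        out = controlnet(
            sample,
            timestep=10,
            encoder_hidden_states=encoder_hidden_states,
            controlnet_cond_list=controlnet_cond_list,
            added_cond_kwargs=added_cond_kwargs,
        )
    print([tuple(t.shape) for t in out.down_block_res_samples])
    print(tuple(out.mid_block_res_sample.shape))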