# Modified from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/transformers/transformer_flux2.py
# Copyright 2025 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import glob
import inspect
import json
import os
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import FromOriginalModelMixin
from diffusers.models.attention_processor import Attention, AttentionProcessor
# NOTE: `apply_rotary_emb` is intentionally not imported from diffusers; this module
# defines its own variant below.
from diffusers.models.embeddings import (TimestepEmbedding, Timesteps,
                                         get_1d_rotary_pos_embed)
from diffusers.models.modeling_outputs import Transformer2DModelOutput
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.normalization import AdaLayerNormContinuous
from diffusers.utils import (USE_PEFT_BACKEND, is_torch_npu_available,
                             is_torch_version, logging, scale_lora_layers,
                             unscale_lora_layers)

from ..dist import (Flux2MultiGPUsAttnProcessor2_0, get_sequence_parallel_rank,
                    get_sequence_parallel_world_size, get_sp_group)
from .attention_utils import attention

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

def _get_projections(attn: "Flux2Attention", hidden_states, encoder_hidden_states=None):
    query = attn.to_q(hidden_states)
    key = attn.to_k(hidden_states)
    value = attn.to_v(hidden_states)

    encoder_query = encoder_key = encoder_value = None
    if encoder_hidden_states is not None and attn.added_kv_proj_dim is not None:
        encoder_query = attn.add_q_proj(encoder_hidden_states)
        encoder_key = attn.add_k_proj(encoder_hidden_states)
        encoder_value = attn.add_v_proj(encoder_hidden_states)

    return query, key, value, encoder_query, encoder_key, encoder_value


def _get_qkv_projections(attn: "Flux2Attention", hidden_states, encoder_hidden_states=None):
    return _get_projections(attn, hidden_states, encoder_hidden_states)

def apply_rotary_emb(
    x: torch.Tensor,
    freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]],
    use_real: bool = True,
    use_real_unbind_dim: int = -1,
    sequence_dim: int = 2,
) -> torch.Tensor:
    """
    Apply rotary embeddings to an input tensor using the given frequency tensor. This function applies rotary
    embeddings to the given query or key tensor `x` using the provided frequency tensor `freqs_cis`. The input tensor
    is reshaped as complex numbers, and the frequency tensor is reshaped for broadcasting compatibility. The resulting
    tensor contains rotary embeddings and is returned as a real tensor.

    Args:
        x (`torch.Tensor`):
            Query or key tensor to apply rotary embeddings to. Shape `[B, H, S, D]` when `sequence_dim=2`, or
            `[B, S, H, D]` when `sequence_dim=1`.
        freqs_cis (`Tuple[torch.Tensor]`):
            Precomputed frequency tensors for complex exponentials, as a `(cos, sin)` pair of shape `([S, D], [S, D])`.

    Returns:
        `torch.Tensor`: The input tensor with rotary embeddings applied.
    """
    if use_real:
        cos, sin = freqs_cis  # [S, D]
        if sequence_dim == 2:
            cos = cos[None, None, :, :]
            sin = sin[None, None, :, :]
        elif sequence_dim == 1:
            cos = cos[None, :, None, :]
            sin = sin[None, :, None, :]
        else:
            raise ValueError(f"`sequence_dim={sequence_dim}` but should be 1 or 2.")
        cos, sin = cos.to(x.device), sin.to(x.device)

        if use_real_unbind_dim == -1:
            # Used for flux, cogvideox, hunyuan-dit
            x_real, x_imag = x.reshape(*x.shape[:-1], -1, 2).unbind(-1)  # [B, H, S, D//2]
            x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3)
        elif use_real_unbind_dim == -2:
            # Used for Stable Audio, OmniGen, CogView4 and Cosmos
            x_real, x_imag = x.reshape(*x.shape[:-1], 2, -1).unbind(-2)  # [B, H, S, D//2]
            x_rotated = torch.cat([-x_imag, x_real], dim=-1)
        else:
            raise ValueError(f"`use_real_unbind_dim={use_real_unbind_dim}` but should be -1 or -2.")

        out = (x.float() * cos + x_rotated.float() * sin).to(x.dtype)
        return out
    else:
        # Used for lumina
        x_rotated = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2))
        freqs_cis = freqs_cis.unsqueeze(2)
        x_out = torch.view_as_real(x_rotated * freqs_cis).flatten(3)
        return x_out.type_as(x)

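def _example_apply_rotary_emb():
    # Illustrative sketch, not part of the original model code: demonstrates the tensor
    # shapes this module's `apply_rotary_emb` expects when called with `sequence_dim=1`,
    # as the attention processor below does. All sizes here are demo assumptions.
    batch, seq_len, heads, head_dim = 2, 16, 4, 32
    x = torch.randn(batch, seq_len, heads, head_dim)
    # `Flux2PosEmbed` produces such a (cos, sin) pair, each of shape [S, D].
    cos, sin = torch.randn(seq_len, head_dim), torch.randn(seq_len, head_dim)
    out = apply_rotary_emb(x, (cos, sin), sequence_dim=1)
    assert out.shape == x.shape
    return out
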
class Flux2SwiGLU(nn.Module):
    """
    Flux 2 uses a SwiGLU-style activation in the transformer feedforward sub-blocks, but with the linear projection
    layer fused into the first linear layer of the FF sub-block. Thus, this module has no trainable parameters.
    """

    def __init__(self):
        super().__init__()
        self.gate_fn = nn.SiLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x1, x2 = x.chunk(2, dim=-1)
        x = self.gate_fn(x1) * x2
        return x

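def _example_flux2_swiglu():
    # Illustrative sketch, not part of the original model code: Flux2SwiGLU halves the
    # last dimension, gating the first half with SiLU and multiplying by the second half,
    # so an input of width 2 * inner_dim yields an output of width inner_dim.
    act = Flux2SwiGLU()
    x = torch.randn(2, 8, 64)  # the last dimension must be even
    out = act(x)
    assert out.shape == (2, 8, 32)
    return out
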
class Flux2FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: float = 3.0,
        inner_dim: Optional[int] = None,
        bias: bool = False,
    ):
        super().__init__()
        if inner_dim is None:
            inner_dim = int(dim * mult)
        dim_out = dim_out or dim

        # Flux2SwiGLU will reduce the dimension by half
        self.linear_in = nn.Linear(dim, inner_dim * 2, bias=bias)
        self.act_fn = Flux2SwiGLU()
        self.linear_out = nn.Linear(inner_dim, dim_out, bias=bias)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.linear_in(x)
        x = self.act_fn(x)
        x = self.linear_out(x)
        return x

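def _example_flux2_feedforward():
    # Illustrative sketch, not part of the original model code: the FF sub-block projects
    # dim -> 2 * inner_dim, halves back to inner_dim via Flux2SwiGLU, then projects to
    # dim_out. With dim=64 and mult=3.0, inner_dim is 192. Sizes are demo assumptions.
    ff = Flux2FeedForward(dim=64, mult=3.0)
    x = torch.randn(2, 8, 64)
    assert ff(x).shape == (2, 8, 64)
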
class Flux2AttnProcessor:
    _attention_backend = None
    _parallel_config = None

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError(f"{self.__class__.__name__} requires PyTorch 2.0. Please upgrade your PyTorch version.")

    def __call__(
        self,
        attn: Union["Flux2Attention", "Flux2ParallelSelfAttention"],
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        image_rotary_emb: Optional[torch.Tensor] = None,
        text_seq_len: Optional[int] = None,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """
        Unified processor for both Flux2Attention and Flux2ParallelSelfAttention.

        Args:
            attn: Attention module (either Flux2Attention or Flux2ParallelSelfAttention)
            hidden_states: Input hidden states
            encoder_hidden_states: Optional encoder hidden states (only for Flux2Attention)
            attention_mask: Optional attention mask
            image_rotary_emb: Optional rotary embeddings
            text_seq_len: Optional text sequence length (accepted for API compatibility with the
                distributed processors; unused here)

        Returns:
            For Flux2Attention with encoder_hidden_states: (hidden_states, encoder_hidden_states)
            For Flux2Attention without encoder_hidden_states: hidden_states
            For Flux2ParallelSelfAttention: hidden_states
        """
        # Determine which type of attention we're processing
        is_parallel_self_attn = hasattr(attn, 'to_qkv_mlp_proj') and attn.to_qkv_mlp_proj is not None

        if is_parallel_self_attn:
            # ============================================
            # Parallel Self-Attention Path (with MLP)
            # ============================================
            # Parallel in (QKV + MLP in) projection
            hidden_states = attn.to_qkv_mlp_proj(hidden_states)
            qkv, mlp_hidden_states = torch.split(
                hidden_states, [3 * attn.inner_dim, attn.mlp_hidden_dim * attn.mlp_mult_factor], dim=-1
            )
            # Handle the attention logic
            query, key, value = qkv.chunk(3, dim=-1)
        else:
            # ============================================
            # Standard Attention Path (possibly with encoder)
            # ============================================
            query, key, value, encoder_query, encoder_key, encoder_value = _get_qkv_projections(
                attn, hidden_states, encoder_hidden_states
            )

        # Common processing for query, key, value
        query = query.unflatten(-1, (attn.heads, -1))
        key = key.unflatten(-1, (attn.heads, -1))
        value = value.unflatten(-1, (attn.heads, -1))

        query = attn.norm_q(query)
        key = attn.norm_k(key)

        # Handle encoder projections (only for standard attention)
        if not is_parallel_self_attn and attn.added_kv_proj_dim is not None:
            encoder_query = encoder_query.unflatten(-1, (attn.heads, -1))
            encoder_key = encoder_key.unflatten(-1, (attn.heads, -1))
            encoder_value = encoder_value.unflatten(-1, (attn.heads, -1))

            encoder_query = attn.norm_added_q(encoder_query)
            encoder_key = attn.norm_added_k(encoder_key)

            query = torch.cat([encoder_query, query], dim=1)
            key = torch.cat([encoder_key, key], dim=1)
            value = torch.cat([encoder_value, value], dim=1)

        # Apply rotary embeddings
        if image_rotary_emb is not None:
            query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1)
            key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1)

        # Perform attention
        hidden_states = attention(
            query, key, value, attn_mask=attention_mask,
        )
        hidden_states = hidden_states.flatten(2, 3)
        hidden_states = hidden_states.to(query.dtype)

        if is_parallel_self_attn:
            # ============================================
            # Parallel Self-Attention Output Path
            # ============================================
            # Handle the feedforward (FF) logic
            mlp_hidden_states = attn.mlp_act_fn(mlp_hidden_states)
            # Concatenate and parallel output projection
            hidden_states = torch.cat([hidden_states, mlp_hidden_states], dim=-1)
            hidden_states = attn.to_out(hidden_states)
            return hidden_states
        else:
            # ============================================
            # Standard Attention Output Path
            # ============================================
            # Split encoder and latent hidden states if encoder was used
            if encoder_hidden_states is not None:
                encoder_hidden_states, hidden_states = hidden_states.split_with_sizes(
                    [encoder_hidden_states.shape[1], hidden_states.shape[1] - encoder_hidden_states.shape[1]], dim=1
                )
                encoder_hidden_states = attn.to_add_out(encoder_hidden_states)

            # Project output
            hidden_states = attn.to_out[0](hidden_states)
            hidden_states = attn.to_out[1](hidden_states)

            if encoder_hidden_states is not None:
                return hidden_states, encoder_hidden_states
            else:
                return hidden_states

class Flux2Attention(torch.nn.Module):
    _default_processor_cls = Flux2AttnProcessor
    _available_processors = [Flux2AttnProcessor]

    def __init__(
        self,
        query_dim: int,
        heads: int = 8,
        dim_head: int = 64,
        dropout: float = 0.0,
        bias: bool = False,
        added_kv_proj_dim: Optional[int] = None,
        added_proj_bias: Optional[bool] = True,
        out_bias: bool = True,
        eps: float = 1e-5,
        out_dim: int = None,
        elementwise_affine: bool = True,
        processor=None,
    ):
        super().__init__()

        self.head_dim = dim_head
        self.inner_dim = out_dim if out_dim is not None else dim_head * heads
        self.query_dim = query_dim
        self.out_dim = out_dim if out_dim is not None else query_dim
        self.heads = out_dim // dim_head if out_dim is not None else heads
        self.use_bias = bias
        self.dropout = dropout

        self.added_kv_proj_dim = added_kv_proj_dim
        self.added_proj_bias = added_proj_bias

        self.to_q = torch.nn.Linear(query_dim, self.inner_dim, bias=bias)
        self.to_k = torch.nn.Linear(query_dim, self.inner_dim, bias=bias)
        self.to_v = torch.nn.Linear(query_dim, self.inner_dim, bias=bias)

        # QK Norm
        self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
        self.norm_k = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)

        self.to_out = torch.nn.ModuleList([])
        self.to_out.append(torch.nn.Linear(self.inner_dim, self.out_dim, bias=out_bias))
        self.to_out.append(torch.nn.Dropout(dropout))

        if added_kv_proj_dim is not None:
            self.norm_added_q = torch.nn.RMSNorm(dim_head, eps=eps)
            self.norm_added_k = torch.nn.RMSNorm(dim_head, eps=eps)
            self.add_q_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias)
            self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias)
            self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias)
            self.to_add_out = torch.nn.Linear(self.inner_dim, query_dim, bias=out_bias)

        if processor is None:
            processor = self._default_processor_cls()
        self.set_processor(processor)

    def set_processor(self, processor: AttentionProcessor) -> None:
        """
        Set the attention processor to use.

        Args:
            processor (`AttnProcessor`):
                The attention processor to use.
        """
        # if current processor is in `self._modules` and if passed `processor` is not, we need to
        # pop `processor` from `self._modules`
        if (
            hasattr(self, "processor")
            and isinstance(self.processor, torch.nn.Module)
            and not isinstance(processor, torch.nn.Module)
        ):
            logger.info(f"You are removing possibly trained weights of {self.processor} with {processor}")
            self._modules.pop("processor")

        self.processor = processor

    def get_processor(self, return_deprecated_lora: bool = False) -> "AttentionProcessor":
        """
        Get the attention processor in use.

        Args:
            return_deprecated_lora (`bool`, *optional*, defaults to `False`):
                Set to `True` to return the deprecated LoRA attention processor.

        Returns:
            "AttentionProcessor": The attention processor in use.
        """
        if not return_deprecated_lora:
            return self.processor

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        image_rotary_emb: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys())
        unused_kwargs = [k for k, _ in kwargs.items() if k not in attn_parameters]
        if len(unused_kwargs) > 0:
            logger.warning(
                f"joint_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored."
            )
        kwargs = {k: v for k, v in kwargs.items() if k in attn_parameters}
        return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb, **kwargs)

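def _example_flux2_attention():
    # Illustrative sketch, not part of the original model code: joint text/image
    # attention. With `added_kv_proj_dim` set, the processor concatenates the text
    # tokens before the image tokens and returns the two streams separately. All sizes
    # are demo assumptions, far smaller than the real model, and the sketch assumes the
    # in-repo `attention` helper runs on the current device.
    attn = Flux2Attention(query_dim=64, heads=4, dim_head=16, added_kv_proj_dim=64)
    img, txt = torch.randn(2, 32, 64), torch.randn(2, 8, 64)
    img_out, txt_out = attn(hidden_states=img, encoder_hidden_states=txt)
    assert img_out.shape == img.shape and txt_out.shape == txt.shape
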
class Flux2ParallelSelfAttention(torch.nn.Module):
    """
    Flux 2 parallel self-attention for the Flux 2 single-stream transformer blocks.

    This implements a parallel transformer block, where the attention QKV projections are fused with the feedforward
    (FF) input projections, and the attention output projections are fused with the FF output projections. See the
    [ViT-22B paper](https://arxiv.org/abs/2302.05442) for a visual depiction of this type of transformer block.
    """

    _default_processor_cls = Flux2AttnProcessor
    _available_processors = [Flux2AttnProcessor]
    # Does not support QKV fusion as the QKV projections are always fused
    _supports_qkv_fusion = False

    def __init__(
        self,
        query_dim: int,
        heads: int = 8,
        dim_head: int = 64,
        dropout: float = 0.0,
        bias: bool = False,
        out_bias: bool = True,
        eps: float = 1e-5,
        out_dim: int = None,
        elementwise_affine: bool = True,
        mlp_ratio: float = 4.0,
        mlp_mult_factor: int = 2,
        processor=None,
    ):
        super().__init__()

        self.head_dim = dim_head
        self.inner_dim = out_dim if out_dim is not None else dim_head * heads
        self.query_dim = query_dim
        self.out_dim = out_dim if out_dim is not None else query_dim
        self.heads = out_dim // dim_head if out_dim is not None else heads
        self.use_bias = bias
        self.dropout = dropout

        self.mlp_ratio = mlp_ratio
        self.mlp_hidden_dim = int(query_dim * self.mlp_ratio)
        self.mlp_mult_factor = mlp_mult_factor

        # Fused QKV projections + MLP input projection
        self.to_qkv_mlp_proj = torch.nn.Linear(
            self.query_dim, self.inner_dim * 3 + self.mlp_hidden_dim * self.mlp_mult_factor, bias=bias
        )
        self.mlp_act_fn = Flux2SwiGLU()

        # QK Norm
        self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
        self.norm_k = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)

        # Fused attention output projection + MLP output projection
        self.to_out = torch.nn.Linear(self.inner_dim + self.mlp_hidden_dim, self.out_dim, bias=out_bias)

        if processor is None:
            processor = self._default_processor_cls()
        self.set_processor(processor)

    def set_processor(self, processor: AttentionProcessor) -> None:
        """
        Set the attention processor to use.

        Args:
            processor (`AttnProcessor`):
                The attention processor to use.
        """
        # if current processor is in `self._modules` and if passed `processor` is not, we need to
        # pop `processor` from `self._modules`
        if (
            hasattr(self, "processor")
            and isinstance(self.processor, torch.nn.Module)
            and not isinstance(processor, torch.nn.Module)
        ):
            logger.info(f"You are removing possibly trained weights of {self.processor} with {processor}")
            self._modules.pop("processor")

        self.processor = processor

    def get_processor(self, return_deprecated_lora: bool = False) -> "AttentionProcessor":
        """
        Get the attention processor in use.

        Args:
            return_deprecated_lora (`bool`, *optional*, defaults to `False`):
                Set to `True` to return the deprecated LoRA attention processor.

        Returns:
            "AttentionProcessor": The attention processor in use.
        """
        if not return_deprecated_lora:
            return self.processor

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        image_rotary_emb: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> torch.Tensor:
        attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys())
        unused_kwargs = [k for k, _ in kwargs.items() if k not in attn_parameters]
        if len(unused_kwargs) > 0:
            logger.warning(
                f"joint_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored."
            )
        kwargs = {k: v for k, v in kwargs.items() if k in attn_parameters}
        return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb, **kwargs)

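def _example_flux2_parallel_self_attention():
    # Illustrative sketch, not part of the original model code: the parallel block fuses
    # QKV and the MLP input into one projection of width
    # 3 * inner_dim + mlp_hidden_dim * mlp_mult_factor (here 3 * 64 + 256 * 2 = 704) and
    # fuses the attention/MLP outputs into a single output projection. Demo sizes only.
    attn = Flux2ParallelSelfAttention(query_dim=64, heads=4, dim_head=16, mlp_ratio=4.0)
    x = torch.randn(2, 32, 64)
    assert attn(hidden_states=x).shape == x.shape
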
class Flux2SingleTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        mlp_ratio: float = 3.0,
        eps: float = 1e-6,
        bias: bool = False,
    ):
        super().__init__()

        self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=eps)
        # Note that the MLP in/out linear layers are fused with the attention QKV/out projections, respectively; this
        # is often called a "parallel" transformer block. See the [ViT-22B paper](https://arxiv.org/abs/2302.05442)
        # for a visual depiction of this type of transformer block.
        self.attn = Flux2ParallelSelfAttention(
            query_dim=dim,
            dim_head=attention_head_dim,
            heads=num_attention_heads,
            out_dim=dim,
            bias=bias,
            out_bias=bias,
            eps=eps,
            mlp_ratio=mlp_ratio,
            mlp_mult_factor=2,
            processor=Flux2AttnProcessor(),
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor],
        temb_mod_params: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
        image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        if encoder_hidden_states is None:
            # `text_seq_len` is needed both for the attention call and for splitting the text and image tokens
            # back apart below, so the text stream must be passed in separately.
            raise ValueError("`encoder_hidden_states` must be provided so that `text_seq_len` can be derived.")
        text_seq_len = encoder_hidden_states.shape[1]
        hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)

        mod_shift, mod_scale, mod_gate = temb_mod_params

        norm_hidden_states = self.norm(hidden_states)
        norm_hidden_states = (1 + mod_scale) * norm_hidden_states + mod_shift

        joint_attention_kwargs = joint_attention_kwargs or {}
        attn_output = self.attn(
            hidden_states=norm_hidden_states,
            image_rotary_emb=image_rotary_emb,
            text_seq_len=text_seq_len,
            **joint_attention_kwargs,
        )

        hidden_states = hidden_states + mod_gate * attn_output
        if hidden_states.dtype == torch.float16:
            hidden_states = hidden_states.clip(-65504, 65504)

        encoder_hidden_states, hidden_states = hidden_states[:, :text_seq_len], hidden_states[:, text_seq_len:]
        return encoder_hidden_states, hidden_states

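def _example_flux2_single_transformer_block():
    # Illustrative sketch, not part of the original model code: one single-stream
    # (parallel) block forward pass. Identity-like modulation (shift=0, scale=0, gate=1)
    # stands in for the real Flux2Modulation output; all sizes are demo assumptions.
    block = Flux2SingleTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
    mod_params = (torch.zeros(1, 1, 64), torch.zeros(1, 1, 64), torch.ones(1, 1, 64))
    img, txt = torch.randn(1, 32, 64), torch.randn(1, 8, 64)
    txt_out, img_out = block(img, txt, mod_params)
    assert txt_out.shape == txt.shape and img_out.shape == img.shape
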
class Flux2TransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        mlp_ratio: float = 3.0,
        eps: float = 1e-6,
        bias: bool = False,
    ):
        super().__init__()

        self.mlp_hidden_dim = int(dim * mlp_ratio)

        self.norm1 = nn.LayerNorm(dim, elementwise_affine=False, eps=eps)
        self.norm1_context = nn.LayerNorm(dim, elementwise_affine=False, eps=eps)

        self.attn = Flux2Attention(
            query_dim=dim,
            added_kv_proj_dim=dim,
            dim_head=attention_head_dim,
            heads=num_attention_heads,
            out_dim=dim,
            bias=bias,
            added_proj_bias=bias,
            out_bias=bias,
            eps=eps,
            processor=Flux2AttnProcessor(),
        )

        self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=eps)
        self.ff = Flux2FeedForward(dim=dim, dim_out=dim, mult=mlp_ratio, bias=bias)

        self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=eps)
        self.ff_context = Flux2FeedForward(dim=dim, dim_out=dim, mult=mlp_ratio, bias=bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        temb_mod_params_img: Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], ...],
        temb_mod_params_txt: Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], ...],
        image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        joint_attention_kwargs = joint_attention_kwargs or {}

        # Modulation parameters shape: [1, 1, self.dim]
        (shift_msa, scale_msa, gate_msa), (shift_mlp, scale_mlp, gate_mlp) = temb_mod_params_img
        (c_shift_msa, c_scale_msa, c_gate_msa), (c_shift_mlp, c_scale_mlp, c_gate_mlp) = temb_mod_params_txt

        # Img stream
        norm_hidden_states = self.norm1(hidden_states)
        norm_hidden_states = (1 + scale_msa) * norm_hidden_states + shift_msa

        # Conditioning txt stream
        norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states)
        norm_encoder_hidden_states = (1 + c_scale_msa) * norm_encoder_hidden_states + c_shift_msa

        # Attention on concatenated img + txt stream
        attention_outputs = self.attn(
            hidden_states=norm_hidden_states,
            encoder_hidden_states=norm_encoder_hidden_states,
            image_rotary_emb=image_rotary_emb,
            **joint_attention_kwargs,
        )
        attn_output, context_attn_output = attention_outputs

        # Process attention outputs for the image stream (`hidden_states`).
        attn_output = gate_msa * attn_output
        hidden_states = hidden_states + attn_output

        norm_hidden_states = self.norm2(hidden_states)
        norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp

        ff_output = self.ff(norm_hidden_states)
        hidden_states = hidden_states + gate_mlp * ff_output

        # Process attention outputs for the text stream (`encoder_hidden_states`).
        context_attn_output = c_gate_msa * context_attn_output
        encoder_hidden_states = encoder_hidden_states + context_attn_output

        norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
        norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp) + c_shift_mlp

        context_ff_output = self.ff_context(norm_encoder_hidden_states)
        encoder_hidden_states = encoder_hidden_states + c_gate_mlp * context_ff_output

        if encoder_hidden_states.dtype == torch.float16:
            encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504)

        return encoder_hidden_states, hidden_states

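def _example_flux2_transformer_block():
    # Illustrative sketch, not part of the original model code: one double-stream block
    # forward pass. Identity-like modulation triples (shift=0, scale=0, gate=1) stand in
    # for the real per-stream Flux2Modulation outputs; all sizes are demo assumptions.
    block = Flux2TransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
    shift, scale, gate = torch.zeros(1, 1, 64), torch.zeros(1, 1, 64), torch.ones(1, 1, 64)
    mod_params = ((shift, scale, gate), (shift, scale, gate))
    img, txt = torch.randn(1, 32, 64), torch.randn(1, 8, 64)
    txt_out, img_out = block(img, txt, mod_params, mod_params)
    assert txt_out.shape == txt.shape and img_out.shape == img.shape
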
class Flux2PosEmbed(nn.Module):
    # modified from https://github.com/black-forest-labs/flux/blob/c00d7c60b085fce8058b9df845e036090873f2ce/src/flux/modules/layers.py#L11
    def __init__(self, theta: int, axes_dim: List[int]):
        super().__init__()
        self.theta = theta
        self.axes_dim = axes_dim

    def forward(self, ids: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # Expected ids shape: [S, len(self.axes_dim)]
        cos_out = []
        sin_out = []
        pos = ids.float()
        is_mps = ids.device.type == "mps"
        is_npu = ids.device.type == "npu"
        freqs_dtype = torch.float32 if (is_mps or is_npu) else torch.float64
        # Unlike Flux 1, loop over len(self.axes_dim) rather than ids.shape[-1]
        for i in range(len(self.axes_dim)):
            cos, sin = get_1d_rotary_pos_embed(
                self.axes_dim[i],
                pos[..., i],
                theta=self.theta,
                repeat_interleave_real=True,
                use_real=True,
                freqs_dtype=freqs_dtype,
            )
            cos_out.append(cos)
            sin_out.append(sin)
        freqs_cos = torch.cat(cos_out, dim=-1).to(ids.device)
        freqs_sin = torch.cat(sin_out, dim=-1).to(ids.device)
        return freqs_cos, freqs_sin

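def _example_flux2_pos_embed():
    # Illustrative sketch, not part of the original model code: Flux2PosEmbed turns
    # position ids of shape [S, len(axes_dim)] into a (cos, sin) pair of shape
    # [S, sum(axes_dim)]. The axes sizes here are demo assumptions.
    pos_embed = Flux2PosEmbed(theta=2000, axes_dim=[8, 8])
    ids = torch.stack([torch.arange(16), torch.zeros(16, dtype=torch.long)], dim=-1)  # [S, 2]
    cos, sin = pos_embed(ids)
    assert cos.shape == (16, 16) and sin.shape == (16, 16)
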
class Flux2TimestepGuidanceEmbeddings(nn.Module):
    def __init__(self, in_channels: int = 256, embedding_dim: int = 6144, bias: bool = False):
        super().__init__()

        self.time_proj = Timesteps(num_channels=in_channels, flip_sin_to_cos=True, downscale_freq_shift=0)
        self.timestep_embedder = TimestepEmbedding(
            in_channels=in_channels, time_embed_dim=embedding_dim, sample_proj_bias=bias
        )
        self.guidance_embedder = TimestepEmbedding(
            in_channels=in_channels, time_embed_dim=embedding_dim, sample_proj_bias=bias
        )

    def forward(self, timestep: torch.Tensor, guidance: torch.Tensor) -> torch.Tensor:
        timesteps_proj = self.time_proj(timestep)
        timesteps_emb = self.timestep_embedder(timesteps_proj.to(timestep.dtype))  # (N, D)

        guidance_proj = self.time_proj(guidance)
        guidance_emb = self.guidance_embedder(guidance_proj.to(guidance.dtype))  # (N, D)

        time_guidance_emb = timesteps_emb + guidance_emb
        return time_guidance_emb

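def _example_flux2_timestep_guidance_embeddings():
    # Illustrative sketch, not part of the original model code: scalar timestep and
    # guidance values are sinusoidally projected, embedded, and summed into a single
    # conditioning vector of width `embedding_dim`. The sizes are demo assumptions.
    embed = Flux2TimestepGuidanceEmbeddings(in_channels=256, embedding_dim=64)
    temb = embed(torch.tensor([500.0]), torch.tensor([4000.0]))
    assert temb.shape == (1, 64)
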
class Flux2Modulation(nn.Module):
    def __init__(self, dim: int, mod_param_sets: int = 2, bias: bool = False):
        super().__init__()
        self.mod_param_sets = mod_param_sets

        self.linear = nn.Linear(dim, dim * 3 * self.mod_param_sets, bias=bias)
        self.act_fn = nn.SiLU()

    def forward(self, temb: torch.Tensor) -> Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], ...]:
        mod = self.act_fn(temb)
        mod = self.linear(mod)
        if mod.ndim == 2:
            mod = mod.unsqueeze(1)
        mod_params = torch.chunk(mod, 3 * self.mod_param_sets, dim=-1)
        # Return tuple of 3-tuples of modulation params shift/scale/gate
        return tuple(mod_params[3 * i : 3 * (i + 1)] for i in range(self.mod_param_sets))

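def _example_flux2_modulation():
    # Illustrative sketch, not part of the original model code: Flux2Modulation maps a
    # [B, dim] time embedding to `mod_param_sets` (shift, scale, gate) triples, each of
    # shape [B, 1, dim], ready to broadcast over a [B, S, dim] token sequence.
    mod = Flux2Modulation(dim=64, mod_param_sets=2)
    (shift1, scale1, gate1), (shift2, scale2, gate2) = mod(torch.randn(2, 64))
    assert shift1.shape == (2, 1, 64)
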
class Flux2Transformer2DModel(
    ModelMixin,
    ConfigMixin,
    FromOriginalModelMixin,
):
    """
    The Transformer model introduced in Flux 2.

    Reference: https://blackforestlabs.ai/announcing-black-forest-labs/

    Args:
        patch_size (`int`, defaults to `1`):
            Patch size to turn the input data into small patches.
        in_channels (`int`, defaults to `128`):
            The number of channels in the input.
        out_channels (`int`, *optional*, defaults to `None`):
            The number of channels in the output. If not specified, it defaults to `in_channels`.
        num_layers (`int`, defaults to `8`):
            The number of layers of dual stream DiT blocks to use.
        num_single_layers (`int`, defaults to `48`):
            The number of layers of single stream DiT blocks to use.
        attention_head_dim (`int`, defaults to `128`):
            The number of dimensions to use for each attention head.
        num_attention_heads (`int`, defaults to `48`):
            The number of attention heads to use.
        joint_attention_dim (`int`, defaults to `15360`):
            The embedding/channel dimension of `encoder_hidden_states`.
        timestep_guidance_channels (`int`, defaults to `256`):
            The number of channels of the sinusoidal timestep/guidance projections.
        mlp_ratio (`float`, defaults to `3.0`):
            The expansion ratio of the feedforward hidden dimension relative to the transformer width.
        axes_dims_rope (`Tuple[int, ...]`, defaults to `(32, 32, 32, 32)`):
            The dimensions to use for the rotary positional embeddings.
        rope_theta (`int`, defaults to `2000`):
            The base frequency of the rotary positional embeddings.
        eps (`float`, defaults to `1e-6`):
            Epsilon value used by the normalization layers.
    """

    _supports_gradient_checkpointing = True
    # _no_split_modules = ["Flux2TransformerBlock", "Flux2SingleTransformerBlock"]
    # _skip_layerwise_casting_patterns = ["pos_embed", "norm"]
    # _repeated_blocks = ["Flux2TransformerBlock", "Flux2SingleTransformerBlock"]

    @register_to_config
    def __init__(
        self,
        patch_size: int = 1,
        in_channels: int = 128,
        out_channels: Optional[int] = None,
        num_layers: int = 8,
        num_single_layers: int = 48,
        attention_head_dim: int = 128,
        num_attention_heads: int = 48,
        joint_attention_dim: int = 15360,
        timestep_guidance_channels: int = 256,
        mlp_ratio: float = 3.0,
        axes_dims_rope: Tuple[int, ...] = (32, 32, 32, 32),
        rope_theta: int = 2000,
        eps: float = 1e-6,
    ):
        super().__init__()
        self.out_channels = out_channels or in_channels
        self.inner_dim = num_attention_heads * attention_head_dim

        # 1. Sinusoidal positional embedding for RoPE on image and text tokens
        self.pos_embed = Flux2PosEmbed(theta=rope_theta, axes_dim=axes_dims_rope)

        # 2. Combined timestep + guidance embedding
        self.time_guidance_embed = Flux2TimestepGuidanceEmbeddings(
            in_channels=timestep_guidance_channels, embedding_dim=self.inner_dim, bias=False
        )

        # 3. Modulation (double stream and single stream blocks share modulation parameters, resp.)
        # Two sets of shift/scale/gate modulation parameters for the double stream attn and FF sub-blocks
        self.double_stream_modulation_img = Flux2Modulation(self.inner_dim, mod_param_sets=2, bias=False)
        self.double_stream_modulation_txt = Flux2Modulation(self.inner_dim, mod_param_sets=2, bias=False)
        # Only one set of modulation parameters as the attn and FF sub-blocks are run in parallel for single stream
        self.single_stream_modulation = Flux2Modulation(self.inner_dim, mod_param_sets=1, bias=False)

        # 4. Input projections
        self.x_embedder = nn.Linear(in_channels, self.inner_dim, bias=False)
        self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim, bias=False)

        # 5. Double Stream Transformer Blocks
        self.transformer_blocks = nn.ModuleList(
            [
                Flux2TransformerBlock(
                    dim=self.inner_dim,
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    mlp_ratio=mlp_ratio,
                    eps=eps,
                    bias=False,
                )
                for _ in range(num_layers)
            ]
        )

        # 6. Single Stream Transformer Blocks
        self.single_transformer_blocks = nn.ModuleList(
            [
                Flux2SingleTransformerBlock(
                    dim=self.inner_dim,
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    mlp_ratio=mlp_ratio,
                    eps=eps,
                    bias=False,
                )
                for _ in range(num_single_layers)
            ]
        )

        # 7. Output layers
        self.norm_out = AdaLayerNormContinuous(
            self.inner_dim, self.inner_dim, elementwise_affine=False, eps=eps, bias=False
        )
        self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=False)

        self.gradient_checkpointing = False
        self.sp_world_size = 1
        self.sp_world_rank = 0

    def _set_gradient_checkpointing(self, *args, **kwargs):
        if "value" in kwargs:
            self.gradient_checkpointing = kwargs["value"]
        elif "enable" in kwargs:
            self.gradient_checkpointing = kwargs["enable"]
        else:
            raise ValueError("Expected a `value` or `enable` keyword argument to set gradient checkpointing.")

    def enable_multi_gpus_inference(self):
        self.sp_world_size = get_sequence_parallel_world_size()
        self.sp_world_rank = get_sequence_parallel_rank()
        self.all_gather = get_sp_group().all_gather
        self.set_attn_processor(Flux2MultiGPUsAttnProcessor2_0())

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    @property
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor = None,
        timestep: torch.LongTensor = None,
        img_ids: torch.Tensor = None,
        txt_ids: torch.Tensor = None,
        guidance: torch.Tensor = None,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
        return_dict: bool = True,
    ) -> Union[torch.Tensor, Transformer2DModelOutput]:
        """
        The [`Flux2Transformer2DModel`] forward method.

        Args:
            hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`):
                Input `hidden_states`.
            encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`):
                Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
            timestep (`torch.LongTensor`):
                Used to indicate denoising step.
            img_ids (`torch.Tensor` of shape `(image_sequence_length, len(axes_dims_rope))`):
                Positional ids of the image tokens, used to compute the rotary embeddings.
            txt_ids (`torch.Tensor` of shape `(text_sequence_length, len(axes_dims_rope))`):
                Positional ids of the text tokens, used to compute the rotary embeddings.
            guidance (`torch.Tensor`):
                Guidance scale values for the guidance-distilled variant of the model.
            joint_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
                tuple.

        Returns:
            If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
            `tuple` where the first element is the sample tensor.
        """
        # 0. Handle input arguments
        if joint_attention_kwargs is not None:
            joint_attention_kwargs = joint_attention_kwargs.copy()
            lora_scale = joint_attention_kwargs.pop("scale", 1.0)
        else:
            lora_scale = 1.0

        # 1. Calculate timestep embedding and modulation parameters
        timestep = timestep.to(hidden_states.dtype) * 1000
        guidance = guidance.to(hidden_states.dtype) * 1000
        temb = self.time_guidance_embed(timestep, guidance)

        double_stream_mod_img = self.double_stream_modulation_img(temb)
        double_stream_mod_txt = self.double_stream_modulation_txt(temb)
        single_stream_mod = self.single_stream_modulation(temb)[0]

        # 2. Input projection for image (hidden_states) and conditioning text (encoder_hidden_states)
        hidden_states = self.x_embedder(hidden_states)
        encoder_hidden_states = self.context_embedder(encoder_hidden_states)

        # 3. Calculate RoPE embeddings from image and text tokens
        # NOTE: the below logic means that we can't support batched inference with images of different resolutions or
        # text prompts of different lengths. Is this a use case we want to support?
        if img_ids.ndim == 3:
            img_ids = img_ids[0]
        if txt_ids.ndim == 3:
            txt_ids = txt_ids[0]

        if is_torch_npu_available():
            freqs_cos_image, freqs_sin_image = self.pos_embed(img_ids.cpu())
            image_rotary_emb = (freqs_cos_image.npu(), freqs_sin_image.npu())
            freqs_cos_text, freqs_sin_text = self.pos_embed(txt_ids.cpu())
            text_rotary_emb = (freqs_cos_text.npu(), freqs_sin_text.npu())
        else:
            image_rotary_emb = self.pos_embed(img_ids)
            text_rotary_emb = self.pos_embed(txt_ids)
        concat_rotary_emb = (
            torch.cat([text_rotary_emb[0], image_rotary_emb[0]], dim=0),
            torch.cat([text_rotary_emb[1], image_rotary_emb[1]], dim=0),
        )

        # Context Parallel
        if self.sp_world_size > 1:
            hidden_states = torch.chunk(hidden_states, self.sp_world_size, dim=1)[self.sp_world_rank]
            if concat_rotary_emb is not None:
                txt_rotary_emb = (
                    concat_rotary_emb[0][:encoder_hidden_states.shape[1]],
                    concat_rotary_emb[1][:encoder_hidden_states.shape[1]]
                )
                concat_rotary_emb = (
                    torch.chunk(concat_rotary_emb[0][encoder_hidden_states.shape[1]:], self.sp_world_size, dim=0)[self.sp_world_rank],
                    torch.chunk(concat_rotary_emb[1][encoder_hidden_states.shape[1]:], self.sp_world_size, dim=0)[self.sp_world_rank],
                )
                concat_rotary_emb = [
                    torch.cat([_txt_rotary_emb, _image_rotary_emb], dim=0)
                    for _txt_rotary_emb, _image_rotary_emb in zip(txt_rotary_emb, concat_rotary_emb)
                ]

        # 4. Double Stream Transformer Blocks
        for index_block, block in enumerate(self.transformer_blocks):
            if torch.is_grad_enabled() and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    encoder_hidden_states,
                    double_stream_mod_img,
                    double_stream_mod_txt,
                    concat_rotary_emb,
                    joint_attention_kwargs,
                    **ckpt_kwargs,
                )
            else:
                encoder_hidden_states, hidden_states = block(
                    hidden_states=hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    temb_mod_params_img=double_stream_mod_img,
                    temb_mod_params_txt=double_stream_mod_txt,
                    image_rotary_emb=concat_rotary_emb,
                    joint_attention_kwargs=joint_attention_kwargs,
                )

        # 5. Single Stream Transformer Blocks
        for index_block, block in enumerate(self.single_transformer_blocks):
            if torch.is_grad_enabled() and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    encoder_hidden_states,
                    single_stream_mod,
                    concat_rotary_emb,
                    joint_attention_kwargs,
                    **ckpt_kwargs,
                )
            else:
                encoder_hidden_states, hidden_states = block(
                    hidden_states=hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    temb_mod_params=single_stream_mod,
                    image_rotary_emb=concat_rotary_emb,
                    joint_attention_kwargs=joint_attention_kwargs,
                )

        # 6. Output layers
        hidden_states = self.norm_out(hidden_states, temb)
        output = self.proj_out(hidden_states)

        if self.sp_world_size > 1:
            output = self.all_gather(output, dim=1)

        if not return_dict:
            return (output,)
        return Transformer2DModelOutput(sample=output)

    @classmethod
    def from_pretrained(
        cls, pretrained_model_path, subfolder=None, transformer_additional_kwargs={},
        low_cpu_mem_usage=False, torch_dtype=torch.bfloat16
    ):
        # Copy to avoid mutating the (mutable) default argument across calls
        transformer_additional_kwargs = dict(transformer_additional_kwargs)

        if subfolder is not None:
            pretrained_model_path = os.path.join(pretrained_model_path, subfolder)
        print(f"Loading transformer's pretrained weights from {pretrained_model_path} ...")

        config_file = os.path.join(pretrained_model_path, 'config.json')
        if not os.path.isfile(config_file):
            raise RuntimeError(f"{config_file} does not exist")
        with open(config_file, "r") as f:
            config = json.load(f)

        from diffusers.utils import WEIGHTS_NAME
        model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)
        model_file_safetensors = model_file.replace(".bin", ".safetensors")

        if "dict_mapping" in transformer_additional_kwargs.keys():
            for key in transformer_additional_kwargs["dict_mapping"]:
                transformer_additional_kwargs[transformer_additional_kwargs["dict_mapping"][key]] = config[key]

        if low_cpu_mem_usage:
            try:
                import re

                from diffusers import __version__ as diffusers_version
                # Compare versions with `packaging` rather than lexicographically, which
                # would misorder e.g. "0.4.0" and "0.33.0".
                from packaging import version as version_parser

                diffusers_ge_0_33 = version_parser.parse(diffusers_version) >= version_parser.parse("0.33.0")
                if diffusers_ge_0_33:
                    from diffusers.models.model_loading_utils import \
                        load_model_dict_into_meta
                else:
                    from diffusers.models.modeling_utils import \
                        load_model_dict_into_meta
                from diffusers.utils import is_accelerate_available
                if is_accelerate_available():
                    import accelerate

                # Instantiate model with empty weights
                with accelerate.init_empty_weights():
                    model = cls.from_config(config, **transformer_additional_kwargs)

                param_device = "cpu"
                if os.path.exists(model_file):
                    state_dict = torch.load(model_file, map_location="cpu")
                elif os.path.exists(model_file_safetensors):
                    from safetensors.torch import load_file
                    state_dict = load_file(model_file_safetensors)
                else:
                    from safetensors.torch import load_file
                    model_files_safetensors = glob.glob(os.path.join(pretrained_model_path, "*.safetensors"))
                    state_dict = {}
                    print(model_files_safetensors)
                    for _model_file_safetensors in model_files_safetensors:
                        _state_dict = load_file(_model_file_safetensors)
                        for key in _state_dict:
                            state_dict[key] = _state_dict[key]

                filtered_state_dict = {}
                for key in state_dict:
                    if key in model.state_dict() and model.state_dict()[key].size() == state_dict[key].size():
                        filtered_state_dict[key] = state_dict[key]
                    else:
                        print(f"Skipping key '{key}' due to size mismatch or absence in model.")

                model_keys = set(model.state_dict().keys())
                loaded_keys = set(filtered_state_dict.keys())
                missing_keys = model_keys - loaded_keys

                def initialize_missing_parameters(missing_keys, model_state_dict, torch_dtype=None):
                    initialized_dict = {}
                    with torch.no_grad():
                        for key in missing_keys:
                            param_shape = model_state_dict[key].shape
                            param_dtype = torch_dtype if torch_dtype is not None else model_state_dict[key].dtype
                            if 'weight' in key:
                                if any(norm_type in key for norm_type in ['norm', 'ln_', 'layer_norm', 'group_norm', 'batch_norm']):
                                    initialized_dict[key] = torch.ones(param_shape, dtype=param_dtype)
                                elif 'embedding' in key or 'embed' in key:
                                    initialized_dict[key] = torch.randn(param_shape, dtype=param_dtype) * 0.02
                                elif 'head' in key or 'output' in key or 'proj_out' in key:
                                    initialized_dict[key] = torch.zeros(param_shape, dtype=param_dtype)
                                elif len(param_shape) >= 2:
                                    initialized_dict[key] = torch.empty(param_shape, dtype=param_dtype)
                                    nn.init.xavier_uniform_(initialized_dict[key])
                                else:
                                    initialized_dict[key] = torch.randn(param_shape, dtype=param_dtype) * 0.02
                            elif 'bias' in key:
                                initialized_dict[key] = torch.zeros(param_shape, dtype=param_dtype)
                            elif 'running_mean' in key:
                                initialized_dict[key] = torch.zeros(param_shape, dtype=param_dtype)
                            elif 'running_var' in key:
                                initialized_dict[key] = torch.ones(param_shape, dtype=param_dtype)
                            elif 'num_batches_tracked' in key:
                                initialized_dict[key] = torch.zeros(param_shape, dtype=torch.long)
                            else:
                                initialized_dict[key] = torch.zeros(param_shape, dtype=param_dtype)
                    return initialized_dict

                if missing_keys:
                    print(f"Missing keys will be initialized: {sorted(missing_keys)}")
                    initialized_params = initialize_missing_parameters(
                        missing_keys,
                        model.state_dict(),
                        torch_dtype
                    )
                    filtered_state_dict.update(initialized_params)

                if diffusers_ge_0_33:
                    # Diffusers has refactored `load_model_dict_into_meta` since version 0.33.0 in this commit:
                    # https://github.com/huggingface/diffusers/commit/f5929e03060d56063ff34b25a8308833bec7c785.
                    load_model_dict_into_meta(
                        model,
                        filtered_state_dict,
                        dtype=torch_dtype,
                        model_name_or_path=pretrained_model_path,
                    )
                else:
                    model._convert_deprecated_attention_blocks(filtered_state_dict)
                    unexpected_keys = load_model_dict_into_meta(
                        model,
                        filtered_state_dict,
                        device=param_device,
                        dtype=torch_dtype,
                        model_name_or_path=pretrained_model_path,
                    )
                    if cls._keys_to_ignore_on_load_unexpected is not None:
                        for pat in cls._keys_to_ignore_on_load_unexpected:
                            unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
                    if len(unexpected_keys) > 0:
                        print(
                            f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {', '.join(unexpected_keys)}"
                        )
                return model
            except Exception as e:
                print(
                    f"The low_cpu_mem_usage mode did not work because of: {e}. Falling back to low_cpu_mem_usage=False."
                )

        model = cls.from_config(config, **transformer_additional_kwargs)
        if os.path.exists(model_file):
            state_dict = torch.load(model_file, map_location="cpu")
        elif os.path.exists(model_file_safetensors):
            from safetensors.torch import load_file
            state_dict = load_file(model_file_safetensors)
        else:
            from safetensors.torch import load_file
            model_files_safetensors = glob.glob(os.path.join(pretrained_model_path, "*.safetensors"))
            state_dict = {}
            for _model_file_safetensors in model_files_safetensors:
                _state_dict = load_file(_model_file_safetensors)
                for key in _state_dict:
                    state_dict[key] = _state_dict[key]

        tmp_state_dict = {}
        for key in state_dict:
            if key in model.state_dict().keys() and model.state_dict()[key].size() == state_dict[key].size():
                tmp_state_dict[key] = state_dict[key]
            else:
                print(f"Skipping key '{key}' due to size mismatch or absence in model.")
        state_dict = tmp_state_dict

        m, u = model.load_state_dict(state_dict, strict=False)
        print(f"### missing keys: {len(m)}; \n### unexpected keys: {len(u)};")
        print(m)

        params = [p.numel() if "." in n else 0 for n, p in model.named_parameters()]
        print(f"### All Parameters: {sum(params) / 1e6} M")

        # This model names its attention modules `attn` (not `attn1`), so count those.
        params = [p.numel() if "attn." in n else 0 for n, p in model.named_parameters()]
        print(f"### attn Parameters: {sum(params) / 1e6} M")

        model = model.to(torch_dtype)
        return model

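def _example_flux2_transformer_forward():
    # Illustrative sketch, not part of the original model code: a tiny, randomly
    # initialized Flux2Transformer2DModel forward pass. All sizes are demo assumptions
    # (the real config is far larger); note that sum(axes_dims_rope) must equal
    # attention_head_dim for the rotary embeddings to be valid.
    model = Flux2Transformer2DModel(
        patch_size=1,
        in_channels=16,
        num_layers=1,
        num_single_layers=1,
        attention_head_dim=16,
        num_attention_heads=4,
        joint_attention_dim=32,
        axes_dims_rope=(4, 4, 4, 4),
    )
    batch, img_len, txt_len = 1, 32, 8
    out = model(
        hidden_states=torch.randn(batch, img_len, 16),
        encoder_hidden_states=torch.randn(batch, txt_len, 32),
        timestep=torch.tensor([0.5]),
        guidance=torch.tensor([4.0]),
        img_ids=torch.zeros(img_len, 4),
        txt_ids=torch.zeros(txt_len, 4),
    ).sample
    assert out.shape == (batch, img_len, 16)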