# https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/flux2/pipeline_flux2.py
# Copyright 2025 Black Forest Labs and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.nn.functional as F

from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from diffusers.utils import BaseOutput, is_torch_xla_available, logging, replace_example_docstring
from diffusers.utils.torch_utils import randn_tensor

from ..models import (
    AutoencoderKLFlux2,
    AutoProcessor,
    Flux2ControlTransformer2DModel,
    Flux2ImageProcessor,
    Mistral3ForConditionalGeneration,
)

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import Flux2Pipeline

        >>> pipe = Flux2Pipeline.from_pretrained("black-forest-labs/FLUX.2-dev", torch_dtype=torch.bfloat16)
        >>> pipe.to("cuda")
        >>> prompt = "A cat holding a sign that says hello world"
        >>> # Depending on the variant being used, the pipeline call will slightly vary.
        >>> # Refer to the pipeline documentation for more details.
        >>> image = pipe(prompt, num_inference_steps=50, guidance_scale=2.5).images[0]
        >>> image.save("flux.png")
        ```
"""

def format_text_input(prompts: List[str], system_message: str = None):
    # Remove [IMG] tokens from prompts to avoid Pixtral validation issues
    # when truncation is enabled. The processor counts [IMG] tokens and fails
    # if the count changes after truncation.
    cleaned_txt = [prompt.replace("[IMG]", "") for prompt in prompts]
    return [
        [
            {
                "role": "system",
                "content": [{"type": "text", "text": system_message}],
            },
            {"role": "user", "content": [{"type": "text", "text": prompt}]},
        ]
        for prompt in cleaned_txt
    ]
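
# Illustrative sketch (not part of the pipeline): the chat-style structure `format_text_input`
# produces for a single prompt, which is what `AutoProcessor.apply_chat_template` consumes below.
#
#     messages = format_text_input(
#         prompts=["A cat holding a sign"],
#         system_message="You are an AI that reasons about image descriptions.",
#     )
#     # messages == [
#     #     [
#     #         {"role": "system", "content": [{"type": "text", "text": "You are an AI that reasons about image descriptions."}]},
#     #         {"role": "user", "content": [{"type": "text", "text": "A cat holding a sign"}]},
#     #     ]
#     # ]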

def compute_empirical_mu(image_seq_len: int, num_steps: int) -> float:
    a1, b1 = 8.73809524e-05, 1.89833333
    a2, b2 = 0.00016927, 0.45666666

    if image_seq_len > 4300:
        mu = a2 * image_seq_len + b2
        return float(mu)

    m_200 = a2 * image_seq_len + b2
    m_10 = a1 * image_seq_len + b1

    a = (m_200 - m_10) / 190.0
    b = m_200 - 200.0 * a

    mu = a * num_steps + b
    return float(mu)
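
# Illustrative sketch (not part of the pipeline): for a 1024x1024 generation with the usual 8x VAE
# downsampling and 2x2 patching, the packed sequence is (1024 / 16) ** 2 = 4096 tokens, so `mu` is a
# linear interpolation in `num_steps` between the 10-step fit (a1, b1) and the 200-step fit (a2, b2).
#
#     mu_50 = compute_empirical_mu(image_seq_len=4096, num_steps=50)
#     mu_10 = compute_empirical_mu(image_seq_len=4096, num_steps=10)    # equals a1 * 4096 + b1
#     mu_200 = compute_empirical_mu(image_seq_len=4096, num_steps=200)  # equals a2 * 4096 + b2
#     # sequences longer than 4300 tokens always use the 200-step fit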

# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
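
# Illustrative sketch (not part of the pipeline): how this helper is used further down with a
# flow-matching scheduler, assuming the scheduler is configured for dynamic shifting so that the
# `mu` keyword is forwarded to `set_timesteps` via **kwargs.
#
#     scheduler = FlowMatchEulerDiscreteScheduler()
#     sigmas = np.linspace(1.0, 1 / 50, 50)
#     mu = compute_empirical_mu(image_seq_len=4096, num_steps=50)
#     timesteps, num_inference_steps = retrieve_timesteps(scheduler, 50, "cuda", sigmas=sigmas, mu=mu)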

# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")

@dataclass
class Flux2PipelineOutput(BaseOutput):
    """
    Output class for Flux2 image generation pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `torch.Tensor` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or numpy array or torch tensor of shape `(batch_size,
            height, width, num_channels)`. PIL images or numpy arrays present the denoised images of the diffusion
            pipeline. Torch tensors can represent either the denoised images or the intermediate latents ready to be
            passed to the decoder.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]

class Flux2ControlPipeline(DiffusionPipeline):
    r"""
    The Flux2 pipeline for controllable text-to-image generation.

    Reference: [https://bfl.ai/blog/flux-2](https://bfl.ai/blog/flux-2)

    Args:
        transformer ([`Flux2ControlTransformer2DModel`]):
            Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
        vae ([`AutoencoderKLFlux2`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`Mistral3ForConditionalGeneration`]):
            [Mistral3ForConditionalGeneration](https://huggingface.co/docs/transformers/en/model_doc/mistral3#transformers.Mistral3ForConditionalGeneration)
        tokenizer (`AutoProcessor`):
            Tokenizer of class
            [PixtralProcessor](https://huggingface.co/docs/transformers/en/model_doc/pixtral#transformers.PixtralProcessor).
    """
    model_cpu_offload_seq = "text_encoder->transformer->vae"
    _callback_tensor_inputs = ["latents", "prompt_embeds"]

    def __init__(
        self,
        scheduler: FlowMatchEulerDiscreteScheduler,
        vae: AutoencoderKLFlux2,
        text_encoder: Mistral3ForConditionalGeneration,
        tokenizer: AutoProcessor,
        transformer: Flux2ControlTransformer2DModel,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            transformer=transformer,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        # Flux latents are turned into 2x2 patches and packed. This means the latent width and height have to be
        # divisible by the patch size, so the vae scale factor is multiplied by the patch size to account for this.
        self.image_processor = Flux2ImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
        self.diffusers_image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
        self.mask_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
        )
        self.tokenizer_max_length = 512
        self.default_sample_size = 128
        # fmt: off
        self.system_message = "You are an AI that reasons about image descriptions. You give structured responses focusing on object relationships, object attribution and actions without speculation."
        # fmt: on

    def _get_mistral_3_small_prompt_embeds(
        self,
        text_encoder: Mistral3ForConditionalGeneration,
        tokenizer: AutoProcessor,
        prompt: Union[str, List[str]],
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        max_sequence_length: int = 512,
        # fmt: off
        system_message: str = "You are an AI that reasons about image descriptions. You give structured responses focusing on object relationships, object attribution and actions without speculation.",
        # fmt: on
        hidden_states_layers: List[int] = (10, 20, 30),
    ):
        dtype = text_encoder.dtype if dtype is None else dtype
        device = text_encoder.device if device is None else device

        prompt = [prompt] if isinstance(prompt, str) else prompt

        # Format input messages
        messages_batch = format_text_input(prompts=prompt, system_message=system_message)

        # Process all messages at once
        inputs = tokenizer.apply_chat_template(
            messages_batch,
            add_generation_prompt=False,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            padding="max_length",
            truncation=True,
            max_length=max_sequence_length,
        )

        # Move to device
        input_ids = inputs["input_ids"].to(device)
        attention_mask = inputs["attention_mask"].to(device)

        # Forward pass through the model
        output = text_encoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True,
            use_cache=False,
        )

        # Only use outputs from intermediate layers and stack them
        out = torch.stack([output.hidden_states[k] for k in hidden_states_layers], dim=1)
        out = out.to(dtype=dtype, device=device)

        batch_size, num_channels, seq_len, hidden_dim = out.shape
        prompt_embeds = out.permute(0, 2, 1, 3).reshape(batch_size, seq_len, num_channels * hidden_dim)

        return prompt_embeds

    @staticmethod
    def _prepare_text_ids(
        x: torch.Tensor,  # (B, L, D) or (L, D)
        t_coord: Optional[torch.Tensor] = None,
    ):
        B, L, _ = x.shape
        out_ids = []
        for i in range(B):
            t = torch.arange(1) if t_coord is None else t_coord[i]
            h = torch.arange(1)
            w = torch.arange(1)
            l = torch.arange(L)
            coords = torch.cartesian_prod(t, h, w, l)
            out_ids.append(coords)

        return torch.stack(out_ids)

    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        prompt_embeds: Optional[torch.Tensor] = None,
        max_sequence_length: int = 512,
        text_encoder_out_layers: Tuple[int] = (10, 20, 30),
    ):
        device = device or self._execution_device

        if prompt is None:
            prompt = ""
        prompt = [prompt] if isinstance(prompt, str) else prompt

        if prompt_embeds is None:
            prompt_embeds = self._get_mistral_3_small_prompt_embeds(
                text_encoder=self.text_encoder,
                tokenizer=self.tokenizer,
                prompt=prompt,
                device=device,
                max_sequence_length=max_sequence_length,
                system_message=self.system_message,
                hidden_states_layers=text_encoder_out_layers,
            )

        batch_size, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        text_ids = self._prepare_text_ids(prompt_embeds)
        text_ids = text_ids.to(device)
        return prompt_embeds, text_ids
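
    # Illustrative sketch (not part of the pipeline; `pipe` denotes an instantiated Flux2ControlPipeline):
    # shapes produced by `encode_prompt`. With `max_sequence_length=512` and three hidden-state layers
    # stacked along the channel dimension, `prompt_embeds` has shape
    # (batch_size * num_images_per_prompt, 512, 3 * hidden_dim) and `text_ids` holds one (T, H, W, L)
    # coordinate per text token, shape (batch_size * num_images_per_prompt, 512, 4).
    #
    #     prompt_embeds, text_ids = pipe.encode_prompt("a photo of a cat", num_images_per_prompt=1)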

    @staticmethod
    def _prepare_latent_ids(
        latents: torch.Tensor,  # (B, C, H, W)
    ):
        r"""
        Generates 4D position coordinates (T, H, W, L) for latent tensors.

        Args:
            latents (torch.Tensor):
                Latent tensor of shape (B, C, H, W)

        Returns:
            torch.Tensor:
                Position IDs tensor of shape (B, H*W, 4). All batches share the same coordinate structure: T=0,
                H=[0..H-1], W=[0..W-1], L=0
        """
        batch_size, _, height, width = latents.shape

        t = torch.arange(1)  # [0] - time dimension
        h = torch.arange(height)
        w = torch.arange(width)
        l = torch.arange(1)  # [0] - layer dimension

        # Create position IDs: (H*W, 4)
        latent_ids = torch.cartesian_prod(t, h, w, l)

        # Expand to batch: (B, H*W, 4)
        latent_ids = latent_ids.unsqueeze(0).expand(batch_size, -1, -1)

        return latent_ids
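
    # Illustrative sketch (not part of the pipeline): the coordinate layout for a tiny 2x3 latent.
    # torch.cartesian_prod(t, h, w, l) enumerates rows in row-major order, so H*W = 6 ids are produced:
    #
    #     dummy = torch.zeros(1, 4, 2, 3)
    #     ids = Flux2ControlPipeline._prepare_latent_ids(dummy)
    #     # ids[0] == tensor([[0, 0, 0, 0],
    #     #                   [0, 0, 1, 0],
    #     #                   [0, 0, 2, 0],
    #     #                   [0, 1, 0, 0],
    #     #                   [0, 1, 1, 0],
    #     #                   [0, 1, 2, 0]])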

    @staticmethod
    def _prepare_image_ids(
        image_latents: List[torch.Tensor],  # [(1, C, H, W), (1, C, H, W), ...]
        scale: int = 10,
    ):
        r"""
        Generates 4D time-space coordinates (T, H, W, L) for a sequence of image latents.

        This function creates a unique coordinate for every pixel/patch across all input latents, which may have
        different dimensions.

        Args:
            image_latents (List[torch.Tensor]):
                A list of image latent feature tensors, typically of shape (C, H, W).
            scale (int, optional):
                A factor used to define the time separation (T-coordinate) between latents. The T-coordinate for the
                i-th latent is: 'scale + scale * i'. Defaults to 10.

        Returns:
            torch.Tensor:
                The combined coordinate tensor. Shape: (1, N_total, 4), where N_total is the sum of (H * W) for all
                input latents.

        Coordinate Components (Dimension 4):
            - T (Time): The unique index indicating which latent image the coordinate belongs to.
            - H (Height): The row index within that latent image.
            - W (Width): The column index within that latent image.
            - L (Seq. Length): A sequence length dimension, which is always fixed at 0 (size 1)
        """
        if not isinstance(image_latents, list):
            raise ValueError(f"Expected `image_latents` to be a list, got {type(image_latents)}.")

        # create time offset for each reference image
        t_coords = [scale + scale * t for t in torch.arange(0, len(image_latents))]
        t_coords = [t.view(-1) for t in t_coords]

        image_latent_ids = []
        for x, t in zip(image_latents, t_coords):
            x = x.squeeze(0)
            _, height, width = x.shape
            x_ids = torch.cartesian_prod(t, torch.arange(height), torch.arange(width), torch.arange(1))
            image_latent_ids.append(x_ids)

        image_latent_ids = torch.cat(image_latent_ids, dim=0)
        image_latent_ids = image_latent_ids.unsqueeze(0)

        return image_latent_ids
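
    # Illustrative sketch (not part of the pipeline): with two reference latents and the default
    # scale of 10, the first reference gets T=10 and the second T=20, so reference tokens never
    # collide with the generated image tokens, which use T=0 (see `_prepare_latent_ids`).
    #
    #     refs = [torch.zeros(1, 128, 2, 2), torch.zeros(1, 128, 3, 3)]
    #     ids = Flux2ControlPipeline._prepare_image_ids(refs)
    #     # ids.shape == (1, 2 * 2 + 3 * 3, 4); ids[0, :4, 0] == 10, ids[0, 4:, 0] == 20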

    @staticmethod
    def _patchify_latents(latents):
        batch_size, num_channels_latents, height, width = latents.shape
        latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
        latents = latents.permute(0, 1, 3, 5, 2, 4)
        latents = latents.reshape(batch_size, num_channels_latents * 4, height // 2, width // 2)
        return latents

    @staticmethod
    def _unpatchify_latents(latents):
        batch_size, num_channels_latents, height, width = latents.shape
        latents = latents.reshape(batch_size, num_channels_latents // (2 * 2), 2, 2, height, width)
        latents = latents.permute(0, 1, 4, 2, 5, 3)
        latents = latents.reshape(batch_size, num_channels_latents // (2 * 2), height * 2, width * 2)
        return latents
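
    # Illustrative sketch (not part of the pipeline): patchify folds every 2x2 spatial block into the
    # channel dimension, (B, C, H, W) -> (B, 4C, H/2, W/2); unpatchify is its exact inverse.
    #
    #     x = torch.randn(1, 32, 64, 64)
    #     y = Flux2ControlPipeline._patchify_latents(x)  # (1, 128, 32, 32)
    #     assert torch.equal(Flux2ControlPipeline._unpatchify_latents(y), x)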

    @staticmethod
    def _pack_latents(latents):
        """
        pack latents: (batch_size, num_channels, height, width) -> (batch_size, height * width, num_channels)
        """
        batch_size, num_channels, height, width = latents.shape
        latents = latents.reshape(batch_size, num_channels, height * width).permute(0, 2, 1)
        return latents

    @staticmethod
    def _unpack_latents_with_ids(x: torch.Tensor, x_ids: torch.Tensor) -> torch.Tensor:
        """
        Use the position ids to scatter each token back into its (H, W) location.
        """
        x_list = []
        for data, pos in zip(x, x_ids):
            _, ch = data.shape  # noqa: F841
            h_ids = pos[:, 1].to(torch.int64)
            w_ids = pos[:, 2].to(torch.int64)

            h = torch.max(h_ids) + 1
            w = torch.max(w_ids) + 1

            flat_ids = h_ids * w + w_ids

            out = torch.zeros((h * w, ch), device=data.device, dtype=data.dtype)
            out.scatter_(0, flat_ids.unsqueeze(1).expand(-1, ch), data)

            # reshape from (H * W, C) to (H, W, C) and permute to (C, H, W)
            out = out.view(h, w, ch).permute(2, 0, 1)
            x_list.append(out)

        return torch.stack(x_list, dim=0)
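
    # Illustrative sketch (not part of the pipeline): packing followed by id-based unpacking
    # reconstructs the patchified latent, because `_prepare_latent_ids` enumerates tokens in the
    # same row-major order used by `_pack_latents`.
    #
    #     z = torch.randn(1, 128, 32, 32)
    #     ids = Flux2ControlPipeline._prepare_latent_ids(z)    # (1, 1024, 4)
    #     tokens = Flux2ControlPipeline._pack_latents(z)       # (1, 1024, 128)
    #     assert torch.equal(Flux2ControlPipeline._unpack_latents_with_ids(tokens, ids), z)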

    def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
        if image.ndim != 4:
            raise ValueError(f"Expected image dims 4, got {image.ndim}.")

        image_latents = retrieve_latents(self.vae.encode(image), generator=generator, sample_mode="argmax")
        image_latents = self._patchify_latents(image_latents)

        # normalize the latents with the VAE's running batch-norm statistics
        latents_bn_mean = self.vae.bn.running_mean.view(1, -1, 1, 1).to(image_latents.device, image_latents.dtype)
        latents_bn_std = torch.sqrt(self.vae.bn.running_var.view(1, -1, 1, 1) + self.vae.config.batch_norm_eps).to(
            image_latents.device, image_latents.dtype
        )
        image_latents = (image_latents - latents_bn_mean) / latents_bn_std
        return image_latents

    def prepare_latents(
        self,
        batch_size,
        num_latents_channels,
        height,
        width,
        dtype,
        device,
        generator: torch.Generator,
        latents: Optional[torch.Tensor] = None,
    ):
        # VAE applies 8x compression on images but we must also account for packing which requires
        # latent height and width to be divisible by 2.
        height = 2 * (int(height) // (self.vae_scale_factor * 2))
        width = 2 * (int(width) // (self.vae_scale_factor * 2))
        shape = (batch_size, num_latents_channels * 4, height // 2, width // 2)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device=device, dtype=dtype)

        latent_ids = self._prepare_latent_ids(latents)
        latent_ids = latent_ids.to(device)

        latents = self._pack_latents(latents)  # [B, C, H, W] -> [B, H*W, C]

        return latents, latent_ids
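
    # Illustrative sketch (not part of the pipeline): shape arithmetic for a 1024x1024 request,
    # assuming an 8x VAE scale factor and a transformer with in_channels=128 (so 32 raw latent channels).
    # 1024 px -> 128 latent pixels -> 64x64 patchified grid -> packed latents (batch, 4096, 128)
    # and latent_ids of shape (batch, 4096, 4).
    #
    #     latents, latent_ids = pipe.prepare_latents(1, 32, 1024, 1024, torch.bfloat16, "cuda", generator=None)
    #     # latents.shape == (1, 4096, 128); latent_ids.shape == (1, 4096, 4)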

    def prepare_image_latents(
        self,
        images: List[torch.Tensor],
        batch_size,
        generator: torch.Generator,
        device,
        dtype,
    ):
        image_latents = []
        for image in images:
            image = image.to(device=device, dtype=dtype)
            image_latent = self._encode_vae_image(image=image, generator=generator)
            image_latents.append(image_latent)  # (1, 128, 32, 32)

        image_latent_ids = self._prepare_image_ids(image_latents)

        # Pack each latent and concatenate
        packed_latents = []
        for latent in image_latents:
            # latent: (1, 128, 32, 32)
            packed = self._pack_latents(latent)  # (1, 1024, 128)
            packed = packed.squeeze(0)  # (1024, 128) - remove batch dim
            packed_latents.append(packed)

        # Concatenate all reference tokens along the sequence dimension
        image_latents = torch.cat(packed_latents, dim=0)  # (N*1024, 128)
        image_latents = image_latents.unsqueeze(0)  # (1, N*1024, 128)
        image_latents = image_latents.repeat(batch_size, 1, 1)

        image_latent_ids = image_latent_ids.repeat(batch_size, 1, 1)
        image_latent_ids = image_latent_ids.to(device)

        return image_latents, image_latent_ids

    def check_inputs(
        self,
        prompt,
        height,
        width,
        prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
    ):
        if (height is not None and height % (self.vae_scale_factor * 2) != 0) or (
            width is not None and width % (self.vae_scale_factor * 2) != 0
        ):
            logger.warning(
                f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly"
            )

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def joint_attention_kwargs(self):
        return self._attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def current_timestep(self):
        return self._current_timestep

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        image: Optional[Union[List[PIL.Image.Image], PIL.Image.Image]] = None,
        inpaint_image: Optional[Union[List[PIL.Image.Image], PIL.Image.Image]] = None,
        mask_image: Optional[torch.FloatTensor] = None,
        control_image: Optional[torch.FloatTensor] = None,
        control_context_scale: float = 1.0,
        num_inference_steps: int = 50,
        sigmas: Optional[List[float]] = None,
        guidance_scale: Optional[float] = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 512,
        text_encoder_out_layers: Tuple[int] = (10, 20, 30),
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
                `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both
                numpy array and pytorch tensor, the expected value range is between `[0, 1]`. If it's a tensor or a
                list of tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or
                a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`. It can also accept image
                latents as `image`, but if passing latents directly they are not encoded again.
            inpaint_image (`PIL.Image.Image` or `List[PIL.Image.Image]`, *optional*):
                Image to be inpainted. The regions selected by `mask_image` are regenerated while the remaining
                regions are preserved.
            mask_image (`torch.FloatTensor` or `PIL.Image.Image`, *optional*):
                Mask selecting the regions of `inpaint_image` to regenerate. It is binarized, converted to a
                single-channel keep-map, and packed into the control context.
            control_image (`torch.FloatTensor` or `PIL.Image.Image`, *optional*):
                Control conditioning image that is VAE-encoded and packed into the control context passed to the
                transformer.
            control_context_scale (`float`, *optional*, defaults to 1.0):
                Scale applied to the control context inside the transformer.
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            height (`int`, *optional*, defaults to `self.default_sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image. This is set to 1024 by default for the best results.
            width (`int`, *optional*, defaults to `self.default_sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image. This is set to 1024 by default for the best results.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from the `prompt` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.flux2.Flux2PipelineOutput`] instead of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`.
            text_encoder_out_layers (`Tuple[int]`):
                Layer indices to use in the `text_encoder` to derive the final prompt embeddings.

        Examples:

        Returns:
            [`~pipelines.flux2.Flux2PipelineOutput`] or `tuple`: [`~pipelines.flux2.Flux2PipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
            generated images.
        """
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt=prompt,
            height=height,
            width=width,
            prompt_embeds=prompt_embeds,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
        )

        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._current_timestep = None
        self._interrupt = False

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        weight_dtype = self.text_encoder.dtype

        latents_bn_mean = self.vae.bn.running_mean.view(1, -1, 1, 1).to(device, weight_dtype)
        latents_bn_std = torch.sqrt(self.vae.bn.running_var.view(1, -1, 1, 1) + self.vae.config.batch_norm_eps).to(
            device, weight_dtype
        )

        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor
        num_channels_latents = self.transformer.config.in_channels // 4

        # Prepare mask latent variables
        if mask_image is not None:
            mask_condition = self.mask_processor.preprocess(mask_image, height=height, width=width)
            mask_condition = torch.tile(mask_condition, [1, 3, 1, 1]).to(dtype=weight_dtype, device=device)
        else:
            # NOTE: defaulting assumption - when no mask is provided, treat the whole image as masked
            # (fully regenerated) so the control context below can still be assembled.
            mask_condition = torch.ones((batch_size, 3, height, width), dtype=weight_dtype, device=device)

        if inpaint_image is not None:
            init_image = self.diffusers_image_processor.preprocess(inpaint_image, height=height, width=width)
            init_image = init_image.to(dtype=weight_dtype, device=device) * (mask_condition < 0.5)
            inpaint_latent = self.vae.encode(init_image)[0].mode()
        else:
            # NOTE: zeros are created at the un-patchified latent resolution so the shared
            # patchify/pack path below applies uniformly to both branches.
            inpaint_latent = torch.zeros(
                (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
            ).to(device, weight_dtype)

        if control_image is not None:
            control_image = self.diffusers_image_processor.preprocess(control_image, height=height, width=width)
            control_image = control_image.to(dtype=weight_dtype, device=device)
            control_latents = self.vae.encode(control_image)[0].mode()
        else:
            control_latents = torch.zeros_like(inpaint_latent)

        mask_condition = F.interpolate(
            1 - mask_condition[:, :1], size=control_latents.size()[-2:], mode="nearest"
        ).to(device, weight_dtype)
        mask_condition = self._patchify_latents(mask_condition)
        mask_condition = self._pack_latents(mask_condition)

        if inpaint_image is not None:
            inpaint_latent = self._patchify_latents(inpaint_latent)
            inpaint_latent = (inpaint_latent - latents_bn_mean) / latents_bn_std
            inpaint_latent = self._pack_latents(inpaint_latent)
        else:
            inpaint_latent = self._patchify_latents(inpaint_latent)
            inpaint_latent = self._pack_latents(inpaint_latent)

        if control_image is not None:
            control_latents = self._patchify_latents(control_latents)
            control_latents = (control_latents - latents_bn_mean) / latents_bn_std
            control_latents = self._pack_latents(control_latents)
        else:
            control_latents = self._patchify_latents(control_latents)
            control_latents = self._pack_latents(control_latents)

        control_context = torch.concat([control_latents, mask_condition, inpaint_latent], dim=2)
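        # Per-token channel layout of `control_context` (assuming a 32-channel VAE latent, i.e. 128 packed channels):
        # [0:128]   packed control-image latents
        # [128:132] packed keep-mask (1 = keep, 0 = regenerate)
        # [132:260] packed inpaint-image latents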

        # 3. prepare text embeddings
        prompt_embeds, text_ids = self.encode_prompt(
            prompt=prompt,
            prompt_embeds=prompt_embeds,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            max_sequence_length=max_sequence_length,
            text_encoder_out_layers=text_encoder_out_layers,
        )

        # 4. process images
        if image is not None and not isinstance(image, list):
            image = [image]

        condition_images = None
        if image is not None:
            for img in image:
                self.image_processor.check_image_input(img)

            condition_images = []
            for img in image:
                image_width, image_height = img.size
                if image_width * image_height > 1024 * 1024:
                    img = self.image_processor._resize_to_target_area(img, 1024 * 1024)
                    image_width, image_height = img.size

                multiple_of = self.vae_scale_factor * 2
                image_width = (image_width // multiple_of) * multiple_of
                image_height = (image_height // multiple_of) * multiple_of
                img = self.image_processor.preprocess(img, height=image_height, width=image_width, resize_mode="crop")
                condition_images.append(img)

            height = height or image_height
            width = width or image_width

        # 5. prepare latent variables
        latents, latent_ids = self.prepare_latents(
            batch_size=batch_size * num_images_per_prompt,
            num_latents_channels=num_channels_latents,
            height=height,
            width=width,
            dtype=prompt_embeds.dtype,
            device=device,
            generator=generator,
            latents=latents,
        )

        image_latents = None
        image_latent_ids = None
        if condition_images is not None:
            image_latents, image_latent_ids = self.prepare_image_latents(
                images=condition_images,
                batch_size=batch_size * num_images_per_prompt,
                generator=generator,
                device=device,
                dtype=self.vae.dtype,
            )

        # 6. Prepare timesteps
        sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas
        if hasattr(self.scheduler.config, "use_flow_sigmas") and self.scheduler.config.use_flow_sigmas:
            sigmas = None
        image_seq_len = latents.shape[1]
        mu = compute_empirical_mu(image_seq_len=image_seq_len, num_steps=num_inference_steps)
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps,
            device,
            sigmas=sigmas,
            mu=mu,
        )
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        # handle guidance
        guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32)
        guidance = guidance.expand(latents.shape[0])

        # 7. Denoising loop
        # We set the index here to remove DtoH sync, helpful especially during compilation.
        # Check out more details here: https://github.com/huggingface/diffusers/pull/11696
        self.scheduler.set_begin_index(0)
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                self._current_timestep = t
                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latents.shape[0]).to(latents.dtype)

                latent_model_input = latents.to(self.transformer.dtype)
                control_context_input = control_context.to(self.transformer.dtype)
                latent_image_ids = latent_ids
                if image_latents is not None:
                    latent_model_input = torch.cat([latents, image_latents], dim=1).to(self.transformer.dtype)
                    latent_image_ids = torch.cat([latent_ids, image_latent_ids], dim=1)
                    # pad the control context with zeros for the appended reference-image tokens
                    local_bs, local_length, local_c = control_context.size()
                    control_context_input = torch.cat(
                        [
                            control_context,
                            torch.zeros([local_bs, image_latents.size()[1], local_c]).to(
                                control_context.device, control_context.dtype
                            ),
                        ],
                        dim=1,
                    ).to(self.transformer.dtype)

                noise_pred = self.transformer(
                    hidden_states=latent_model_input,  # (B, image_seq_len, C)
                    timestep=timestep / 1000,
                    guidance=guidance,
                    encoder_hidden_states=prompt_embeds,
                    txt_ids=text_ids,  # B, text_seq_len, 4
                    img_ids=latent_image_ids,  # B, image_seq_len, 4
                    joint_attention_kwargs=self._attention_kwargs,
                    control_context=control_context_input,
                    control_context_scale=control_context_scale,
                    return_dict=False,
                )[0]
                noise_pred = noise_pred[:, : latents.size(1)]

                # compute the previous noisy sample x_t -> x_t-1
                latents_dtype = latents.dtype
                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

                if latents.dtype != latents_dtype:
                    if torch.backends.mps.is_available():
                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                        latents = latents.to(latents_dtype)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        self._current_timestep = None

        if output_type == "latent":
            image = latents
        else:
            latents = self._unpack_latents_with_ids(latents, latent_ids)

            latents_bn_mean = self.vae.bn.running_mean.view(1, -1, 1, 1).to(latents.device, latents.dtype)
            latents_bn_std = torch.sqrt(self.vae.bn.running_var.view(1, -1, 1, 1) + self.vae.config.batch_norm_eps).to(
                latents.device, latents.dtype
            )
            latents = latents * latents_bn_std + latents_bn_mean
            latents = self._unpatchify_latents(latents)

            image = self.vae.decode(latents, return_dict=False)[0]
            image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return Flux2PipelineOutput(images=image)
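
# Illustrative usage sketch (not part of the pipeline; the checkpoint id and file paths are hypothetical,
# and PIL inputs are passed for `control_image`/`mask_image`, which the image processors also accept):
#
#     import torch
#     from PIL import Image
#
#     pipe = Flux2ControlPipeline.from_pretrained("<flux2-control-checkpoint>", torch_dtype=torch.bfloat16).to("cuda")
#     control = Image.open("control.png").convert("RGB")
#     mask = Image.open("mask.png").convert("L")
#     source = Image.open("source.png").convert("RGB")
#     out = pipe(
#         prompt="A cat holding a sign that says hello world",
#         control_image=control,
#         inpaint_image=source,
#         mask_image=mask,
#         control_context_scale=1.0,
#         num_inference_steps=50,
#         guidance_scale=4.0,
#     ).images[0]
#     out.save("flux2_control.png")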