diffusers-benchmarking-bot committed
Commit c5e225f · verified · 1 parent: bca4375

Upload folder using huggingface_hub
main/README.md CHANGED
@@ -76,7 +76,7 @@ Please also check out our [Community Scripts](https://github.com/huggingface/dif
 To load a custom pipeline you just need to pass the `custom_pipeline` argument to `DiffusionPipeline`, as one of the files in `diffusers/examples/community`. Feel free to send a PR with your own pipelines, we will merge them quickly.

 ```py
-pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", custom_pipeline="filename_in_the_community_folder")
+pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", custom_pipeline="filename_in_the_community_folder")
 ```

 ## Example usages
@@ -363,7 +363,7 @@ clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K",


 guided_pipeline = DiffusionPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5",
+    "stable-diffusion-v1-5/stable-diffusion-v1-5",
     custom_pipeline="clip_guided_stable_diffusion",
     clip_model=clip_model,
     feature_extractor=feature_extractor,
@@ -462,7 +462,7 @@ def download_image(url):
     response = requests.get(url)
     return PIL.Image.open(BytesIO(response.content)).convert("RGB")

-pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", custom_pipeline="stable_diffusion_mega", torch_dtype=torch.float16, variant="fp16")
+pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", custom_pipeline="stable_diffusion_mega", torch_dtype=torch.float16, variant="fp16")
 pipe.to("cuda")
 pipe.enable_attention_slicing()

@@ -921,7 +921,7 @@ if __name__ == "__main__":
     tensor_norm = torch.Tensor([[43.8203],[28.3668],[27.9345],[28.0084],[28.2958],[28.2576],[28.3373],[28.2695],[28.4097],[28.2790],[28.2825],[28.2807],[28.2775],[28.2708],[28.2682],[28.2624],[28.2589],[28.2611],[28.2616],[28.2639],[28.2613],[28.2566],[28.2615],[28.2665],[28.2799],[28.2885],[28.2852],[28.2863],[28.2780],[28.2818],[28.2764],[28.2532],[28.2412],[28.2336],[28.2514],[28.2734],[28.2763],[28.2977],[28.2971],[28.2948],[28.2818],[28.2676],[28.2831],[28.2890],[28.2979],[28.2999],[28.3117],[28.3363],[28.3554],[28.3626],[28.3589],[28.3597],[28.3543],[28.3660],[28.3731],[28.3717],[28.3812],[28.3753],[28.3810],[28.3777],[28.3693],[28.3713],[28.3670],[28.3691],[28.3679],[28.3624],[28.3703],[28.3703],[28.3720],[28.3594],[28.3576],[28.3562],[28.3438],[28.3376],[28.3389],[28.3433],[28.3191]])

     pipeline = DiffusionPipeline.from_pretrained(
-        "runwayml/stable-diffusion-v1-5",
+        "stable-diffusion-v1-5/stable-diffusion-v1-5",
         text_encoder=text_encoder,
         tokenizer=tokenizer,
         custom_pipeline="gluegen"
@@ -1744,7 +1744,7 @@ from diffusers.utils import load_image
 input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")

 pipe = StableDiffusionReferencePipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5",
+    "stable-diffusion-v1-5/stable-diffusion-v1-5",
     safety_checker=None,
     torch_dtype=torch.float16
     ).to('cuda:0')
@@ -1801,7 +1801,7 @@ canny_image = Image.fromarray(image)

 controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
 pipe = StableDiffusionControlNetReferencePipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5",
+    "stable-diffusion-v1-5/stable-diffusion-v1-5",
     controlnet=controlnet,
     safety_checker=None,
     torch_dtype=torch.float16
@@ -1857,7 +1857,7 @@ python -m pip install intel_extension_for_pytorch==<version_name> -f https://dev
 **Note:** The setting of generated image height/width for `prepare_for_ipex()` should be same as the setting of pipeline inference.

 ```python
-pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", custom_pipeline="stable_diffusion_ipex")
+pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", custom_pipeline="stable_diffusion_ipex")
 # For Float32
 pipe.prepare_for_ipex(prompt, dtype=torch.float32, height=512, width=512) # value of image height/width should be consistent with the pipeline inference
 # For BFloat16
@@ -1883,7 +1883,7 @@ from diffusers import StableDiffusionPipeline
 import time

 prompt = "sailing ship in storm by Rembrandt"
-model_id = "runwayml/stable-diffusion-v1-5"
+model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 # Helper function for time evaluation
 def elapsed_time(pipeline, nb_pass=3, num_inference_steps=20):
     # warmup
@@ -2507,7 +2507,7 @@ from diffusers import DiffusionPipeline

 # load the pipeline
 # make sure you're logged in with `huggingface-cli login`
-model_id_or_path = "runwayml/stable-diffusion-v1-5"
+model_id_or_path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 # can also be used with dreamlike-art/dreamlike-photoreal-2.0
 pipe = DiffusionPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16, custom_pipeline="pipeline_fabric").to("cuda")

@@ -2548,7 +2548,7 @@ image.save("black_to_blue.png")

 *With enough feedbacks you can create very similar high quality images.*

-The original codebase can be found at [sd-fabric/fabric](https://github.com/sd-fabric/fabric), and available checkpoints are [dreamlike-art/dreamlike-photoreal-2.0](https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0), [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5), and [stabilityai/stable-diffusion-2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1) (may give unexpected results).
+The original codebase can be found at [sd-fabric/fabric](https://github.com/sd-fabric/fabric), and available checkpoints are [dreamlike-art/dreamlike-photoreal-2.0](https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0), [stable-diffusion-v1-5/stable-diffusion-v1-5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5), and [stabilityai/stable-diffusion-2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1) (may give unexpected results).

 Let's have a look at the images (_512X512_)

@@ -3579,7 +3579,7 @@ import torch
 from diffusers import DDIMScheduler, DiffusionPipeline

 # Load the pipeline
-model_path = "runwayml/stable-diffusion-v1-5"
+model_path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 scheduler = DDIMScheduler.from_pretrained(model_path, subfolder="scheduler")
 pipe = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler, custom_pipeline="sde_drag")
 pipe.to('cuda')
@@ -3674,7 +3674,7 @@ prompt = "A lying cat"
 prompt = "A lying dog"

 # Float32 is essential to a well optimization
-model_path = "runwayml/stable-diffusion-v1-5"
+model_path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 scheduler = DDIMScheduler(num_train_timesteps=1000, beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear")
 pipeline = NullTextPipeline.from_pretrained(model_path, scheduler=scheduler, torch_dtype=torch.float32).to(device)

@@ -3738,7 +3738,7 @@ controlnet = ControlNetModel.from_pretrained(

 # You can use any finetuned SD here
 pipe = DiffusionPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, custom_pipeline='rerender_a_video').to('cuda')
+    "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, custom_pipeline='rerender_a_video').to('cuda')

 # Optional: you can download vae-ft-mse-840000-ema-pruned.ckpt to enhance the results
 # pipe.vae = AutoencoderKL.from_single_file(
@@ -4274,7 +4274,7 @@ from diffusers.utils import load_image, make_image_grid
 from diffusers.utils.torch_utils import randn_tensor

 pipe = StableDiffusionPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5",
+    "stable-diffusion-v1-5/stable-diffusion-v1-5",
     custom_pipeline="hyoungwoncho/sd_perturbed_attention_guidance",
     torch_dtype=torch.float16
 )
main/README_community_scripts.md CHANGED
@@ -209,7 +209,7 @@ def seamless_tiling(pipeline, x_axis, y_axis):
         layer._conv_forward = asymmetric_conv2d_convforward.__get__(layer, torch.nn.Conv2d)
     return pipeline

-pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True)
+pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True)
 pipeline.enable_model_cpu_offload()
 prompt = ["texture of a red brick wall"]
 seed = 123456
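For context, a minimal sketch (not part of this commit) of how the updated snippet above might be completed: it reuses the `seamless_tiling(pipeline, x_axis, y_axis)` helper defined earlier in `README_community_scripts.md` together with the new checkpoint id; the prompt, seed, and output filename are only illustrative.

```python
import torch
from diffusers import StableDiffusionPipeline

# Load SD 1.5 from its new organization, as in the updated line of the diff above.
pipeline = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
)
pipeline.enable_model_cpu_offload()

# Patch the conv layers for seamless tiling along both axes.
# seamless_tiling is the helper shown in the hunk header above; it returns the patched pipeline.
pipeline = seamless_tiling(pipeline, x_axis=True, y_axis=True)

generator = torch.Generator("cpu").manual_seed(123456)
image = pipeline(prompt="texture of a red brick wall", generator=generator).images[0]
image.save("seamless_brick_wall.png")  # illustrative filename
```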
main/pipeline_flux_with_cfg.py ADDED
@@ -0,0 +1,840 @@
1
+ # Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+ from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
21
+
22
+ from diffusers.image_processor import VaeImageProcessor
23
+ from diffusers.loaders import FluxLoraLoaderMixin, FromSingleFileMixin
24
+ from diffusers.models.autoencoders import AutoencoderKL
25
+ from diffusers.models.transformers import FluxTransformer2DModel
26
+ from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput
27
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
28
+ from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
29
+ from diffusers.utils import (
30
+ USE_PEFT_BACKEND,
31
+ is_torch_xla_available,
32
+ logging,
33
+ replace_example_docstring,
34
+ scale_lora_layers,
35
+ unscale_lora_layers,
36
+ )
37
+ from diffusers.utils.torch_utils import randn_tensor
38
+
39
+
40
+ if is_torch_xla_available():
41
+ import torch_xla.core.xla_model as xm
42
+
43
+ XLA_AVAILABLE = True
44
+ else:
45
+ XLA_AVAILABLE = False
46
+
47
+
48
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
49
+
50
+ EXAMPLE_DOC_STRING = """
51
+ Examples:
52
+ ```py
53
+ >>> import torch
54
+ >>> from diffusers import FluxPipeline
55
+
56
+ >>> pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16)
57
+ >>> pipe.to("cuda")
58
+ >>> prompt = "A cat holding a sign that says hello world"
59
+ >>> # Depending on the variant being used, the pipeline call will slightly vary.
60
+ >>> # Refer to the pipeline documentation for more details.
61
+ >>> image = pipe(prompt, num_inference_steps=4, guidance_scale=0.0).images[0]
62
+ >>> image.save("flux.png")
63
+ ```
64
+ """
65
+
66
+
67
+ def calculate_shift(
68
+ image_seq_len,
69
+ base_seq_len: int = 256,
70
+ max_seq_len: int = 4096,
71
+ base_shift: float = 0.5,
72
+ max_shift: float = 1.16,
73
+ ):
74
+ m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
75
+ b = base_shift - m * base_seq_len
76
+ mu = image_seq_len * m + b
77
+ return mu
78
+
79
+
80
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
81
+ def retrieve_timesteps(
82
+ scheduler,
83
+ num_inference_steps: Optional[int] = None,
84
+ device: Optional[Union[str, torch.device]] = None,
85
+ timesteps: Optional[List[int]] = None,
86
+ sigmas: Optional[List[float]] = None,
87
+ **kwargs,
88
+ ):
89
+ """
90
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
91
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
92
+
93
+ Args:
94
+ scheduler (`SchedulerMixin`):
95
+ The scheduler to get timesteps from.
96
+ num_inference_steps (`int`):
97
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
98
+ must be `None`.
99
+ device (`str` or `torch.device`, *optional*):
100
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
101
+ timesteps (`List[int]`, *optional*):
102
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
103
+ `num_inference_steps` and `sigmas` must be `None`.
104
+ sigmas (`List[float]`, *optional*):
105
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
106
+ `num_inference_steps` and `timesteps` must be `None`.
107
+
108
+ Returns:
109
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
110
+ second element is the number of inference steps.
111
+ """
112
+ if timesteps is not None and sigmas is not None:
113
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
114
+ if timesteps is not None:
115
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
116
+ if not accepts_timesteps:
117
+ raise ValueError(
118
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
119
+ f" timestep schedules. Please check whether you are using the correct scheduler."
120
+ )
121
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
122
+ timesteps = scheduler.timesteps
123
+ num_inference_steps = len(timesteps)
124
+ elif sigmas is not None:
125
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
126
+ if not accept_sigmas:
127
+ raise ValueError(
128
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
129
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
130
+ )
131
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
132
+ timesteps = scheduler.timesteps
133
+ num_inference_steps = len(timesteps)
134
+ else:
135
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
136
+ timesteps = scheduler.timesteps
137
+ return timesteps, num_inference_steps
138
+
139
+
140
+ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin):
141
+ r"""
142
+ The Flux pipeline for text-to-image generation.
143
+
144
+ Reference: https://blackforestlabs.ai/announcing-black-forest-labs/
145
+
146
+ Args:
147
+ transformer ([`FluxTransformer2DModel`]):
148
+ Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
149
+ scheduler ([`FlowMatchEulerDiscreteScheduler`]):
150
+ A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
151
+ vae ([`AutoencoderKL`]):
152
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
153
+ text_encoder ([`CLIPTextModel`]):
154
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
155
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
156
+ text_encoder_2 ([`T5EncoderModel`]):
157
+ [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically
158
+ the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
159
+ tokenizer (`CLIPTokenizer`):
160
+ Tokenizer of class
161
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
162
+ tokenizer_2 (`T5TokenizerFast`):
163
+ Second Tokenizer of class
164
+ [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
165
+ """
166
+
167
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
168
+ _optional_components = []
169
+ _callback_tensor_inputs = ["latents", "prompt_embeds"]
170
+
171
+ def __init__(
172
+ self,
173
+ scheduler: FlowMatchEulerDiscreteScheduler,
174
+ vae: AutoencoderKL,
175
+ text_encoder: CLIPTextModel,
176
+ tokenizer: CLIPTokenizer,
177
+ text_encoder_2: T5EncoderModel,
178
+ tokenizer_2: T5TokenizerFast,
179
+ transformer: FluxTransformer2DModel,
180
+ ):
181
+ super().__init__()
182
+
183
+ self.register_modules(
184
+ vae=vae,
185
+ text_encoder=text_encoder,
186
+ text_encoder_2=text_encoder_2,
187
+ tokenizer=tokenizer,
188
+ tokenizer_2=tokenizer_2,
189
+ transformer=transformer,
190
+ scheduler=scheduler,
191
+ )
192
+ self.vae_scale_factor = (
193
+ 2 ** (len(self.vae.config.block_out_channels)) if hasattr(self, "vae") and self.vae is not None else 16
194
+ )
195
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
196
+ self.tokenizer_max_length = (
197
+ self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
198
+ )
199
+ self.default_sample_size = 64
200
+
201
+ def _get_t5_prompt_embeds(
202
+ self,
203
+ prompt: Union[str, List[str]] = None,
204
+ num_images_per_prompt: int = 1,
205
+ max_sequence_length: int = 512,
206
+ device: Optional[torch.device] = None,
207
+ dtype: Optional[torch.dtype] = None,
208
+ ):
209
+ device = device or self._execution_device
210
+ dtype = dtype or self.text_encoder.dtype
211
+
212
+ prompt = [prompt] if isinstance(prompt, str) else prompt
213
+ batch_size = len(prompt)
214
+
215
+ text_inputs = self.tokenizer_2(
216
+ prompt,
217
+ padding="max_length",
218
+ max_length=max_sequence_length,
219
+ truncation=True,
220
+ return_length=False,
221
+ return_overflowing_tokens=False,
222
+ return_tensors="pt",
223
+ )
224
+ text_input_ids = text_inputs.input_ids
225
+ untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids
226
+
227
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
228
+ removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
229
+ logger.warning(
230
+ "The following part of your input was truncated because `max_sequence_length` is set to "
231
+ f" {max_sequence_length} tokens: {removed_text}"
232
+ )
233
+
234
+ prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0]
235
+
236
+ dtype = self.text_encoder_2.dtype
237
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
238
+
239
+ _, seq_len, _ = prompt_embeds.shape
240
+
241
+ # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
242
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
243
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
244
+
245
+ return prompt_embeds
246
+
247
+ def _get_clip_prompt_embeds(
248
+ self,
249
+ prompt: Union[str, List[str]],
250
+ num_images_per_prompt: int = 1,
251
+ device: Optional[torch.device] = None,
252
+ ):
253
+ device = device or self._execution_device
254
+
255
+ prompt = [prompt] if isinstance(prompt, str) else prompt
256
+ batch_size = len(prompt)
257
+
258
+ text_inputs = self.tokenizer(
259
+ prompt,
260
+ padding="max_length",
261
+ max_length=self.tokenizer_max_length,
262
+ truncation=True,
263
+ return_overflowing_tokens=False,
264
+ return_length=False,
265
+ return_tensors="pt",
266
+ )
267
+
268
+ text_input_ids = text_inputs.input_ids
269
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
270
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
271
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
272
+ logger.warning(
273
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
274
+ f" {self.tokenizer_max_length} tokens: {removed_text}"
275
+ )
276
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False)
277
+
278
+ # Use pooled output of CLIPTextModel
279
+ prompt_embeds = prompt_embeds.pooler_output
280
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
281
+
282
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
283
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt)
284
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1)
285
+
286
+ return prompt_embeds
287
+
288
+ def encode_prompt(
289
+ self,
290
+ prompt: Union[str, List[str]],
291
+ prompt_2: Union[str, List[str]],
292
+ device: Optional[torch.device] = None,
293
+ num_images_per_prompt: int = 1,
294
+ prompt_embeds: Optional[torch.FloatTensor] = None,
295
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
296
+ max_sequence_length: int = 512,
297
+ lora_scale: Optional[float] = None,
298
+ ):
299
+ r"""
300
+
301
+ Args:
302
+ prompt (`str` or `List[str]`, *optional*):
303
+ prompt to be encoded
304
+ prompt_2 (`str` or `List[str]`, *optional*):
305
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
306
+ used in all text-encoders
307
+ device: (`torch.device`):
308
+ torch device
309
+ num_images_per_prompt (`int`):
310
+ number of images that should be generated per prompt
311
+ prompt_embeds (`torch.FloatTensor`, *optional*):
312
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
313
+ provided, text embeddings will be generated from `prompt` input argument.
314
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
315
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
316
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
317
+ lora_scale (`float`, *optional*):
318
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
319
+ """
320
+ device = device or self._execution_device
321
+
322
+ # set lora scale so that monkey patched LoRA
323
+ # function of text encoder can correctly access it
324
+ if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin):
325
+ self._lora_scale = lora_scale
326
+
327
+ # dynamically adjust the LoRA scale
328
+ if self.text_encoder is not None and USE_PEFT_BACKEND:
329
+ scale_lora_layers(self.text_encoder, lora_scale)
330
+ if self.text_encoder_2 is not None and USE_PEFT_BACKEND:
331
+ scale_lora_layers(self.text_encoder_2, lora_scale)
332
+
333
+ prompt = [prompt] if isinstance(prompt, str) else prompt
334
+
335
+ if prompt_embeds is None:
336
+ prompt_2 = prompt_2 or prompt
337
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
338
+
339
+ # We only use the pooled prompt output from the CLIPTextModel
340
+ pooled_prompt_embeds = self._get_clip_prompt_embeds(
341
+ prompt=prompt,
342
+ device=device,
343
+ num_images_per_prompt=num_images_per_prompt,
344
+ )
345
+ prompt_embeds = self._get_t5_prompt_embeds(
346
+ prompt=prompt_2,
347
+ num_images_per_prompt=num_images_per_prompt,
348
+ max_sequence_length=max_sequence_length,
349
+ device=device,
350
+ )
351
+
352
+ if self.text_encoder is not None:
353
+ if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
354
+ # Retrieve the original scale by scaling back the LoRA layers
355
+ unscale_lora_layers(self.text_encoder, lora_scale)
356
+
357
+ if self.text_encoder_2 is not None:
358
+ if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
359
+ # Retrieve the original scale by scaling back the LoRA layers
360
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
361
+
362
+ dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype
363
+ text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype)
364
+
365
+ return prompt_embeds, pooled_prompt_embeds, text_ids
366
+
367
+ def check_inputs(
368
+ self,
369
+ prompt,
370
+ prompt_2,
371
+ height,
372
+ width,
373
+ negative_prompt=None,
374
+ negative_prompt_2=None,
375
+ prompt_embeds=None,
376
+ negative_prompt_embeds=None,
377
+ pooled_prompt_embeds=None,
378
+ negative_pooled_prompt_embeds=None,
379
+ callback_on_step_end_tensor_inputs=None,
380
+ max_sequence_length=None,
381
+ ):
382
+ if height % 8 != 0 or width % 8 != 0:
383
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
384
+
385
+ if callback_on_step_end_tensor_inputs is not None and not all(
386
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
387
+ ):
388
+ raise ValueError(
389
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
390
+ )
391
+
392
+ if prompt is not None and prompt_embeds is not None:
393
+ raise ValueError(
394
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
395
+ " only forward one of the two."
396
+ )
397
+ elif prompt_2 is not None and prompt_embeds is not None:
398
+ raise ValueError(
399
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
400
+ " only forward one of the two."
401
+ )
402
+ elif prompt is None and prompt_embeds is None:
403
+ raise ValueError(
404
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
405
+ )
406
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
407
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
408
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
409
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
410
+
411
+ if negative_prompt is not None and negative_prompt_embeds is not None:
412
+ raise ValueError(
413
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
414
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
415
+ )
416
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
417
+ raise ValueError(
418
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
419
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
420
+ )
421
+
422
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
423
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
424
+ raise ValueError(
425
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
426
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
427
+ f" {negative_prompt_embeds.shape}."
428
+ )
429
+
430
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
431
+ raise ValueError(
432
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
433
+ )
434
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
435
+ raise ValueError(
436
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
437
+ )
438
+
439
+ if max_sequence_length is not None and max_sequence_length > 512:
440
+ raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
441
+
442
+ @staticmethod
443
+ def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
444
+ latent_image_ids = torch.zeros(height // 2, width // 2, 3)
445
+ latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None]
446
+ latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :]
447
+
448
+ latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape
449
+
450
+ latent_image_ids = latent_image_ids.reshape(
451
+ latent_image_id_height * latent_image_id_width, latent_image_id_channels
452
+ )
453
+
454
+ return latent_image_ids.to(device=device, dtype=dtype)
455
+
456
+ @staticmethod
457
+ def _pack_latents(latents, batch_size, num_channels_latents, height, width):
458
+ latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
459
+ latents = latents.permute(0, 2, 4, 1, 3, 5)
460
+ latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
461
+
462
+ return latents
463
+
464
+ @staticmethod
465
+ def _unpack_latents(latents, height, width, vae_scale_factor):
466
+ batch_size, num_patches, channels = latents.shape
467
+
468
+ height = height // vae_scale_factor
469
+ width = width // vae_scale_factor
470
+
471
+ latents = latents.view(batch_size, height, width, channels // 4, 2, 2)
472
+ latents = latents.permute(0, 3, 1, 4, 2, 5)
473
+
474
+ latents = latents.reshape(batch_size, channels // (2 * 2), height * 2, width * 2)
475
+
476
+ return latents
477
+
478
+ def enable_vae_slicing(self):
479
+ r"""
480
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
481
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
482
+ """
483
+ self.vae.enable_slicing()
484
+
485
+ def disable_vae_slicing(self):
486
+ r"""
487
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
488
+ computing decoding in one step.
489
+ """
490
+ self.vae.disable_slicing()
491
+
492
+ def enable_vae_tiling(self):
493
+ r"""
494
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
495
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
496
+ processing larger images.
497
+ """
498
+ self.vae.enable_tiling()
499
+
500
+ def disable_vae_tiling(self):
501
+ r"""
502
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
503
+ computing decoding in one step.
504
+ """
505
+ self.vae.disable_tiling()
506
+
507
+ def prepare_latents(
508
+ self,
509
+ batch_size,
510
+ num_channels_latents,
511
+ height,
512
+ width,
513
+ dtype,
514
+ device,
515
+ generator,
516
+ latents=None,
517
+ ):
518
+ height = 2 * (int(height) // self.vae_scale_factor)
519
+ width = 2 * (int(width) // self.vae_scale_factor)
520
+
521
+ shape = (batch_size, num_channels_latents, height, width)
522
+
523
+ if latents is not None:
524
+ latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype)
525
+ return latents.to(device=device, dtype=dtype), latent_image_ids
526
+
527
+ if isinstance(generator, list) and len(generator) != batch_size:
528
+ raise ValueError(
529
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
530
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
531
+ )
532
+
533
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
534
+ latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)
535
+
536
+ latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype)
537
+
538
+ return latents, latent_image_ids
539
+
540
+ @property
541
+ def guidance_scale(self):
542
+ return self._guidance_scale
543
+
544
+ @property
545
+ def joint_attention_kwargs(self):
546
+ return self._joint_attention_kwargs
547
+
548
+ @property
549
+ def num_timesteps(self):
550
+ return self._num_timesteps
551
+
552
+ @property
553
+ def interrupt(self):
554
+ return self._interrupt
555
+
556
+ @torch.no_grad()
557
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
558
+ def __call__(
559
+ self,
560
+ prompt: Union[str, List[str]] = None,
561
+ prompt_2: Optional[Union[str, List[str]]] = None,
562
+ negative_prompt: Union[str, List[str]] = None, #
563
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
564
+ true_cfg: float = 1.0, #
565
+ height: Optional[int] = None,
566
+ width: Optional[int] = None,
567
+ num_inference_steps: int = 28,
568
+ timesteps: List[int] = None,
569
+ guidance_scale: float = 3.5,
570
+ num_images_per_prompt: Optional[int] = 1,
571
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
572
+ latents: Optional[torch.FloatTensor] = None,
573
+ prompt_embeds: Optional[torch.FloatTensor] = None,
574
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
575
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
576
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
577
+ output_type: Optional[str] = "pil",
578
+ return_dict: bool = True,
579
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
580
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
581
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
582
+ max_sequence_length: int = 512,
583
+ ):
584
+ r"""
585
+ Function invoked when calling the pipeline for generation.
586
+
587
+ Args:
588
+ prompt (`str` or `List[str]`, *optional*):
589
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
590
+ instead.
591
+ prompt_2 (`str` or `List[str]`, *optional*):
592
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt`
593
+ will be used instead
594
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
595
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
596
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
597
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
598
+ num_inference_steps (`int`, *optional*, defaults to 28):
599
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
600
+ expense of slower inference.
601
+ timesteps (`List[int]`, *optional*):
602
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
603
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
604
+ passed will be used. Must be in descending order.
605
+ guidance_scale (`float`, *optional*, defaults to 3.5):
606
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
607
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
608
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
609
+ 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
610
+ usually at the expense of lower image quality.
611
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
612
+ The number of images to generate per prompt.
613
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
614
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
615
+ to make generation deterministic.
616
+ latents (`torch.FloatTensor`, *optional*):
617
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
618
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
619
+ tensor will be generated by sampling using the supplied random `generator`.
620
+ prompt_embeds (`torch.FloatTensor`, *optional*):
621
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
622
+ provided, text embeddings will be generated from `prompt` input argument.
623
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
624
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
625
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
626
+ output_type (`str`, *optional*, defaults to `"pil"`):
627
+ The output format of the generated image. Choose between
628
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
629
+ return_dict (`bool`, *optional*, defaults to `True`):
630
+ Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple.
631
+ joint_attention_kwargs (`dict`, *optional*):
632
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
633
+ `self.processor` in
634
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
635
+ callback_on_step_end (`Callable`, *optional*):
636
+ A function called at the end of each denoising step during inference. The function is called
637
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
638
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
639
+ `callback_on_step_end_tensor_inputs`.
640
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
641
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
642
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
643
+ `._callback_tensor_inputs` attribute of your pipeline class.
644
+ max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`.
645
+
646
+ Examples:
647
+
648
+ Returns:
649
+ [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict`
650
+ is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated
651
+ images.
652
+ """
653
+
654
+ height = height or self.default_sample_size * self.vae_scale_factor
655
+ width = width or self.default_sample_size * self.vae_scale_factor
656
+
657
+ # 1. Check inputs. Raise error if not correct
658
+ self.check_inputs(
659
+ prompt,
660
+ prompt_2,
661
+ height,
662
+ width,
663
+ negative_prompt=negative_prompt,
664
+ negative_prompt_2=negative_prompt_2,
665
+ prompt_embeds=prompt_embeds,
666
+ negative_prompt_embeds=negative_prompt_embeds,
667
+ pooled_prompt_embeds=pooled_prompt_embeds,
668
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
669
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
670
+ max_sequence_length=max_sequence_length,
671
+ )
672
+
673
+ self._guidance_scale = guidance_scale
674
+ self._joint_attention_kwargs = joint_attention_kwargs
675
+ self._interrupt = False
676
+
677
+ # 2. Define call parameters
678
+ if prompt is not None and isinstance(prompt, str):
679
+ batch_size = 1
680
+ elif prompt is not None and isinstance(prompt, list):
681
+ batch_size = len(prompt)
682
+ else:
683
+ batch_size = prompt_embeds.shape[0]
684
+
685
+ device = self._execution_device
686
+
687
+ lora_scale = (
688
+ self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
689
+ )
690
+ (
691
+ prompt_embeds,
692
+ pooled_prompt_embeds,
693
+ text_ids,
694
+ ) = self.encode_prompt(
695
+ prompt=prompt,
696
+ prompt_2=prompt_2,
697
+ prompt_embeds=prompt_embeds,
698
+ pooled_prompt_embeds=pooled_prompt_embeds,
699
+ device=device,
700
+ num_images_per_prompt=num_images_per_prompt,
701
+ max_sequence_length=max_sequence_length,
702
+ lora_scale=lora_scale,
703
+ )
704
+
705
+ # perform "real" CFG as suggested for distilled Flux models in https://github.com/ToTheBeginning/PuLID/blob/main/docs/pulid_for_flux.md
706
+ do_true_cfg = true_cfg > 1 and negative_prompt is not None
707
+ if do_true_cfg:
708
+ (
709
+ negative_prompt_embeds,
710
+ negative_pooled_prompt_embeds,
711
+ negative_text_ids,
712
+ ) = self.encode_prompt(
713
+ prompt=negative_prompt,
714
+ prompt_2=negative_prompt_2,
715
+ prompt_embeds=negative_prompt_embeds,
716
+ pooled_prompt_embeds=negative_pooled_prompt_embeds,
717
+ device=device,
718
+ num_images_per_prompt=num_images_per_prompt,
719
+ max_sequence_length=max_sequence_length,
720
+ lora_scale=lora_scale,
721
+ )
722
+
723
+ # 4. Prepare latent variables
724
+ num_channels_latents = self.transformer.config.in_channels // 4
725
+ latents, latent_image_ids = self.prepare_latents(
726
+ batch_size * num_images_per_prompt,
727
+ num_channels_latents,
728
+ height,
729
+ width,
730
+ prompt_embeds.dtype,
731
+ device,
732
+ generator,
733
+ latents,
734
+ )
735
+
736
+ # 5. Prepare timesteps
737
+ sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
738
+ image_seq_len = latents.shape[1]
739
+ mu = calculate_shift(
740
+ image_seq_len,
741
+ self.scheduler.config.base_image_seq_len,
742
+ self.scheduler.config.max_image_seq_len,
743
+ self.scheduler.config.base_shift,
744
+ self.scheduler.config.max_shift,
745
+ )
746
+ timesteps, num_inference_steps = retrieve_timesteps(
747
+ self.scheduler,
748
+ num_inference_steps,
749
+ device,
750
+ timesteps,
751
+ sigmas,
752
+ mu=mu,
753
+ )
754
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
755
+ self._num_timesteps = len(timesteps)
756
+
757
+ # handle guidance
758
+ if self.transformer.config.guidance_embeds:
759
+ guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32)
760
+ guidance = guidance.expand(latents.shape[0])
761
+ else:
762
+ guidance = None
763
+
764
+ # 6. Denoising loop
765
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
766
+ for i, t in enumerate(timesteps):
767
+ if self.interrupt:
768
+ continue
769
+
770
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
771
+ timestep = t.expand(latents.shape[0]).to(latents.dtype)
772
+
773
+ noise_pred = self.transformer(
774
+ hidden_states=latents,
775
+ timestep=timestep / 1000,
776
+ guidance=guidance,
777
+ pooled_projections=pooled_prompt_embeds,
778
+ encoder_hidden_states=prompt_embeds,
779
+ txt_ids=text_ids,
780
+ img_ids=latent_image_ids,
781
+ joint_attention_kwargs=self.joint_attention_kwargs,
782
+ return_dict=False,
783
+ )[0]
784
+
785
+ if do_true_cfg:
786
+ neg_noise_pred = self.transformer(
787
+ hidden_states=latents,
788
+ timestep=timestep / 1000,
789
+ guidance=guidance,
790
+ pooled_projections=negative_pooled_prompt_embeds,
791
+ encoder_hidden_states=negative_prompt_embeds,
792
+ txt_ids=negative_text_ids,
793
+ img_ids=latent_image_ids,
794
+ joint_attention_kwargs=self.joint_attention_kwargs,
795
+ return_dict=False,
796
+ )[0]
797
+
798
+ noise_pred = neg_noise_pred + true_cfg * (noise_pred - neg_noise_pred)
799
+
800
+ # compute the previous noisy sample x_t -> x_t-1
801
+ latents_dtype = latents.dtype
802
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
803
+
804
+ if latents.dtype != latents_dtype:
805
+ if torch.backends.mps.is_available():
806
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
807
+ latents = latents.to(latents_dtype)
808
+
809
+ if callback_on_step_end is not None:
810
+ callback_kwargs = {}
811
+ for k in callback_on_step_end_tensor_inputs:
812
+ callback_kwargs[k] = locals()[k]
813
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
814
+
815
+ latents = callback_outputs.pop("latents", latents)
816
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
817
+
818
+ # call the callback, if provided
819
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
820
+ progress_bar.update()
821
+
822
+ if XLA_AVAILABLE:
823
+ xm.mark_step()
824
+
825
+ if output_type == "latent":
826
+ image = latents
827
+
828
+ else:
829
+ latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
830
+ latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
831
+ image = self.vae.decode(latents, return_dict=False)[0]
832
+ image = self.image_processor.postprocess(image, output_type=output_type)
833
+
834
+ # Offload all models
835
+ self.maybe_free_model_hooks()
836
+
837
+ if not return_dict:
838
+ return (image,)
839
+
840
+ return FluxPipelineOutput(images=image)
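A minimal usage sketch for the pipeline file added above (not part of this commit): it assumes the file is loadable as a community pipeline, e.g. via `custom_pipeline="pipeline_flux_with_cfg"`, and that a Flux checkpoint such as `black-forest-labs/FLUX.1-dev` serves as the base; `negative_prompt` and `true_cfg` map onto the arguments of `FluxCFGPipeline.__call__` defined above.

```python
import torch
from diffusers import DiffusionPipeline

# Assumed community-pipeline name and base checkpoint; adjust to your setup.
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    custom_pipeline="pipeline_flux_with_cfg",
    torch_dtype=torch.bfloat16,
)
pipe.enable_model_cpu_offload()

image = pipe(
    prompt="a tiny astronaut hatching from an egg on the moon",
    negative_prompt="blurry, low quality",  # true CFG is only applied when a negative prompt is given
    true_cfg=4.0,                           # > 1 enables the "real" CFG branch in the denoising loop
    guidance_scale=3.5,                     # distilled guidance embedding still passed to the transformer
    num_inference_steps=28,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("flux_true_cfg.png")
```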