diffusers-benchmarking-bot committed · verified
Commit 70dd1e4 · 1 Parent(s): 46b6a06

Upload folder using huggingface_hub

main/pipeline_flux_semantic_guidance.py ADDED
@@ -0,0 +1,1351 @@
1
+ # Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+ from transformers import (
21
+ CLIPImageProcessor,
22
+ CLIPTextModel,
23
+ CLIPTokenizer,
24
+ CLIPVisionModelWithProjection,
25
+ T5EncoderModel,
26
+ T5TokenizerFast,
27
+ )
28
+
29
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
30
+ from diffusers.loaders import FluxIPAdapterMixin, FluxLoraLoaderMixin, FromSingleFileMixin, TextualInversionLoaderMixin
31
+ from diffusers.models.autoencoders import AutoencoderKL
32
+ from diffusers.models.transformers import FluxTransformer2DModel
33
+ from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput
34
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
35
+ from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
36
+ from diffusers.utils import (
37
+ USE_PEFT_BACKEND,
38
+ is_torch_xla_available,
39
+ logging,
40
+ replace_example_docstring,
41
+ scale_lora_layers,
42
+ unscale_lora_layers,
43
+ )
44
+ from diffusers.utils.torch_utils import randn_tensor
45
+
46
+
47
+ if is_torch_xla_available():
48
+ import torch_xla.core.xla_model as xm
49
+
50
+ XLA_AVAILABLE = True
51
+ else:
52
+ XLA_AVAILABLE = False
53
+
54
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
55
+
56
+ EXAMPLE_DOC_STRING = """
57
+ Examples:
58
+ ```py
59
+ >>> import torch
60
+ >>> from diffusers import DiffusionPipeline
61
+
62
+ >>> pipe = DiffusionPipeline.from_pretrained(
63
+ ... "black-forest-labs/FLUX.1-dev",
64
+ ... custom_pipeline="pipeline_flux_semantic_guidance",
65
+ ... torch_dtype=torch.bfloat16
66
+ ... )
67
+ >>> pipe.to("cuda")
68
+ >>> prompt = "A cat holding a sign that says hello world"
69
+ >>> image = pipe(
70
+ ... prompt=prompt,
71
+ ... num_inference_steps=28,
72
+ ... guidance_scale=3.5,
73
+ ... editing_prompt=["cat", "dog"], # changes from cat to dog.
74
+ ... reverse_editing_direction=[True, False],
75
+ ... edit_warmup_steps=[6, 8],
76
+ ... edit_guidance_scale=[6, 6.5],
77
+ ... edit_threshold=[0.89, 0.89],
78
+ ... edit_cooldown_steps=[25, 27],
79
+ ... edit_momentum_scale=0.3,
80
+ ... edit_mom_beta=0.6,
81
+ ... generator=torch.Generator(device="cuda").manual_seed(6543),
82
+ ... ).images[0]
83
+ >>> image.save("semantic_flux.png")
84
+ ```
85
+ """
86
+
87
+
88
+ # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift
89
+ def calculate_shift(
90
+ image_seq_len,
91
+ base_seq_len: int = 256,
92
+ max_seq_len: int = 4096,
93
+ base_shift: float = 0.5,
94
+ max_shift: float = 1.16,
95
+ ):
96
+ m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
97
+ b = base_shift - m * base_seq_len
98
+ mu = image_seq_len * m + b
99
+ return mu
100
+
101
+
102
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
103
+ def retrieve_timesteps(
104
+ scheduler,
105
+ num_inference_steps: Optional[int] = None,
106
+ device: Optional[Union[str, torch.device]] = None,
107
+ timesteps: Optional[List[int]] = None,
108
+ sigmas: Optional[List[float]] = None,
109
+ **kwargs,
110
+ ):
111
+ r"""
112
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
113
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
114
+
115
+ Args:
116
+ scheduler (`SchedulerMixin`):
117
+ The scheduler to get timesteps from.
118
+ num_inference_steps (`int`):
119
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
120
+ must be `None`.
121
+ device (`str` or `torch.device`, *optional*):
122
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
123
+ timesteps (`List[int]`, *optional*):
124
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
125
+ `num_inference_steps` and `sigmas` must be `None`.
126
+ sigmas (`List[float]`, *optional*):
127
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
128
+ `num_inference_steps` and `timesteps` must be `None`.
129
+
130
+ Returns:
131
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
132
+ second element is the number of inference steps.
133
+ """
134
+ if timesteps is not None and sigmas is not None:
135
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
136
+ if timesteps is not None:
137
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
138
+ if not accepts_timesteps:
139
+ raise ValueError(
140
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
141
+ f" timestep schedules. Please check whether you are using the correct scheduler."
142
+ )
143
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
144
+ timesteps = scheduler.timesteps
145
+ num_inference_steps = len(timesteps)
146
+ elif sigmas is not None:
147
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
148
+ if not accept_sigmas:
149
+ raise ValueError(
150
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
151
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
152
+ )
153
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
154
+ timesteps = scheduler.timesteps
155
+ num_inference_steps = len(timesteps)
156
+ else:
157
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
158
+ timesteps = scheduler.timesteps
159
+ return timesteps, num_inference_steps
160
+
161
+
162
+ class FluxSemanticGuidancePipeline(
163
+ DiffusionPipeline,
164
+ FluxLoraLoaderMixin,
165
+ FromSingleFileMixin,
166
+ TextualInversionLoaderMixin,
167
+ FluxIPAdapterMixin,
168
+ ):
169
+ r"""
170
+ The Flux pipeline for text-to-image generation with semantic guidance.
171
+
172
+ Reference: https://blackforestlabs.ai/announcing-black-forest-labs/
173
+
174
+ Args:
175
+ transformer ([`FluxTransformer2DModel`]):
176
+ Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
177
+ scheduler ([`FlowMatchEulerDiscreteScheduler`]):
178
+ A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
179
+ vae ([`AutoencoderKL`]):
180
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
181
+ text_encoder ([`CLIPTextModel`]):
182
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
183
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
184
+ text_encoder_2 ([`T5EncoderModel`]):
185
+ [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically
186
+ the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
187
+ tokenizer (`CLIPTokenizer`):
188
+ Tokenizer of class
189
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
190
+ tokenizer_2 (`T5TokenizerFast`):
191
+ Second Tokenizer of class
192
+ [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
193
+ """
194
+
195
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->transformer->vae"
196
+ _optional_components = ["image_encoder", "feature_extractor"]
197
+ _callback_tensor_inputs = ["latents", "prompt_embeds"]
198
+
199
+ def __init__(
200
+ self,
201
+ scheduler: FlowMatchEulerDiscreteScheduler,
202
+ vae: AutoencoderKL,
203
+ text_encoder: CLIPTextModel,
204
+ tokenizer: CLIPTokenizer,
205
+ text_encoder_2: T5EncoderModel,
206
+ tokenizer_2: T5TokenizerFast,
207
+ transformer: FluxTransformer2DModel,
208
+ image_encoder: CLIPVisionModelWithProjection = None,
209
+ feature_extractor: CLIPImageProcessor = None,
210
+ ):
211
+ super().__init__()
212
+
213
+ self.register_modules(
214
+ vae=vae,
215
+ text_encoder=text_encoder,
216
+ text_encoder_2=text_encoder_2,
217
+ tokenizer=tokenizer,
218
+ tokenizer_2=tokenizer_2,
219
+ transformer=transformer,
220
+ scheduler=scheduler,
221
+ image_encoder=image_encoder,
222
+ feature_extractor=feature_extractor,
223
+ )
224
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
225
+ # Flux latents are turned into 2x2 patches and packed. This means the latent width and height have to be divisible
226
+ # by the patch size. So the vae scale factor is multiplied by the patch size to account for this.
227
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
228
+ self.tokenizer_max_length = (
229
+ self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
230
+ )
231
+ self.default_sample_size = 128
232
+
233
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_t5_prompt_embeds
234
+ def _get_t5_prompt_embeds(
235
+ self,
236
+ prompt: Union[str, List[str]] = None,
237
+ num_images_per_prompt: int = 1,
238
+ max_sequence_length: int = 512,
239
+ device: Optional[torch.device] = None,
240
+ dtype: Optional[torch.dtype] = None,
241
+ ):
242
+ device = device or self._execution_device
243
+ dtype = dtype or self.text_encoder.dtype
244
+
245
+ prompt = [prompt] if isinstance(prompt, str) else prompt
246
+ batch_size = len(prompt)
247
+
248
+ if isinstance(self, TextualInversionLoaderMixin):
249
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer_2)
250
+
251
+ text_inputs = self.tokenizer_2(
252
+ prompt,
253
+ padding="max_length",
254
+ max_length=max_sequence_length,
255
+ truncation=True,
256
+ return_length=False,
257
+ return_overflowing_tokens=False,
258
+ return_tensors="pt",
259
+ )
260
+ text_input_ids = text_inputs.input_ids
261
+ untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids
262
+
263
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
264
+ removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
265
+ logger.warning(
266
+ "The following part of your input was truncated because `max_sequence_length` is set to "
267
+ f" {max_sequence_length} tokens: {removed_text}"
268
+ )
269
+
270
+ prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0]
271
+
272
+ dtype = self.text_encoder_2.dtype
273
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
274
+
275
+ _, seq_len, _ = prompt_embeds.shape
276
+
277
+ # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
278
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
279
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
280
+
281
+ return prompt_embeds
282
+
283
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_clip_prompt_embeds
284
+ def _get_clip_prompt_embeds(
285
+ self,
286
+ prompt: Union[str, List[str]],
287
+ num_images_per_prompt: int = 1,
288
+ device: Optional[torch.device] = None,
289
+ ):
290
+ device = device or self._execution_device
291
+
292
+ prompt = [prompt] if isinstance(prompt, str) else prompt
293
+ batch_size = len(prompt)
294
+
295
+ if isinstance(self, TextualInversionLoaderMixin):
296
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
297
+
298
+ text_inputs = self.tokenizer(
299
+ prompt,
300
+ padding="max_length",
301
+ max_length=self.tokenizer_max_length,
302
+ truncation=True,
303
+ return_overflowing_tokens=False,
304
+ return_length=False,
305
+ return_tensors="pt",
306
+ )
307
+
308
+ text_input_ids = text_inputs.input_ids
309
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
310
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
311
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
312
+ logger.warning(
313
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
314
+ f" {self.tokenizer_max_length} tokens: {removed_text}"
315
+ )
316
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False)
317
+
318
+ # Use pooled output of CLIPTextModel
319
+ prompt_embeds = prompt_embeds.pooler_output
320
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
321
+
322
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
323
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt)
324
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1)
325
+
326
+ return prompt_embeds
327
+
328
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.encode_prompt
329
+ def encode_prompt(
330
+ self,
331
+ prompt: Union[str, List[str]],
332
+ prompt_2: Union[str, List[str]],
333
+ device: Optional[torch.device] = None,
334
+ num_images_per_prompt: int = 1,
335
+ prompt_embeds: Optional[torch.FloatTensor] = None,
336
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
337
+ max_sequence_length: int = 512,
338
+ lora_scale: Optional[float] = None,
339
+ ):
340
+ r"""
341
+
342
+ Args:
343
+ prompt (`str` or `List[str]`, *optional*):
344
+ prompt to be encoded
345
+ prompt_2 (`str` or `List[str]`, *optional*):
346
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
347
+ used in all text-encoders
348
+ device: (`torch.device`):
349
+ torch device
350
+ num_images_per_prompt (`int`):
351
+ number of images that should be generated per prompt
352
+ prompt_embeds (`torch.FloatTensor`, *optional*):
353
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
354
+ provided, text embeddings will be generated from `prompt` input argument.
355
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
356
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
357
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
358
+ lora_scale (`float`, *optional*):
359
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
360
+ """
361
+ device = device or self._execution_device
362
+
363
+ # set lora scale so that monkey patched LoRA
364
+ # function of text encoder can correctly access it
365
+ if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin):
366
+ self._lora_scale = lora_scale
367
+
368
+ # dynamically adjust the LoRA scale
369
+ if self.text_encoder is not None and USE_PEFT_BACKEND:
370
+ scale_lora_layers(self.text_encoder, lora_scale)
371
+ if self.text_encoder_2 is not None and USE_PEFT_BACKEND:
372
+ scale_lora_layers(self.text_encoder_2, lora_scale)
373
+
374
+ prompt = [prompt] if isinstance(prompt, str) else prompt
375
+
376
+ if prompt_embeds is None:
377
+ prompt_2 = prompt_2 or prompt
378
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
379
+
380
+ # We only use the pooled prompt output from the CLIPTextModel
381
+ pooled_prompt_embeds = self._get_clip_prompt_embeds(
382
+ prompt=prompt,
383
+ device=device,
384
+ num_images_per_prompt=num_images_per_prompt,
385
+ )
386
+ prompt_embeds = self._get_t5_prompt_embeds(
387
+ prompt=prompt_2,
388
+ num_images_per_prompt=num_images_per_prompt,
389
+ max_sequence_length=max_sequence_length,
390
+ device=device,
391
+ )
392
+
393
+ if self.text_encoder is not None:
394
+ if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
395
+ # Retrieve the original scale by scaling back the LoRA layers
396
+ unscale_lora_layers(self.text_encoder, lora_scale)
397
+
398
+ if self.text_encoder_2 is not None:
399
+ if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
400
+ # Retrieve the original scale by scaling back the LoRA layers
401
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
402
+
403
+ dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype
404
+ text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype)
405
+
406
+ return prompt_embeds, pooled_prompt_embeds, text_ids
407
+
408
+ def encode_text_with_editing(
409
+ self,
410
+ prompt: Union[str, List[str]],
411
+ prompt_2: Union[str, List[str]],
412
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
413
+ editing_prompt: Optional[List[str]] = None,
414
+ editing_prompt_2: Optional[List[str]] = None,
415
+ editing_prompt_embeds: Optional[torch.FloatTensor] = None,
416
+ pooled_editing_prompt_embeds: Optional[torch.FloatTensor] = None,
417
+ device: Optional[torch.device] = None,
418
+ num_images_per_prompt: int = 1,
419
+ max_sequence_length: int = 512,
420
+ lora_scale: Optional[float] = None,
421
+ ):
422
+ """
423
+ Encode text prompts with editing prompts and negative prompts for semantic guidance.
424
+
425
+ Args:
426
+ prompt (`str` or `List[str]`):
427
+ The prompt or prompts to guide image generation.
428
+ prompt_2 (`str` or `List[str]`):
429
+ The prompt or prompts to guide image generation for second tokenizer.
430
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
431
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
432
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
433
+ editing_prompt (`str` or `List[str]`, *optional*):
434
+ The editing prompts for semantic guidance.
435
+ editing_prompt_2 (`str` or `List[str]`, *optional*):
436
+ The editing prompts for semantic guidance for second tokenizer.
437
+ editing_prompt_embeds (`torch.FloatTensor`, *optional*):
438
+ Pre-computed embeddings for editing prompts.
439
+ pooled_editing_prompt_embeds (`torch.FloatTensor`, *optional*):
440
+ Pre-computed pooled embeddings for editing prompts.
441
+ device (`torch.device`, *optional*):
442
+ The device to use for computation.
443
+ num_images_per_prompt (`int`, defaults to 1):
444
+ Number of images to generate per prompt.
445
+ max_sequence_length (`int`, defaults to 512):
446
+ Maximum sequence length for text encoding.
447
+ lora_scale (`float`, *optional*):
448
+ Scale factor for LoRA layers if used.
449
+
450
+ Returns:
451
+ `tuple`:
452
+ A tuple containing the prompt embeddings, pooled prompt embeddings, editing prompt embeddings,
453
+ pooled editing prompt embeddings, text IDs, edit text IDs, and the number of enabled editing prompts.
454
+ """
455
+ device = device or self._execution_device
456
+
457
+ if prompt is not None and isinstance(prompt, str):
458
+ batch_size = 1
459
+ elif prompt is not None and isinstance(prompt, list):
460
+ batch_size = len(prompt)
461
+ else:
462
+ raise ValueError("Prompt must be provided as string or list of strings")
463
+
464
+ # Get base prompt embeddings
465
+ prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
466
+ prompt=prompt,
467
+ prompt_2=prompt_2,
468
+ pooled_prompt_embeds=pooled_prompt_embeds,
469
+ device=device,
470
+ num_images_per_prompt=num_images_per_prompt,
471
+ max_sequence_length=max_sequence_length,
472
+ lora_scale=lora_scale,
473
+ )
474
+
475
+ # Handle editing prompts
476
+ if editing_prompt_embeds is not None:
477
+ enabled_editing_prompts = int(editing_prompt_embeds.shape[0])
478
+ edit_text_ids = []
479
+ elif editing_prompt is not None:
480
+ editing_prompt_embeds = []
481
+ pooled_editing_prompt_embeds = []
482
+ edit_text_ids = []
483
+
484
+ editing_prompt_2 = editing_prompt if editing_prompt_2 is None else editing_prompt_2
485
+ for edit_1, edit_2 in zip(editing_prompt, editing_prompt_2):
486
+ e_prompt_embeds, pooled_embeds, e_ids = self.encode_prompt(
487
+ prompt=edit_1,
488
+ prompt_2=edit_2,
489
+ device=device,
490
+ num_images_per_prompt=num_images_per_prompt,
491
+ max_sequence_length=max_sequence_length,
492
+ lora_scale=lora_scale,
493
+ )
494
+ editing_prompt_embeds.append(e_prompt_embeds)
495
+ pooled_editing_prompt_embeds.append(pooled_embeds)
496
+ edit_text_ids.append(e_ids)
497
+
498
+ enabled_editing_prompts = len(editing_prompt)
499
+
500
+ else:
501
+ edit_text_ids = []
502
+ enabled_editing_prompts = 0
503
+
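+ # Tile each editing embedding along the batch dimension so every base prompt in the batch
+ # receives the same set of editing concepts.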
504
+ if enabled_editing_prompts:
505
+ for idx in range(enabled_editing_prompts):
506
+ editing_prompt_embeds[idx] = torch.cat([editing_prompt_embeds[idx]] * batch_size, dim=0)
507
+ pooled_editing_prompt_embeds[idx] = torch.cat([pooled_editing_prompt_embeds[idx]] * batch_size, dim=0)
508
+
509
+ return (
510
+ prompt_embeds,
511
+ pooled_prompt_embeds,
512
+ editing_prompt_embeds,
513
+ pooled_editing_prompt_embeds,
514
+ text_ids,
515
+ edit_text_ids,
516
+ enabled_editing_prompts,
517
+ )
518
+
519
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.encode_image
520
+ def encode_image(self, image, device, num_images_per_prompt):
521
+ dtype = next(self.image_encoder.parameters()).dtype
522
+
523
+ if not isinstance(image, torch.Tensor):
524
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
525
+
526
+ image = image.to(device=device, dtype=dtype)
527
+ image_embeds = self.image_encoder(image).image_embeds
528
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
529
+ return image_embeds
530
+
531
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.prepare_ip_adapter_image_embeds
532
+ def prepare_ip_adapter_image_embeds(
533
+ self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt
534
+ ):
535
+ image_embeds = []
536
+ if ip_adapter_image_embeds is None:
537
+ if not isinstance(ip_adapter_image, list):
538
+ ip_adapter_image = [ip_adapter_image]
539
+
540
+ if len(ip_adapter_image) != len(self.transformer.encoder_hid_proj.image_projection_layers):
541
+ raise ValueError(
542
+ f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.transformer.encoder_hid_proj.image_projection_layers)} IP Adapters."
543
+ )
544
+
545
+ for single_ip_adapter_image, image_proj_layer in zip(
546
+ ip_adapter_image, self.transformer.encoder_hid_proj.image_projection_layers
547
+ ):
548
+ single_image_embeds = self.encode_image(single_ip_adapter_image, device, 1)
549
+
550
+ image_embeds.append(single_image_embeds[None, :])
551
+ else:
552
+ for single_image_embeds in ip_adapter_image_embeds:
553
+ image_embeds.append(single_image_embeds)
554
+
555
+ ip_adapter_image_embeds = []
556
+ for i, single_image_embeds in enumerate(image_embeds):
557
+ single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
558
+ single_image_embeds = single_image_embeds.to(device=device)
559
+ ip_adapter_image_embeds.append(single_image_embeds)
560
+
561
+ return ip_adapter_image_embeds
562
+
563
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.check_inputs
564
+ def check_inputs(
565
+ self,
566
+ prompt,
567
+ prompt_2,
568
+ height,
569
+ width,
570
+ negative_prompt=None,
571
+ negative_prompt_2=None,
572
+ prompt_embeds=None,
573
+ negative_prompt_embeds=None,
574
+ pooled_prompt_embeds=None,
575
+ negative_pooled_prompt_embeds=None,
576
+ callback_on_step_end_tensor_inputs=None,
577
+ max_sequence_length=None,
578
+ ):
579
+ if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0:
580
+ logger.warning(
581
+ f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly"
582
+ )
583
+
584
+ if callback_on_step_end_tensor_inputs is not None and not all(
585
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
586
+ ):
587
+ raise ValueError(
588
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
589
+ )
590
+
591
+ if prompt is not None and prompt_embeds is not None:
592
+ raise ValueError(
593
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
594
+ " only forward one of the two."
595
+ )
596
+ elif prompt_2 is not None and prompt_embeds is not None:
597
+ raise ValueError(
598
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
599
+ " only forward one of the two."
600
+ )
601
+ elif prompt is None and prompt_embeds is None:
602
+ raise ValueError(
603
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
604
+ )
605
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
606
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
607
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
608
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
609
+
610
+ if negative_prompt is not None and negative_prompt_embeds is not None:
611
+ raise ValueError(
612
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
613
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
614
+ )
615
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
616
+ raise ValueError(
617
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
618
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
619
+ )
620
+
621
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
622
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
623
+ raise ValueError(
624
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
625
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
626
+ f" {negative_prompt_embeds.shape}."
627
+ )
628
+
629
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
630
+ raise ValueError(
631
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
632
+ )
633
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
634
+ raise ValueError(
635
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
636
+ )
637
+
638
+ if max_sequence_length is not None and max_sequence_length > 512:
639
+ raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
640
+
641
+ @staticmethod
642
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._prepare_latent_image_ids
643
+ def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
644
+ latent_image_ids = torch.zeros(height, width, 3)
645
+ latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None]
646
+ latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :]
647
+
648
+ latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape
649
+
650
+ latent_image_ids = latent_image_ids.reshape(
651
+ latent_image_id_height * latent_image_id_width, latent_image_id_channels
652
+ )
653
+
654
+ return latent_image_ids.to(device=device, dtype=dtype)
655
+
656
+ @staticmethod
657
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._pack_latents
658
+ def _pack_latents(latents, batch_size, num_channels_latents, height, width):
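+ # Rearrange the latent grid into non-overlapping 2x2 patches and flatten them into a token
+ # sequence of shape (batch, (H/2)*(W/2), C*4), the packed layout the Flux transformer expects.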
659
+ latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
660
+ latents = latents.permute(0, 2, 4, 1, 3, 5)
661
+ latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
662
+
663
+ return latents
664
+
665
+ @staticmethod
666
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents
667
+ def _unpack_latents(latents, height, width, vae_scale_factor):
668
+ batch_size, num_patches, channels = latents.shape
669
+
670
+ # VAE applies 8x compression on images but we must also account for packing which requires
671
+ # latent height and width to be divisible by 2.
672
+ height = 2 * (int(height) // (vae_scale_factor * 2))
673
+ width = 2 * (int(width) // (vae_scale_factor * 2))
674
+
675
+ latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2)
676
+ latents = latents.permute(0, 3, 1, 4, 2, 5)
677
+
678
+ latents = latents.reshape(batch_size, channels // (2 * 2), height, width)
679
+
680
+ return latents
681
+
682
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.enable_vae_slicing
683
+ def enable_vae_slicing(self):
684
+ r"""
685
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
686
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
687
+ """
688
+ self.vae.enable_slicing()
689
+
690
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_slicing
691
+ def disable_vae_slicing(self):
692
+ r"""
693
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
694
+ computing decoding in one step.
695
+ """
696
+ self.vae.disable_slicing()
697
+
698
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.enable_vae_tiling
699
+ def enable_vae_tiling(self):
700
+ r"""
701
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
702
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
703
+ processing larger images.
704
+ """
705
+ self.vae.enable_tiling()
706
+
707
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_tiling
708
+ def disable_vae_tiling(self):
709
+ r"""
710
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
711
+ computing decoding in one step.
712
+ """
713
+ self.vae.disable_tiling()
714
+
715
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.prepare_latents
716
+ def prepare_latents(
717
+ self,
718
+ batch_size,
719
+ num_channels_latents,
720
+ height,
721
+ width,
722
+ dtype,
723
+ device,
724
+ generator,
725
+ latents=None,
726
+ ):
727
+ # VAE applies 8x compression on images but we must also account for packing which requires
728
+ # latent height and width to be divisible by 2.
729
+ height = 2 * (int(height) // (self.vae_scale_factor * 2))
730
+ width = 2 * (int(width) // (self.vae_scale_factor * 2))
731
+
732
+ shape = (batch_size, num_channels_latents, height, width)
733
+
734
+ if latents is not None:
735
+ latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)
736
+ return latents.to(device=device, dtype=dtype), latent_image_ids
737
+
738
+ if isinstance(generator, list) and len(generator) != batch_size:
739
+ raise ValueError(
740
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
741
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
742
+ )
743
+
744
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
745
+ latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)
746
+
747
+ latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)
748
+
749
+ return latents, latent_image_ids
750
+
751
+ @property
752
+ def guidance_scale(self):
753
+ return self._guidance_scale
754
+
755
+ @property
756
+ def joint_attention_kwargs(self):
757
+ return self._joint_attention_kwargs
758
+
759
+ @property
760
+ def num_timesteps(self):
761
+ return self._num_timesteps
762
+
763
+ @property
764
+ def interrupt(self):
765
+ return self._interrupt
766
+
767
+ @torch.no_grad()
768
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
769
+ def __call__(
770
+ self,
771
+ prompt: Union[str, List[str]] = None,
772
+ prompt_2: Optional[Union[str, List[str]]] = None,
773
+ negative_prompt: Union[str, List[str]] = None,
774
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
775
+ true_cfg_scale: float = 1.0,
776
+ height: Optional[int] = None,
777
+ width: Optional[int] = None,
778
+ num_inference_steps: int = 28,
779
+ sigmas: Optional[List[float]] = None,
780
+ guidance_scale: float = 3.5,
781
+ num_images_per_prompt: Optional[int] = 1,
782
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
783
+ latents: Optional[torch.FloatTensor] = None,
784
+ prompt_embeds: Optional[torch.FloatTensor] = None,
785
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
786
+ ip_adapter_image: Optional[PipelineImageInput] = None,
787
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
788
+ negative_ip_adapter_image: Optional[PipelineImageInput] = None,
789
+ negative_ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
790
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
791
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
792
+ output_type: Optional[str] = "pil",
793
+ return_dict: bool = True,
794
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
795
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
796
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
797
+ max_sequence_length: int = 512,
798
+ editing_prompt: Optional[Union[str, List[str]]] = None,
799
+ editing_prompt_2: Optional[Union[str, List[str]]] = None,
800
+ editing_prompt_embeds: Optional[torch.FloatTensor] = None,
801
+ pooled_editing_prompt_embeds: Optional[torch.FloatTensor] = None,
802
+ reverse_editing_direction: Optional[Union[bool, List[bool]]] = False,
803
+ edit_guidance_scale: Optional[Union[float, List[float]]] = 5,
804
+ edit_warmup_steps: Optional[Union[int, List[int]]] = 8,
805
+ edit_cooldown_steps: Optional[Union[int, List[int]]] = None,
806
+ edit_threshold: Optional[Union[float, List[float]]] = 0.9,
807
+ edit_momentum_scale: Optional[float] = 0.1,
808
+ edit_mom_beta: Optional[float] = 0.4,
809
+ edit_weights: Optional[List[float]] = None,
810
+ sem_guidance: Optional[List[torch.Tensor]] = None,
811
+ ):
812
+ r"""
813
+ Function invoked when calling the pipeline for generation.
814
+
815
+ Args:
816
+ prompt (`str` or `List[str]`, *optional*):
817
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
818
+ instead.
819
+ prompt_2 (`str` or `List[str]`, *optional*):
820
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt`
821
+ will be used instead.
822
+ negative_prompt (`str` or `List[str]`, *optional*):
823
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
824
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is
825
+ not greater than `1`).
826
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
827
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
828
+ `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
829
+ true_cfg_scale (`float`, *optional*, defaults to 1.0):
830
+ When > 1.0 and a `negative_prompt` is provided, true classifier-free guidance is enabled.
831
+ height (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor):
832
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
833
+ width (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor):
834
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
835
+ num_inference_steps (`int`, *optional*, defaults to 28):
836
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
837
+ expense of slower inference.
838
+ sigmas (`List[float]`, *optional*):
839
+ Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
840
+ their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
841
+ will be used.
842
+ guidance_scale (`float`, *optional*, defaults to 3.5):
843
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
844
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
845
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
846
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
847
+ usually at the expense of lower image quality.
848
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
849
+ The number of images to generate per prompt.
850
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
851
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
852
+ to make generation deterministic.
853
+ latents (`torch.FloatTensor`, *optional*):
854
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
855
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
856
+ tensor will be generated by sampling using the supplied random `generator`.
857
+ prompt_embeds (`torch.FloatTensor`, *optional*):
858
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
859
+ provided, text embeddings will be generated from `prompt` input argument.
860
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
861
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
862
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
863
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
864
+ ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
865
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
866
+ IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. If not
867
+ provided, embeddings are computed from the `ip_adapter_image` input argument.
868
+ negative_ip_adapter_image (`PipelineImageInput`, *optional*):
869
+ Optional image input to work with IP Adapters.
870
+ negative_ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
871
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
872
+ IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. If not
873
+ provided, embeddings are computed from the `ip_adapter_image` input argument.
874
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
875
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
876
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
877
+ argument.
878
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
879
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
880
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
881
+ input argument.
882
+ output_type (`str`, *optional*, defaults to `"pil"`):
883
+ The output format of the generated image. Choose between
884
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
885
+ return_dict (`bool`, *optional*, defaults to `True`):
886
+ Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple.
887
+ joint_attention_kwargs (`dict`, *optional*):
888
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
889
+ `self.processor` in
890
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
891
+ callback_on_step_end (`Callable`, *optional*):
892
+ A function that is called at the end of each denoising step during inference. The function is called
893
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
894
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
895
+ `callback_on_step_end_tensor_inputs`.
896
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
897
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
898
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
899
+ `._callback_tensor_inputs` attribute of your pipeline class.
900
+ max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`.
901
+ editing_prompt (`str` or `List[str]`, *optional*):
902
+ The prompt or prompts to guide the image editing. If not defined, no editing will be performed.
903
+ editing_prompt_2 (`str` or `List[str]`, *optional*):
904
+ The prompt or prompts to guide the image editing. If not defined, will use editing_prompt instead.
905
+ editing_prompt_embeds (`torch.FloatTensor`, *optional*):
906
+ Pre-generated text embeddings for editing. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
907
+ If not provided, text embeddings will be generated from `editing_prompt` input argument.
908
+ reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`):
909
+ Whether to reverse the editing direction for each editing prompt.
910
+ edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5):
911
+ Guidance scale for the editing process. If provided as a list, each value corresponds to an editing prompt.
912
+ edit_warmup_steps (`int` or `List[int]`, *optional*, defaults to 8):
913
+ Number of warmup steps for editing guidance. If provided as a list, each value corresponds to an editing prompt.
914
+ edit_cooldown_steps (`int` or `List[int]`, *optional*, defaults to None):
915
+ Number of cooldown steps for editing guidance. If provided as a list, each value corresponds to an editing prompt.
916
+ edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9):
917
+ Threshold for editing guidance. If provided as a list, each value corresponds to an editing prompt.
918
+ edit_momentum_scale (`float`, *optional*, defaults to 0.1):
919
+ Scale of momentum to be added to the editing guidance at each diffusion step.
920
+ edit_mom_beta (`float`, *optional*, defaults to 0.4):
921
+ Beta value for momentum calculation in editing guidance.
922
+ edit_weights (`List[float]`, *optional*):
923
+ Weights for each editing prompt.
924
+ sem_guidance (`List[torch.Tensor]`, *optional*):
925
+ Pre-generated semantic guidance. If provided, it will be used instead of calculating guidance from editing prompts.
926
+
927
+ Examples:
928
+
929
+ Returns:
930
+ [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict`
931
+ is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated
932
+ images.
933
+ """
934
+
935
+ height = height or self.default_sample_size * self.vae_scale_factor
936
+ width = width or self.default_sample_size * self.vae_scale_factor
937
+
938
+ # 1. Check inputs. Raise error if not correct
939
+ self.check_inputs(
940
+ prompt,
941
+ prompt_2,
942
+ height,
943
+ width,
944
+ prompt_embeds=prompt_embeds,
945
+ pooled_prompt_embeds=pooled_prompt_embeds,
946
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
947
+ max_sequence_length=max_sequence_length,
948
+ )
949
+
950
+ self._guidance_scale = guidance_scale
951
+ self._joint_attention_kwargs = joint_attention_kwargs
952
+ self._interrupt = False
953
+
954
+ # 2. Define call parameters
955
+ if prompt is not None and isinstance(prompt, str):
956
+ batch_size = 1
957
+ elif prompt is not None and isinstance(prompt, list):
958
+ batch_size = len(prompt)
959
+ else:
960
+ batch_size = prompt_embeds.shape[0]
961
+
962
+ if editing_prompt:
963
+ enable_edit_guidance = True
964
+ if isinstance(editing_prompt, str):
965
+ editing_prompt = [editing_prompt]
966
+ enabled_editing_prompts = len(editing_prompt)
967
+ elif editing_prompt_embeds is not None:
968
+ enable_edit_guidance = True
969
+ enabled_editing_prompts = editing_prompt_embeds.shape[0]
970
+ else:
971
+ enabled_editing_prompts = 0
972
+ enable_edit_guidance = False
973
+
974
+ has_neg_prompt = negative_prompt is not None or (
975
+ negative_prompt_embeds is not None and negative_pooled_prompt_embeds is not None
976
+ )
977
+ do_true_cfg = true_cfg_scale > 1 and has_neg_prompt
978
+
979
+ device = self._execution_device
980
+
981
+ lora_scale = (
982
+ self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
983
+ )
984
+ (
985
+ prompt_embeds,
986
+ pooled_prompt_embeds,
987
+ editing_prompts_embeds,
988
+ pooled_editing_prompt_embeds,
989
+ text_ids,
990
+ edit_text_ids,
991
+ enabled_editing_prompts,
992
+ ) = self.encode_text_with_editing(
993
+ prompt=prompt,
994
+ prompt_2=prompt_2,
995
+ pooled_prompt_embeds=pooled_prompt_embeds,
996
+ editing_prompt=editing_prompt,
997
+ editing_prompt_2=editing_prompt_2,
998
+ pooled_editing_prompt_embeds=pooled_editing_prompt_embeds,
999
+ lora_scale=lora_scale,
1000
+ device=device,
1001
+ num_images_per_prompt=num_images_per_prompt,
1002
+ max_sequence_length=max_sequence_length,
1003
+ )
1004
+
1005
+ if do_true_cfg:
1006
+ (
1007
+ negative_prompt_embeds,
1008
+ negative_pooled_prompt_embeds,
1009
+ _,
1010
+ ) = self.encode_prompt(
1011
+ prompt=negative_prompt,
1012
+ prompt_2=negative_prompt_2,
1013
+ prompt_embeds=negative_prompt_embeds,
1014
+ pooled_prompt_embeds=negative_pooled_prompt_embeds,
1015
+ device=device,
1016
+ num_images_per_prompt=num_images_per_prompt,
1017
+ max_sequence_length=max_sequence_length,
1018
+ lora_scale=lora_scale,
1019
+ )
1020
+ negative_prompt_embeds = torch.cat([negative_prompt_embeds] * batch_size, dim=0)
1021
+ negative_pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds] * batch_size, dim=0)
1022
+
1023
+ # 4. Prepare latent variables
1024
+ num_channels_latents = self.transformer.config.in_channels // 4
1025
+ latents, latent_image_ids = self.prepare_latents(
1026
+ batch_size * num_images_per_prompt,
1027
+ num_channels_latents,
1028
+ height,
1029
+ width,
1030
+ prompt_embeds.dtype,
1031
+ device,
1032
+ generator,
1033
+ latents,
1034
+ )
1035
+
1036
+ # 5. Prepare timesteps
1037
+ sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas
1038
+ image_seq_len = latents.shape[1]
1039
+ mu = calculate_shift(
1040
+ image_seq_len,
1041
+ self.scheduler.config.get("base_image_seq_len", 256),
1042
+ self.scheduler.config.get("max_image_seq_len", 4096),
1043
+ self.scheduler.config.get("base_shift", 0.5),
1044
+ self.scheduler.config.get("max_shift", 1.16),
1045
+ )
1046
+ timesteps, num_inference_steps = retrieve_timesteps(
1047
+ self.scheduler,
1048
+ num_inference_steps,
1049
+ device,
1050
+ sigmas=sigmas,
1051
+ mu=mu,
1052
+ )
1053
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1054
+ self._num_timesteps = len(timesteps)
1055
+
1056
+ edit_momentum = None
1057
+ if edit_warmup_steps:
1058
+ tmp_e_warmup_steps = edit_warmup_steps if isinstance(edit_warmup_steps, list) else [edit_warmup_steps]
1059
+ min_edit_warmup_steps = min(tmp_e_warmup_steps)
1060
+ else:
1061
+ min_edit_warmup_steps = 0
1062
+
1063
+ if edit_cooldown_steps:
1064
+ tmp_e_cooldown_steps = (
1065
+ edit_cooldown_steps if isinstance(edit_cooldown_steps, list) else [edit_cooldown_steps]
1066
+ )
1067
+ max_edit_cooldown_steps = min(max(tmp_e_cooldown_steps), num_inference_steps)
1068
+ else:
1069
+ max_edit_cooldown_steps = num_inference_steps
1070
+
1071
+ # handle guidance
1072
+ if self.transformer.config.guidance_embeds:
1073
+ guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32)
1074
+ guidance = guidance.expand(latents.shape[0])
1075
+ else:
1076
+ guidance = None
1077
+
1078
+ if (ip_adapter_image is not None or ip_adapter_image_embeds is not None) and (
1079
+ negative_ip_adapter_image is None and negative_ip_adapter_image_embeds is None
1080
+ ):
1081
+ negative_ip_adapter_image = np.zeros((width, height, 3), dtype=np.uint8)
1082
+ elif (ip_adapter_image is None and ip_adapter_image_embeds is None) and (
1083
+ negative_ip_adapter_image is not None or negative_ip_adapter_image_embeds is not None
1084
+ ):
1085
+ ip_adapter_image = np.zeros((width, height, 3), dtype=np.uint8)
1086
+
1087
+ if self.joint_attention_kwargs is None:
1088
+ self._joint_attention_kwargs = {}
1089
+
1090
+ image_embeds = None
1091
+ negative_image_embeds = None
1092
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1093
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1094
+ ip_adapter_image,
1095
+ ip_adapter_image_embeds,
1096
+ device,
1097
+ batch_size * num_images_per_prompt,
1098
+ )
1099
+ if negative_ip_adapter_image is not None or negative_ip_adapter_image_embeds is not None:
1100
+ negative_image_embeds = self.prepare_ip_adapter_image_embeds(
1101
+ negative_ip_adapter_image,
1102
+ negative_ip_adapter_image_embeds,
1103
+ device,
1104
+ batch_size * num_images_per_prompt,
1105
+ )
1106
+
1107
+ # 6. Denoising loop
1108
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1109
+ for i, t in enumerate(timesteps):
1110
+ if self.interrupt:
1111
+ continue
1112
+
1113
+ if image_embeds is not None:
1114
+ self._joint_attention_kwargs["ip_adapter_image_embeds"] = image_embeds
1115
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
1116
+ timestep = t.expand(latents.shape[0]).to(latents.dtype)
1117
+
1118
+ # handle guidance
1119
+ if self.transformer.config.guidance_embeds:
1120
+ guidance = torch.tensor([guidance_scale], device=device)
1121
+ guidance = guidance.expand(latents.shape[0])
1122
+ else:
1123
+ guidance = None
1124
+
1125
+ noise_pred = self.transformer(
1126
+ hidden_states=latents,
1127
+ timestep=timestep / 1000,
1128
+ guidance=guidance,
1129
+ pooled_projections=pooled_prompt_embeds,
1130
+ encoder_hidden_states=prompt_embeds,
1131
+ txt_ids=text_ids,
1132
+ img_ids=latent_image_ids,
1133
+ joint_attention_kwargs=self.joint_attention_kwargs,
1134
+ return_dict=False,
1135
+ )[0]
1136
+
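+ # One additional transformer pass per editing prompt yields the concept-specific noise
+ # predictions that drive semantic guidance inside the warmup/cooldown window.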
1137
+ if enable_edit_guidance and max_edit_cooldown_steps >= i >= min_edit_warmup_steps:
1138
+ noise_pred_edit_concepts = []
1139
+ for e_embed, pooled_e_embed, e_text_id in zip(
1140
+ editing_prompts_embeds, pooled_editing_prompt_embeds, edit_text_ids
1141
+ ):
1142
+ noise_pred_edit = self.transformer(
1143
+ hidden_states=latents,
1144
+ timestep=timestep / 1000,
1145
+ guidance=guidance,
1146
+ pooled_projections=pooled_e_embed,
1147
+ encoder_hidden_states=e_embed,
1148
+ txt_ids=e_text_id,
1149
+ img_ids=latent_image_ids,
1150
+ joint_attention_kwargs=self.joint_attention_kwargs,
1151
+ return_dict=False,
1152
+ )[0]
1153
+ noise_pred_edit_concepts.append(noise_pred_edit)
1154
+
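+ # With true CFG the guidance signal is the scaled difference between the conditional and
+ # unconditional predictions; otherwise the conditional prediction itself is used.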
1155
+ if do_true_cfg:
1156
+ if negative_image_embeds is not None:
1157
+ self._joint_attention_kwargs["ip_adapter_image_embeds"] = negative_image_embeds
1158
+ noise_pred_uncond = self.transformer(
1159
+ hidden_states=latents,
1160
+ timestep=timestep / 1000,
1161
+ guidance=guidance,
1162
+ pooled_projections=negative_pooled_prompt_embeds,
1163
+ encoder_hidden_states=negative_prompt_embeds,
1164
+ txt_ids=text_ids,
1165
+ img_ids=latent_image_ids,
1166
+ joint_attention_kwargs=self.joint_attention_kwargs,
1167
+ return_dict=False,
1168
+ )[0]
1169
+ noise_guidance = true_cfg_scale * (noise_pred - noise_pred_uncond)
1170
+ else:
1171
+ noise_pred_uncond = noise_pred
1172
+ noise_guidance = noise_pred
1173
+
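+ # Lazily initialize the SEGA momentum buffer; it accumulates an exponential moving average
+ # of the combined edit guidance across denoising steps.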
1174
+ if edit_momentum is None:
1175
+ edit_momentum = torch.zeros_like(noise_guidance)
1176
+
1177
+ if enable_edit_guidance and max_edit_cooldown_steps >= i >= min_edit_warmup_steps:
1178
+ concept_weights = torch.zeros(
1179
+ (enabled_editing_prompts, noise_guidance.shape[0]),
1180
+ device=device,
1181
+ dtype=noise_guidance.dtype,
1182
+ )
1183
+ noise_guidance_edit = torch.zeros(
1184
+ (enabled_editing_prompts, *noise_guidance.shape),
1185
+ device=device,
1186
+ dtype=noise_guidance.dtype,
1187
+ )
1188
+
1189
+ warmup_inds = []
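+ # Resolve per-concept settings (guidance scale, threshold, direction, weight, warmup/cooldown)
+ # and build a thresholded guidance term for each editing concept relative to the unconditional
+ # or base prediction.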
1190
+ for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts):
1191
+ if isinstance(edit_guidance_scale, list):
1192
+ edit_guidance_scale_c = edit_guidance_scale[c]
1193
+ else:
1194
+ edit_guidance_scale_c = edit_guidance_scale
1195
+
1196
+ if isinstance(edit_threshold, list):
1197
+ edit_threshold_c = edit_threshold[c]
1198
+ else:
1199
+ edit_threshold_c = edit_threshold
1200
+ if isinstance(reverse_editing_direction, list):
1201
+ reverse_editing_direction_c = reverse_editing_direction[c]
1202
+ else:
1203
+ reverse_editing_direction_c = reverse_editing_direction
1204
+ if edit_weights:
1205
+ edit_weight_c = edit_weights[c]
1206
+ else:
1207
+ edit_weight_c = 1.0
1208
+ if isinstance(edit_warmup_steps, list):
1209
+ edit_warmup_steps_c = edit_warmup_steps[c]
1210
+ else:
1211
+ edit_warmup_steps_c = edit_warmup_steps
1212
+
1213
+ if isinstance(edit_cooldown_steps, list):
1214
+ edit_cooldown_steps_c = edit_cooldown_steps[c]
1215
+ elif edit_cooldown_steps is None:
1216
+ edit_cooldown_steps_c = i + 1
1217
+ else:
1218
+ edit_cooldown_steps_c = edit_cooldown_steps
1219
+ if i >= edit_warmup_steps_c:
1220
+ warmup_inds.append(c)
1221
+ if i >= edit_cooldown_steps_c:
1222
+ noise_guidance_edit[c, :, :, :] = torch.zeros_like(noise_pred_edit_concept)
1223
+ continue
1224
+
1225
+ if do_true_cfg:
1226
+ noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond
1227
+ else: # simple sega
1228
+ noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred
1229
+ tmp_weights = (noise_guidance - noise_pred_edit_concept).sum(dim=(1, 2))
1230
+
1231
+ tmp_weights = torch.full_like(tmp_weights, edit_weight_c) # * (1 / enabled_editing_prompts)
1232
+ if reverse_editing_direction_c:
1233
+ noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1
1234
+ concept_weights[c, :] = tmp_weights
1235
+
1236
+ noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c
1237
+
1238
+ # torch.quantile function expects float32
1239
+ if noise_guidance_edit_tmp.dtype == torch.float32:
1240
+ tmp = torch.quantile(
1241
+ torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2),
1242
+ edit_threshold_c,
1243
+ dim=2,
1244
+ keepdim=False,
1245
+ )
1246
+ else:
1247
+ tmp = torch.quantile(
1248
+ torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2).to(torch.float32),
1249
+ edit_threshold_c,
1250
+ dim=2,
1251
+ keepdim=False,
1252
+ ).to(noise_guidance_edit_tmp.dtype)
1253
+
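+ # Zero out latent elements whose |guidance| falls below the per-concept quantile threshold,
+ # so the edit only affects the most strongly responding regions.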
1254
+ noise_guidance_edit_tmp = torch.where(
1255
+ torch.abs(noise_guidance_edit_tmp) >= tmp[:, :, None],
1256
+ noise_guidance_edit_tmp,
1257
+ torch.zeros_like(noise_guidance_edit_tmp),
1258
+ )
1259
+
1260
+ noise_guidance_edit[c, :, :, :] = noise_guidance_edit_tmp
1261
+
1262
+ warmup_inds = torch.tensor(warmup_inds).to(device)
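+ # If only a subset of concepts has passed warmup, apply guidance from that subset immediately
+ # (with weights normalized over the active concepts); the full combined edit term is added
+ # further below only once every concept is past its warmup.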
1263
+ if len(noise_pred_edit_concepts) > warmup_inds.shape[0] > 0:
1264
+ concept_weights = concept_weights.to("cpu") # Offload to cpu
1265
+ noise_guidance_edit = noise_guidance_edit.to("cpu")
1266
+
1267
+ concept_weights_tmp = torch.index_select(concept_weights.to(device), 0, warmup_inds)
1268
+ concept_weights_tmp = torch.where(
1269
+ concept_weights_tmp < 0, torch.zeros_like(concept_weights_tmp), concept_weights_tmp
1270
+ )
1271
+ concept_weights_tmp = concept_weights_tmp / concept_weights_tmp.sum(dim=0)
1272
+
1273
+ noise_guidance_edit_tmp = torch.index_select(noise_guidance_edit.to(device), 0, warmup_inds)
1274
+ noise_guidance_edit_tmp = torch.einsum(
1275
+ "cb,cbij->bij", concept_weights_tmp, noise_guidance_edit_tmp
1276
+ )
1277
+ noise_guidance_edit_tmp = noise_guidance_edit_tmp
1278
+ noise_guidance = noise_guidance + noise_guidance_edit_tmp
1279
+
1280
+ del noise_guidance_edit_tmp
1281
+ del concept_weights_tmp
1282
+ concept_weights = concept_weights.to(device)
1283
+ noise_guidance_edit = noise_guidance_edit.to(device)
1284
+
1285
+ concept_weights = torch.where(
1286
+ concept_weights < 0, torch.zeros_like(concept_weights), concept_weights
1287
+ )
1288
+
1289
+ concept_weights = torch.nan_to_num(concept_weights)
1290
+
1291
+ noise_guidance_edit = torch.einsum("cb,cbij->bij", concept_weights, noise_guidance_edit)
1292
+
1293
+ noise_guidance_edit = noise_guidance_edit + edit_momentum_scale * edit_momentum
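+ # Add the accumulated momentum to the combined edit term, then update the momentum buffer
+ # as an exponential moving average controlled by edit_mom_beta.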
1294
+
1295
+ edit_momentum = edit_mom_beta * edit_momentum + (1 - edit_mom_beta) * noise_guidance_edit
1296
+
1297
+ if warmup_inds.shape[0] == len(noise_pred_edit_concepts):
1298
+ noise_guidance = noise_guidance + noise_guidance_edit
1299
+
1300
+ if sem_guidance is not None:
1301
+ edit_guidance = sem_guidance[i].to(device)
1302
+ noise_guidance = noise_guidance + edit_guidance
1303
+
1304
+ if do_true_cfg:
1305
+ noise_pred = noise_guidance + noise_pred_uncond
1306
+ else:
1307
+ noise_pred = noise_guidance
1308
+
1309
+ # compute the previous noisy sample x_t -> x_t-1
1310
+ latents_dtype = latents.dtype
1311
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
1312
+
1313
+ if latents.dtype != latents_dtype:
1314
+ if torch.backends.mps.is_available():
1315
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
1316
+ latents = latents.to(latents_dtype)
1317
+
1318
+ if callback_on_step_end is not None:
1319
+ callback_kwargs = {}
1320
+ for k in callback_on_step_end_tensor_inputs:
1321
+ callback_kwargs[k] = locals()[k]
1322
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1323
+
1324
+ latents = callback_outputs.pop("latents", latents)
1325
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1326
+
1327
+ # call the callback, if provided
1328
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1329
+ progress_bar.update()
1330
+
1331
+ if XLA_AVAILABLE:
1332
+ xm.mark_step()
1333
+
1334
+ if output_type == "latent":
1335
+ image = latents
1336
+
1337
+ else:
1338
+ latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
1339
+ latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
1340
+ image = self.vae.decode(latents, return_dict=False)[0]
1341
+ image = self.image_processor.postprocess(image, output_type=output_type)
1342
+
1343
+ # Offload all models
1344
+ self.maybe_free_model_hooks()
1345
+
1346
+ if not return_dict:
1347
+ return (image,)
1348
+
1349
+ return FluxPipelineOutput(
1350
+ image,
1351
+ )