Commit d1c9639 (verified) · 1 Parent(s): 74434c1
Diffusers Bot committed

Upload folder using huggingface_hub

main/lpw_stable_diffusion.py CHANGED

@@ -13,13 +13,17 @@ from diffusers.configuration_utils import FrozenDict
 from diffusers.image_processor import VaeImageProcessor
 from diffusers.loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
+from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
 from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.utils import (
     PIL_INTERPOLATION,
+    USE_PEFT_BACKEND,
     deprecate,
     logging,
+    scale_lora_layers,
+    unscale_lora_layers,
 )
 from diffusers.utils.torch_utils import randn_tensor
 
@@ -199,6 +203,7 @@ def get_unweighted_text_embeddings(
     text_input: torch.Tensor,
     chunk_length: int,
     no_boseos_middle: Optional[bool] = True,
+    clip_skip: Optional[int] = None,
 ):
     """
     When the length of tokens is a multiple of the capacity of the text encoder,
@@ -214,7 +219,20 @@ def get_unweighted_text_embeddings(
             # cover the head and the tail by the starting and the ending tokens
             text_input_chunk[:, 0] = text_input[0, 0]
             text_input_chunk[:, -1] = text_input[0, -1]
-            text_embedding = pipe.text_encoder(text_input_chunk)[0]
+            if clip_skip is None:
+                prompt_embeds = pipe.text_encoder(text_input_chunk.to(pipe.device))
+                text_embedding = prompt_embeds[0]
+            else:
+                prompt_embeds = pipe.text_encoder(text_input_chunk.to(pipe.device), output_hidden_states=True)
+                # Access the `hidden_states` first, that contains a tuple of
+                # all the hidden states from the encoder layers. Then index into
+                # the tuple to access the hidden states from the desired layer.
+                prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
+                # We also need to apply the final LayerNorm here to not mess with the
+                # representations. The `last_hidden_states` that we typically use for
+                # obtaining the final prompt representations passes through the LayerNorm
+                # layer.
+                text_embedding = pipe.text_encoder.text_model.final_layer_norm(prompt_embeds)
 
             if no_boseos_middle:
                 if i == 0:
@@ -230,7 +248,10 @@ def get_unweighted_text_embeddings(
         text_embeddings.append(text_embedding)
         text_embeddings = torch.concat(text_embeddings, axis=1)
     else:
-        text_embeddings = pipe.text_encoder(text_input)[0]
+        if clip_skip is None:
+            clip_skip = 0
+        prompt_embeds = pipe.text_encoder(text_input, output_hidden_states=True)[-1][-(clip_skip + 1)]
+        text_embeddings = pipe.text_encoder.text_model.final_layer_norm(prompt_embeds)
     return text_embeddings
 
 
@@ -242,6 +263,8 @@ def get_weighted_text_embeddings(
     no_boseos_middle: Optional[bool] = False,
     skip_parsing: Optional[bool] = False,
     skip_weighting: Optional[bool] = False,
+    clip_skip=None,
+    lora_scale=None,
 ):
     r"""
     Prompts can be assigned with local weights using brackets. For example,
@@ -268,6 +291,16 @@ def get_weighted_text_embeddings(
         skip_weighting (`bool`, *optional*, defaults to `False`):
             Skip the weighting. When the parsing is skipped, it is forced True.
     """
+    # set lora scale so that monkey patched LoRA
+    # function of text encoder can correctly access it
+    if lora_scale is not None and isinstance(pipe, StableDiffusionLoraLoaderMixin):
+        pipe._lora_scale = lora_scale
+
+        # dynamically adjust the LoRA scale
+        if not USE_PEFT_BACKEND:
+            adjust_lora_scale_text_encoder(pipe.text_encoder, lora_scale)
+        else:
+            scale_lora_layers(pipe.text_encoder, lora_scale)
     max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
     if isinstance(prompt, str):
         prompt = [prompt]
@@ -334,10 +367,7 @@ def get_weighted_text_embeddings(
 
     # get the embeddings
     text_embeddings = get_unweighted_text_embeddings(
-        pipe,
-        prompt_tokens,
-        pipe.tokenizer.model_max_length,
-        no_boseos_middle=no_boseos_middle,
+        pipe, prompt_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle, clip_skip=clip_skip
     )
     prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device)
     if uncond_prompt is not None:
@@ -346,6 +376,7 @@ def get_weighted_text_embeddings(
             uncond_tokens,
             pipe.tokenizer.model_max_length,
             no_boseos_middle=no_boseos_middle,
+            clip_skip=clip_skip,
         )
         uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device)
 
@@ -362,6 +393,11 @@ def get_weighted_text_embeddings(
             current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
             uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
 
+    if pipe.text_encoder is not None:
+        if isinstance(pipe, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
+            # Retrieve the original scale by scaling back the LoRA layers
+            unscale_lora_layers(pipe.text_encoder, lora_scale)
+
     if uncond_prompt is not None:
         return text_embeddings, uncond_embeddings
     return text_embeddings, None
@@ -549,6 +585,8 @@ class StableDiffusionLongPromptWeightingPipeline(
         max_embeddings_multiples=3,
         prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        clip_skip: Optional[int] = None,
+        lora_scale: Optional[float] = None,
     ):
         r"""
         Encodes the prompt into text encoder hidden states.
@@ -597,6 +635,8 @@ class StableDiffusionLongPromptWeightingPipeline(
                 prompt=prompt,
                 uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
                 max_embeddings_multiples=max_embeddings_multiples,
+                clip_skip=clip_skip,
+                lora_scale=lora_scale,
             )
             if prompt_embeds is None:
                 prompt_embeds = prompt_embeds1
@@ -790,6 +830,7 @@ class StableDiffusionLongPromptWeightingPipeline(
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         is_cancelled_callback: Optional[Callable[[], bool]] = None,
+        clip_skip: Optional[int] = None,
        callback_steps: int = 1,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
     ):
@@ -865,6 +906,9 @@ class StableDiffusionLongPromptWeightingPipeline(
             is_cancelled_callback (`Callable`, *optional*):
                 A function that will be called every `callback_steps` steps during inference. If the function returns
                 `True`, the inference will be cancelled.
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
             callback_steps (`int`, *optional*, defaults to 1):
                 The frequency at which the `callback` function will be called. If not specified, the callback will be
                 called at every step.
@@ -903,6 +947,7 @@ class StableDiffusionLongPromptWeightingPipeline(
         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
         # corresponds to doing no classifier free guidance.
         do_classifier_free_guidance = guidance_scale > 1.0
+        lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
 
         # 3. Encode input prompt
         prompt_embeds = self._encode_prompt(
@@ -914,6 +959,8 @@ class StableDiffusionLongPromptWeightingPipeline(
             max_embeddings_multiples,
             prompt_embeds=prompt_embeds,
             negative_prompt_embeds=negative_prompt_embeds,
+            clip_skip=clip_skip,
+            lora_scale=lora_scale,
         )
         dtype = prompt_embeds.dtype
 
@@ -1044,6 +1091,7 @@ class StableDiffusionLongPromptWeightingPipeline(
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         is_cancelled_callback: Optional[Callable[[], bool]] = None,
+        clip_skip=None,
         callback_steps: int = 1,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
     ):
@@ -1101,6 +1149,9 @@ class StableDiffusionLongPromptWeightingPipeline(
             is_cancelled_callback (`Callable`, *optional*):
                 A function that will be called every `callback_steps` steps during inference. If the function returns
                 `True`, the inference will be cancelled.
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
             callback_steps (`int`, *optional*, defaults to 1):
                 The frequency at which the `callback` function will be called. If not specified, the callback will be
                 called at every step.
@@ -1135,6 +1186,7 @@ class StableDiffusionLongPromptWeightingPipeline(
             return_dict=return_dict,
             callback=callback,
             is_cancelled_callback=is_cancelled_callback,
+            clip_skip=clip_skip,
             callback_steps=callback_steps,
             cross_attention_kwargs=cross_attention_kwargs,
         )
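The `clip_skip` branch added to `get_unweighted_text_embeddings` above follows the usual CLIP-skip recipe: take an earlier hidden state from the text encoder instead of the last one, then re-apply the encoder's final layer norm. A minimal standalone sketch of that indexing, assuming a stock transformers `CLIPTextModel` (the checkpoint name is illustrative):

import torch
from transformers import CLIPTextModel, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

tokens = tokenizer("a photo of a cat", return_tensors="pt").input_ids
clip_skip = 2  # 1 = pre-final layer, 2 = two layers before the final one, ...

with torch.no_grad():
    out = text_encoder(tokens, output_hidden_states=True)
    # `hidden_states` holds the embedding output plus one entry per encoder
    # layer, so index -1 is the last layer and -(clip_skip + 1) skips back.
    skipped = out.hidden_states[-(clip_skip + 1)]
    # Re-apply the final LayerNorm, which `last_hidden_state` normally passes
    # through inside the encoder; the diff does the same.
    embeds = text_encoder.text_model.final_layer_norm(skipped)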
 
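Taken together, these changes let callers pass `clip_skip` through the pipeline's `__call__` and route a LoRA `scale` from `cross_attention_kwargs` into the text encoder as `lora_scale`. A minimal usage sketch, assuming the community pipeline is loaded via `custom_pipeline` (the checkpoint name is illustrative; LoRA scaling only takes effect once LoRA weights are loaded):

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # any SD 1.x checkpoint
    custom_pipeline="lpw_stable_diffusion",  # loads main/lpw_stable_diffusion.py
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    prompt="a (highly detailed:1.2) portrait of an astronaut, best quality",
    negative_prompt="lowres, bad anatomy",
    clip_skip=2,  # hidden states from two layers before the final CLIP layer
    cross_attention_kwargs={"scale": 0.8},  # read back out as `lora_scale`
    num_inference_steps=30,
).images[0]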
main/lpw_stable_diffusion_xl.py CHANGED

@@ -25,21 +25,25 @@ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
 from diffusers.loaders import (
     FromSingleFileMixin,
     IPAdapterMixin,
-    StableDiffusionLoraLoaderMixin,
+    StableDiffusionXLLoraLoaderMixin,
     TextualInversionLoaderMixin,
 )
 from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
 from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
+from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
 from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
 from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.utils import (
+    USE_PEFT_BACKEND,
     deprecate,
     is_accelerate_available,
     is_accelerate_version,
     is_invisible_watermark_available,
     logging,
     replace_example_docstring,
+    scale_lora_layers,
+    unscale_lora_layers,
 )
 from diffusers.utils.torch_utils import randn_tensor
 
@@ -261,6 +265,7 @@ def get_weighted_text_embeddings_sdxl(
     num_images_per_prompt: int = 1,
     device: Optional[torch.device] = None,
     clip_skip: Optional[int] = None,
+    lora_scale: Optional[int] = None,
 ):
     """
     This function can process long prompt with weights, no length limitation
@@ -281,6 +286,24 @@ def get_weighted_text_embeddings_sdxl(
     """
     device = device or pipe._execution_device
 
+    # set lora scale so that monkey patched LoRA
+    # function of text encoder can correctly access it
+    if lora_scale is not None and isinstance(pipe, StableDiffusionXLLoraLoaderMixin):
+        pipe._lora_scale = lora_scale
+
+        # dynamically adjust the LoRA scale
+        if pipe.text_encoder is not None:
+            if not USE_PEFT_BACKEND:
+                adjust_lora_scale_text_encoder(pipe.text_encoder, lora_scale)
+            else:
+                scale_lora_layers(pipe.text_encoder, lora_scale)
+
+        if pipe.text_encoder_2 is not None:
+            if not USE_PEFT_BACKEND:
+                adjust_lora_scale_text_encoder(pipe.text_encoder_2, lora_scale)
+            else:
+                scale_lora_layers(pipe.text_encoder_2, lora_scale)
+
     if prompt_2:
         prompt = f"{prompt} {prompt_2}"
 
@@ -429,6 +452,16 @@ def get_weighted_text_embeddings_sdxl(
             bs_embed * num_images_per_prompt, -1
         )
 
+    if pipe.text_encoder is not None:
+        if isinstance(pipe, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
+            # Retrieve the original scale by scaling back the LoRA layers
+            unscale_lora_layers(pipe.text_encoder, lora_scale)
+
+    if pipe.text_encoder_2 is not None:
+        if isinstance(pipe, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
+            # Retrieve the original scale by scaling back the LoRA layers
+            unscale_lora_layers(pipe.text_encoder_2, lora_scale)
+
     return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
 
 
@@ -549,7 +582,7 @@ class SDXLLongPromptWeightingPipeline(
     StableDiffusionMixin,
     FromSingleFileMixin,
     IPAdapterMixin,
-    StableDiffusionLoraLoaderMixin,
+    StableDiffusionXLLoraLoaderMixin,
     TextualInversionLoaderMixin,
 ):
     r"""
@@ -561,8 +594,8 @@ class SDXLLongPromptWeightingPipeline(
     The pipeline also inherits the following loading methods:
         - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
-        - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
-        - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+        - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
+        - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
         - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
 
     Args:
@@ -743,7 +776,7 @@ class SDXLLongPromptWeightingPipeline(
 
         # set lora scale so that monkey patched LoRA
         # function of text encoder can correctly access it
-        if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
+        if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
            self._lora_scale = lora_scale
 
         if prompt is not None and isinstance(prompt, str):
@@ -1612,7 +1645,9 @@ class SDXLLongPromptWeightingPipeline(
             image_embeds = torch.cat([negative_image_embeds, image_embeds])
 
         # 3. Encode input prompt
-        (self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None)
+        lora_scale = (
+            self._cross_attention_kwargs.get("scale", None) if self._cross_attention_kwargs is not None else None
+        )
 
         negative_prompt = negative_prompt if negative_prompt is not None else ""
 
@@ -1627,6 +1662,7 @@ class SDXLLongPromptWeightingPipeline(
             neg_prompt=negative_prompt,
             num_images_per_prompt=num_images_per_prompt,
             clip_skip=clip_skip,
+            lora_scale=lora_scale,
         )
         dtype = prompt_embeds.dtype
 
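The SDXL file gets the same plumbing with one twist: both text encoders are scaled before encoding and unscaled afterwards (under the PEFT backend), so the pipeline leaves the encoders exactly as it found them. A sketch of that scale-then-restore pairing using the same diffusers helpers the diff imports; `encode_with_temporary_lora_scale` is a hypothetical wrapper for illustration, not part of the diff:

from diffusers.utils import USE_PEFT_BACKEND, scale_lora_layers, unscale_lora_layers

def encode_with_temporary_lora_scale(text_encoder, lora_scale, encode_fn):
    # Hypothetical helper: bump the LoRA scale, run the encode, then restore
    # the original scale so later calls see an unmodified encoder.
    if lora_scale is not None and USE_PEFT_BACKEND:
        scale_lora_layers(text_encoder, lora_scale)
    try:
        return encode_fn(text_encoder)
    finally:
        if lora_scale is not None and USE_PEFT_BACKEND:
            unscale_lora_layers(text_encoder, lora_scale)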
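End to end, the SDXL pipeline now inherits `StableDiffusionXLLoraLoaderMixin` (so `load_lora_weights` works), derives `lora_scale` from `cross_attention_kwargs`, and forwards it together with `clip_skip` into `get_weighted_text_embeddings_sdxl`. A minimal usage sketch (checkpoint and LoRA repository names are illustrative):

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    custom_pipeline="lpw_stable_diffusion_xl",  # loads main/lpw_stable_diffusion_xl.py
    torch_dtype=torch.float16,
).to("cuda")
pipe.load_lora_weights("some-user/some-sdxl-lora")  # enabled by the new mixin

image = pipe(
    prompt="a (masterpiece:1.3) photo of a red fox in the snow",
    clip_skip=1,  # pre-final CLIP layer for both encoders
    cross_attention_kwargs={"scale": 0.7},  # read back out as `lora_scale`
).images[0]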