ford442 committed
Commit edaa75b · verified · 1 parent: 369d431

Update app.py

Files changed (1):
  app.py  +3 -2
app.py CHANGED
@@ -27,6 +27,7 @@ import time
 import datetime
 from gradio import themes
 from image_gen_aux import UpscaleWithModel
+from diffusers.models.attention_processor import AttnProcessor2_0
 
 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
@@ -115,7 +116,6 @@ def load_and_prepare_model():
     #unetX = UNet2DConditionModel.from_pretrained('ford442/RealVisXL_V5.0_BF16',subfolder='unet').to(torch.bfloat16) # ,use_safetensors=True FAILS
     #sched = EulerAncestralDiscreteScheduler.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='scheduler',beta_schedule="scaled_linear", steps_offset=1,timestep_spacing="trailing"))
     #sched = EulerAncestralDiscreteScheduler.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='scheduler', steps_offset=1,timestep_spacing="trailing")
-    sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16',token=HF_TOKEN, subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1) #,use_karras_sigmas=True)
     #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16',token=HF_TOKEN, subfolder='scheduler',beta_schedule="scaled_linear")
     #sched = DPMSolverSDEScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
     #pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0").to(torch.bfloat16)
@@ -141,6 +141,7 @@ def load_and_prepare_model():
     # scheduler = EulerAncestralDiscreteScheduler.from_config(pipeX.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
     #scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset =1)
     )
+    sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16',token=HF_TOKEN, subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1) #,use_karras_sigmas=True)
     #pipe.vae = AsymmetricAutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2').to(torch.bfloat16) # ,use_safetensors=True FAILS
 
     #pipe.vae = AutoencoderKL.from_pretrained('ford442/Juggernaut-XI-v11-fp32',subfolder='vae') # ,use_safetensors=True FAILS
@@ -182,7 +183,7 @@ def load_and_prepare_model():
     #pipe.scheduler=EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
     # pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/Fantasy_World_XL.safetensors", adapter_name="fantasy")
 
-    pipe.vae.set_default_attn_processor()
+    pipe.vae.set_default_attn_processor(AttnProcessor2_0()) # Set attention processor first
 
     #pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/skin_texture_style_v4.safetensors", adapter_name="skin")
     #pipe.unet.load_lora_adapter("ford442/sdxl-vae-bf16", weight_name="LoRA/skin_texture_style_v4.safetensors", prefix="unet")
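For context, below is a minimal, self-contained sketch of how the pieces touched by this commit can fit together inside load_and_prepare_model(): the AttnProcessor2_0 import, a scheduler created after the pipeline is built, and the VAE's attention processor configured before any LoRA loading. It is not the full app.py; the pipeline construction and HF_TOKEN handling here are abbreviated assumptions. The sketch also uses the generic set_attn_processor() setter to pass an explicit AttnProcessor2_0 instance, since in the diffusers versions I have checked set_default_attn_processor() takes no arguments and simply restores the library default.

import os

import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
from diffusers.models.attention_processor import AttnProcessor2_0

HF_TOKEN = os.environ.get("HF_TOKEN")  # assumed to be provided as a Space secret

def load_and_prepare_model():
    # Build the SDXL pipeline first (checkpoint name taken from the diff; the real
    # app.py passes additional arguments here).
    pipe = StableDiffusionXLPipeline.from_pretrained(
        'ford442/RealVisXL_V5.0_BF16',
        torch_dtype=torch.bfloat16,
        token=HF_TOKEN,
    )
    # Create the scheduler after the pipeline exists, mirroring where this commit
    # moved the sched line, and attach it explicitly so the beta_* overrides are
    # not replaced by the checkpoint's bundled scheduler config.
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_pretrained(
        'ford442/RealVisXL_V5.0_BF16',
        subfolder='scheduler',
        token=HF_TOKEN,
        beta_schedule="scaled_linear",
        beta_start=0.00085,
        beta_end=0.012,
        steps_offset=1,
    )
    # Route the VAE's attention through the PyTorch 2 SDPA processor before any
    # LoRA weights are loaded.
    pipe.vae.set_attn_processor(AttnProcessor2_0())
    return pipe

Ordering the scheduler instantiation after the pipeline, as this commit does, keeps the explicitly overridden beta_schedule/beta_start/beta_end values in effect rather than whatever the pipeline loads by default; the same reasoning presumably applies to setting the attention processor before the (currently commented-out) LoRA calls.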