Update app.py
app.py CHANGED
@@ -14,9 +14,8 @@ import numpy as np
 from PIL import Image
 import torch
 import torch._dynamo
-import diffusers
+#import diffusers
 from diffusers import AutoencoderKL, StableDiffusionXLPipeline
-
 from diffusers import EulerAncestralDiscreteScheduler
 #from diffusers import DPMSolverSDEScheduler
 
@@ -27,7 +26,7 @@ import time
 import datetime
 from gradio import themes
 from image_gen_aux import UpscaleWithModel
-from diffusers.models.attention_processor import AttnProcessor2_0
+#from diffusers.models.attention_processor import AttnProcessor2_0
 
 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
@@ -186,6 +185,8 @@ def load_and_prepare_model():
 #pipe.scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
 #pipe.scheduler=EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
 # pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/Fantasy_World_XL.safetensors", adapter_name="fantasy")
+# pipe.load_lora_weights("Keltezaa/flux_pussy_NSFW", weight_name="flux-pussy.safetensors", adapter_name="fantasy")
+# pipe.load_lora_weights("xey/sldr_flux_nsfw_v2-studio", weight_name="sldr_flux_nsfw_v2-studio.safetensors", adapter_name="fantasy")
 
 pipe.vae.set_default_attn_processor() # Set attention processor first
 
@@ -197,7 +198,7 @@ def load_and_prepare_model():
 
 #Some typical diffusers pipeline optimizations
 pipe.unet.to(memory_format=torch.channels_last) #Unsupported by hidet, but does not seem to make a difference if disabled.
-pipe.enable_vae_tiling()
+#pipe.enable_vae_tiling()
 #pipe.enable_xformers_memory_efficient_attention()
 
 
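For context, a minimal sketch of how the calls touched by this commit typically fit together in a diffusers SDXL loader. Only the calls and comments visible in the diff come from the Space itself; the checkpoint id, dtype, and the surrounding shape of load_and_prepare_model() are assumptions for illustration, not the Space's actual code.

import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

# Backend flags kept at the top of app.py: disable TF32 and reduced-precision
# BF16 reductions for CUDA matmuls.
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False

def load_and_prepare_model():
    # Checkpoint id is an assumption; the diff only shows the Space pulling a
    # scheduler from 'ford442/RealVisXL_V5.0_BF16' in a commented-out line.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "ford442/RealVisXL_V5.0_BF16",
        torch_dtype=torch.bfloat16,
    )

    # Optional scheduler swap, left commented out in the commit:
    # pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
    #     pipe.scheduler.config, beta_schedule="scaled_linear",
    #     beta_start=0.00085, beta_end=0.012, steps_offset=1)

    # Reset the VAE attention processor before any further tweaks.
    pipe.vae.set_default_attn_processor()

    # channels_last memory format for the UNet; per the in-file comment it is
    # unsupported by hidet but otherwise makes little difference.
    pipe.unet.to(memory_format=torch.channels_last)

    # VAE tiling lowers decode-time VRAM on large images; this commit turns
    # it off by commenting the call out.
    # pipe.enable_vae_tiling()

    return pipe.to("cuda")

Net effect of the commit: the unused diffusers and AttnProcessor2_0 imports are commented out, VAE tiling is switched off, and the two newly added load_lora_weights lines remain commented out, so no adapter is actually loaded.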