Spaces: Running on Zero
Update app.py
app.py CHANGED
```diff
@@ -14,8 +14,8 @@ from PIL import Image
 import torch
 from diffusers import AutoencoderKL, StableDiffusionXLPipeline, UNet2DConditionModel
 from diffusers import EulerAncestralDiscreteScheduler
-from diffusers import DPMSolverMultistepScheduler
-from diffusers import AsymmetricAutoencoderKL
+#from diffusers import DPMSolverMultistepScheduler
+#from diffusers import AsymmetricAutoencoderKL
 from typing import Tuple
 import paramiko
 import gc
```
```diff
@@ -92,7 +92,7 @@ def load_and_prepare_model(model_id):
     model_dtypes = {"ford442/RealVisXL_V5.0_BF16": torch.bfloat16,}
     dtype = model_dtypes.get(model_id, torch.bfloat16) # Default to bfloat16 if not found
     #vaeX = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None)
-
+    vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False).to(device=device, dtype=torch.bfloat16)
     #vae = AutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2',use_safetensors=False)
     #vae = AutoencoderKL.from_single_file('https://huggingface.co/ford442/sdxl-vae-bf16/mySLR/myslrVAE_v10.safetensors')
     #vaeX = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse",use_safetensors=True)
```
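The new `vaeXL` is the stock SDXL autoencoder, loaded once rather than per request. Two things worth noting when reading it: `safety_checker` is a pipeline argument, not an `AutoencoderKL` one, so it most likely has no effect here; and since the hunk sits inside `load_and_prepare_model`, the `generate_*` functions below can only see `vaeXL` if it effectively has module scope, which is what this minimal sketch assumes:

```python
# Minimal sketch of the shared-VAE setup this hunk adds. `device` is assumed
# to be the same device string app.py uses elsewhere (e.g. "cuda").
import torch
from diffusers import AutoencoderKL

device = "cuda"  # assumption, not shown in the hunk

# Load the stock SDXL VAE once at startup; every generate_* call can then
# reuse it instead of re-initializing weights per request.
vaeXL = AutoencoderKL.from_pretrained(
    "stabilityai/sdxl-vae",
    use_safetensors=False,  # force the .bin weights, as the commit does
).to(device=device, dtype=torch.bfloat16)
```

Loading it in bfloat16 matches the bf16 pipelines, so swapping it in later does not mix precisions.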
```diff
@@ -239,6 +239,7 @@ def generate_30(
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
     num_images: int = 1,
+    juggernaut: bool = True,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     torch.backends.cudnn.benchmark = False
```
```diff
@@ -246,6 +247,8 @@ def generate_30(
     gc.collect()
     global models
     pipe = models[model_choice]
+    if juggernaut == False:
+        pipe.vae=vaeXL
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
```
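The guard itself is three lines, repeated verbatim in `generate_60` and `generate_90` below. One caveat: `pipe` comes from the global `models` cache, so the swap is sticky; once a pipeline's VAE has been replaced by `vaeXL`, re-checking the box on a later run will not restore the original Juggernaut VAE. A hypothetical restore-aware variant (not in the commit) would remember the original:

```python
# Hypothetical helper, not in the commit: remember each cached pipeline's
# original VAE so checking the box again restores it.
_original_vaes = {}

def apply_vae_choice(model_choice: str, juggernaut: bool):
    pipe = models[model_choice]  # global cache, as in app.py
    _original_vaes.setdefault(model_choice, pipe.vae)
    # The commit spells the check `if juggernaut == False:`;
    # `not juggernaut` is equivalent.
    pipe.vae = _original_vaes[model_choice] if juggernaut else vaeXL
    return pipe
```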
```diff
@@ -295,7 +298,8 @@ def generate_60(
     num_inference_steps: int = 250,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
-    num_images: int = 1,
+    num_images: int = 1,
+    juggernaut: bool = True,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     torch.backends.cudnn.benchmark = True
```
```diff
@@ -303,6 +307,8 @@ def generate_60(
     gc.collect()
     global models
     pipe = models[model_choice]
+    if juggernaut == False:
+        pipe.vae=vaeXL
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
```
```diff
@@ -353,6 +359,7 @@ def generate_90(
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
     num_images: int = 1,
+    juggernaut: bool = True,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     torch.backends.cudnn.benchmark = True
```
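A detail visible in the context lines rather than in the change itself: `generate_30` runs with `torch.backends.cudnn.benchmark = False`, while `generate_60` and `generate_90` enable it. Benchmark mode lets cuDNN time several convolution algorithms per input shape and cache the fastest, which pays off when shapes stay fixed over a longer run:

```python
import torch

# Autotuning overhead amortizes over the longer runs...
torch.backends.cudnn.benchmark = True   # generate_60 / generate_90
# ...and is skipped on the shorter generate_30 path.
torch.backends.cudnn.benchmark = False  # generate_30
```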
```diff
@@ -360,6 +367,8 @@ def generate_90(
     gc.collect()
     global models
     pipe = models[model_choice]
+    if juggernaut == False:
+        pipe.vae=vaeXL
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
```
```diff
@@ -479,6 +488,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
                 value=0,
             )
             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+            juggernaut = gr.Checkbox(label="Use Juggernaut VAE", value=True)
             with gr.Row():
                 width = gr.Slider(
                     label="Width",
```
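On the UI side, leaving the box checked keeps each model's bundled (Juggernaut) VAE; unchecking it switches decoding to the stock SDXL VAE. Gradio hands a checkbox's boolean to the callback by its position in the `inputs` list, which the three handler hunks below rely on. A self-contained sketch of that plumbing (hypothetical two-control app, not the real generate signature):

```python
import gradio as gr

def generate(num_images: int, juggernaut: bool) -> str:
    vae = "Juggernaut" if juggernaut else "stock SDXL"
    return f"{num_images} image(s), decoded with the {vae} VAE"

with gr.Blocks() as demo:
    num_images = gr.Slider(1, 4, value=1, step=1, label="Images")
    juggernaut = gr.Checkbox(label="Use Juggernaut VAE", value=True)
    result = gr.Textbox(label="Result")
    run_button = gr.Button("Run")
    # inputs are matched to parameters positionally: num_images first,
    # then juggernaut; the same ordering rule the real handlers follow.
    run_button.click(generate, inputs=[num_images, juggernaut], outputs=result)

# demo.launch()
```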
```diff
@@ -541,7 +551,8 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             guidance_scale,
             num_inference_steps,
             randomize_seed,
-            num_images,
+            num_images,
+            juggernaut,
         ],
         outputs=[result, seed],
     )
```
```diff
@@ -564,7 +575,8 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             guidance_scale,
             num_inference_steps,
             randomize_seed,
-            num_images,
+            num_images,
+            juggernaut,
         ],
         outputs=[result, seed],
     )
```
```diff
@@ -587,7 +599,8 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             guidance_scale,
             num_inference_steps,
             randomize_seed,
-            num_images,
+            num_images,
+            juggernaut,
         ],
         outputs=[result, seed],
     )
```
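Because of that positional binding, `juggernaut` must be appended after `num_images` in each of the three `inputs` lists, mirroring the updated `generate_30`/`generate_60`/`generate_90` signatures where it sits between `num_images` and the `progress` keyword argument; all three handler hunks do this consistently.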