Update app.py

app.py CHANGED
@@ -27,10 +27,7 @@ import time
 import datetime
 from gradio import themes
 from image_gen_aux import UpscaleWithModel
-
-#from diffusers.models.attention_processor import AttnProcessor2_0
-
-from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
+from diffusers.models.attention_processor import AttnProcessor2_0
 
 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
@@ -171,6 +168,7 @@ def load_and_prepare_model():
 
     #pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
     #pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained('SG161222/RealVisXL_V5.0', subfolder='scheduler', algorithm_type='sde-dpmsolver++')
+
     #**** NEVER TO BFLOAT BEFORE CUDA****#
     pipe.vae = vaeX #.to(torch.bfloat16)
     #**** NEVER TO BFLOAT BEFORE CUDA****#
@@ -207,10 +205,11 @@ def load_and_prepare_model():
     #pipe.set_adapters(["skin", "photo", "fantasy"], adapter_weights=[0.75, 0.25, 0.5])
     #pipe.set_adapters(["skin"], adapter_weights=[0.5])
     #pipe.unet.set_default_attn_processor()
+
     #**** BETTER WAY ****#
     pipe.to(device, torch.bfloat16)
     #**** BETTER WAY ****#
-
+
     #pipe.to(device)
     #pipe.to(torch.bfloat16)
 
@@ -285,11 +284,6 @@ def generate_30(
     num_inference_steps: int = 125,
     use_resolution_binning: bool = True,
     lora_scale: float = 0.5,
-    b1:float = 1.0,
-    b2:float = 1.0,
-    s1:float = 1.0,
-    s2:float = 1.0,
-    freeu: bool = False,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     torch.set_default_device('cuda')
@@ -316,12 +310,6 @@ def generate_30(
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
-    if freeu:
-        pipe.enable_freeu(s1,s2,b1,b2)
-        register_free_upblock2d(pip, b1=b1, b2=b2, s1=s1, s2=s1)
-        register_free_crossattn_upblock2d(pip, b1=b1, b2=b2, s1=s1, s2=s1)
-    else:
-        pipe.disable_freeu()
     rv_image = pipe(**batch_options).images[0]
     sd_image_path = f"rv50_A_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
@@ -352,11 +340,6 @@ def generate_60(
     num_inference_steps: int = 250,
     use_resolution_binning: bool = True,
     lora_scale: float = 0.5,
-    b1:float = 1.0,
-    b2:float = 1.0,
-    s1:float = 1.0,
-    s2:float = 1.0,
-    freeu: bool = False,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     #torch.set_default_device('cuda')
@@ -383,12 +366,6 @@ def generate_60(
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
-    if freeu:
-        pipe.enable_freeu(s1,s2,b1,b2)
-        register_free_upblock2d(pip, b1=b1, b2=b2, s1=s1, s2=s1)
-        register_free_crossattn_upblock2d(pip, b1=b1, b2=b2, s1=s1, s2=s1)
-    else:
-        pipe.disable_freeu()
     rv_image = pipe(**batch_options).images[0]
     sd_image_path = f"rv50_A_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
@@ -419,11 +396,6 @@ def generate_90(
     num_inference_steps: int = 250,
     use_resolution_binning: bool = True,
     lora_scale: float = 0.5,
-    b1:float = 1.0,
-    b2:float = 1.0,
-    s1:float = 1.0,
-    s2:float = 1.0,
-    freeu: bool = False,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     #torch.set_default_device('cuda')
@@ -450,12 +422,6 @@ def generate_90(
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
-    if freeu:
-        pipe.enable_freeu(s1,s2,b1,b2)
-        register_free_upblock2d(pip, b1=b1, b2=b2, s1=s1, s2=s1)
-        register_free_crossattn_upblock2d(pip, b1=b1, b2=b2, s1=s1, s2=s1)
-    else:
-        pipe.disable_freeu()
     rv_image = pipe(**batch_options).images[0]
     sd_image_path = f"rv50_A_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
@@ -579,34 +545,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
                 step=10,
                 value=180,
             )
-            with gr.Row():
-                freeu = gr.Checkbox(label="Use FreeU", value=False)
-
-                b1 = gr.Slider(label='b1: backbone factor of the first stage block of decoder',
-                    minimum=1,
-                    maximum=1.6,
-                    step=0.01,
-                    value=1.1
-                )
-                b2 = gr.Slider(label='b2: backbone factor of the second stage block of decoder',
-                    minimum=1,
-                    maximum=1.6,
-                    step=0.01,
-                    value=1.2
-                )
-            with gr.Row():
-                s1 = gr.Slider(label='s1: skip factor of the first stage block of decoder',
-                    minimum=0,
-                    maximum=1,
-                    step=0.1,
-                    value=0.2
-                )
-                s2 = gr.Slider(label='s2: skip factor of the second stage block of decoder',
-                    minimum=0,
-                    maximum=1,
-                    step=0.1,
-                    value=0.2
-                )
 
         gr.Examples(
             examples=examples,
@@ -638,11 +576,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             guidance_scale,
             num_inference_steps,
             lora_scale,
-            b1,
-            b2,
-            s1,
-            s2,
-            freeu,
         ],
         outputs=[result],
     )
@@ -664,11 +597,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             guidance_scale,
             num_inference_steps,
             lora_scale,
-            b1,
-            b2,
-            s1,
-            s2,
-            freeu,
         ],
         outputs=[result],
     )
@@ -690,11 +618,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             guidance_scale,
             num_inference_steps,
             lora_scale,
-            b1,
-            b2,
-            s1,
-            s2,
-            freeu,
         ],
         outputs=[result],
    )
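
Note on the import change in the first hunk: the commit drops the free_lunch_utils FreeU helpers and promotes the previously commented-out AttnProcessor2_0 import into a live one. A minimal sketch of how that processor is typically attached to a diffusers pipeline follows; the StableDiffusionXLPipeline loading flow is an assumption for illustration, not this Space's actual load_and_prepare_model(), and the checkpoint name is the one referenced in the commented-out scheduler line above.

import torch
from diffusers import StableDiffusionXLPipeline
from diffusers.models.attention_processor import AttnProcessor2_0

# Assumed loading flow for illustration only.
pipe = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0")
# Mirrors the "BETTER WAY" pipe.to(device, torch.bfloat16) kept by this diff:
# move to CUDA and cast in one call.
pipe.to("cuda", torch.bfloat16)

# AttnProcessor2_0 dispatches attention through PyTorch 2's
# torch.nn.functional.scaled_dot_product_attention.
pipe.unet.set_attn_processor(AttnProcessor2_0())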
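Note on the removed FreeU wiring: as written, the deleted branch called register_free_upblock2d and register_free_crossattn_upblock2d on pip (an undefined name, apparently a typo for pipe) and passed s1 for the s2 keyword, so the checkbox could not have behaved as intended. If FreeU is wanted again without free_lunch_utils, diffusers pipelines expose it directly via enable_freeu/disable_freeu. A hedged sketch reusing the defaults the removed sliders carried (b1=1.1, b2=1.2, s1=0.2, s2=0.2); apply_freeu is a hypothetical helper, not part of this Space.

def apply_freeu(pipe, freeu: bool, b1: float = 1.1, b2: float = 1.2,
                s1: float = 0.2, s2: float = 0.2) -> None:
    # Hypothetical helper: toggle diffusers' built-in FreeU support.
    if freeu:
        # enable_freeu takes the skip factors first: (s1, s2, b1, b2).
        pipe.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
    else:
        pipe.disable_freeu()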