Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -168,7 +168,10 @@ def load_and_prepare_model():
|
|
168 |
|
169 |
#pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
|
170 |
#pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained('SG161222/RealVisXL_V5.0', subfolder='scheduler', algorithm_type='sde-dpmsolver++')
|
|
|
171 |
pipe.vae = vaeX #.to(torch.bfloat16)
|
|
|
|
|
172 |
#pipe.unet = unetX
|
173 |
|
174 |
#pipe.vae.do_resize=False
|
@@ -201,9 +204,12 @@ def load_and_prepare_model():
|
|
201 |
#pipe.set_adapters(["skin", "photo", "fantasy"], adapter_weights=[0.75, 0.25, 0.5])
|
202 |
#pipe.set_adapters(["skin"], adapter_weights=[0.5])
|
203 |
#pipe.unet.set_default_attn_processor()
|
204 |
-
|
205 |
-
pipe.to(device)
|
206 |
-
|
|
|
|
|
|
|
207 |
|
208 |
print(f'Pipeline: ')
|
209 |
#print(f'_optional_components: {pipe._optional_components}')
|
@@ -276,6 +282,11 @@ def generate_30(
|
|
276 |
num_inference_steps: int = 125,
|
277 |
use_resolution_binning: bool = True,
|
278 |
lora_scale: float = 0.5,
|
|
|
|
|
|
|
|
|
|
|
279 |
progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
|
280 |
):
|
281 |
torch.set_default_device('cuda')
|
@@ -302,6 +313,9 @@ def generate_30(
|
|
302 |
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
303 |
uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
|
304 |
batch_options = options.copy()
|
|
|
|
|
|
|
305 |
rv_image = pipe(**batch_options).images[0]
|
306 |
sd_image_path = f"rv50_A_{timestamp}.png"
|
307 |
rv_image.save(sd_image_path,optimize=False,compress_level=0)
|
@@ -332,6 +346,11 @@ def generate_60(
|
|
332 |
num_inference_steps: int = 250,
|
333 |
use_resolution_binning: bool = True,
|
334 |
lora_scale: float = 0.5,
|
|
|
|
|
|
|
|
|
|
|
335 |
progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
|
336 |
):
|
337 |
#torch.set_default_device('cuda')
|
@@ -358,6 +377,9 @@ def generate_60(
|
|
358 |
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
359 |
uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
|
360 |
batch_options = options.copy()
|
|
|
|
|
|
|
361 |
rv_image = pipe(**batch_options).images[0]
|
362 |
sd_image_path = f"rv50_A_{timestamp}.png"
|
363 |
rv_image.save(sd_image_path,optimize=False,compress_level=0)
|
@@ -388,6 +410,11 @@ def generate_90(
|
|
388 |
num_inference_steps: int = 250,
|
389 |
use_resolution_binning: bool = True,
|
390 |
lora_scale: float = 0.5,
|
|
|
|
|
|
|
|
|
|
|
391 |
progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
|
392 |
):
|
393 |
#torch.set_default_device('cuda')
|
@@ -414,6 +441,9 @@ def generate_90(
|
|
414 |
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
415 |
uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
|
416 |
batch_options = options.copy()
|
|
|
|
|
|
|
417 |
rv_image = pipe(**batch_options).images[0]
|
418 |
sd_image_path = f"rv50_A_{timestamp}.png"
|
419 |
rv_image.save(sd_image_path,optimize=False,compress_level=0)
|
@@ -537,6 +567,34 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
|
|
537 |
step=10,
|
538 |
value=180,
|
539 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
540 |
|
541 |
gr.Examples(
|
542 |
examples=examples,
|
@@ -568,6 +626,11 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
|
|
568 |
guidance_scale,
|
569 |
num_inference_steps,
|
570 |
lora_scale,
|
|
|
|
|
|
|
|
|
|
|
571 |
],
|
572 |
outputs=[result],
|
573 |
)
|
@@ -589,6 +652,11 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
|
|
589 |
guidance_scale,
|
590 |
num_inference_steps,
|
591 |
lora_scale,
|
|
|
|
|
|
|
|
|
|
|
592 |
],
|
593 |
outputs=[result],
|
594 |
)
|
@@ -610,6 +678,11 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
|
|
610 |
guidance_scale,
|
611 |
num_inference_steps,
|
612 |
lora_scale,
|
|
|
|
|
|
|
|
|
|
|
613 |
],
|
614 |
outputs=[result],
|
615 |
)
|
|
|
168 |
|
169 |
#pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
|
170 |
#pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained('SG161222/RealVisXL_V5.0', subfolder='scheduler', algorithm_type='sde-dpmsolver++')
|
171 |
+
#**** NEVER TO BFLOAT BEFORE CUDA****#
|
172 |
pipe.vae = vaeX #.to(torch.bfloat16)
|
173 |
+
#**** NEVER TO BFLOAT BEFORE CUDA****#
|
174 |
+
|
175 |
#pipe.unet = unetX
|
176 |
|
177 |
#pipe.vae.do_resize=False
|
|
|
204 |
#pipe.set_adapters(["skin", "photo", "fantasy"], adapter_weights=[0.75, 0.25, 0.5])
|
205 |
#pipe.set_adapters(["skin"], adapter_weights=[0.5])
|
206 |
#pipe.unet.set_default_attn_processor()
|
207 |
+
#**** BETTER WAY ****#
|
208 |
+
pipe.to(device, torch.bfloat16)
|
209 |
+
#**** BETTER WAY ****#
|
210 |
+
|
211 |
+
#pipe.to(device)
|
212 |
+
#pipe.to(torch.bfloat16)
|
213 |
|
214 |
print(f'Pipeline: ')
|
215 |
#print(f'_optional_components: {pipe._optional_components}')
|
|
|
282 |
num_inference_steps: int = 125,
|
283 |
use_resolution_binning: bool = True,
|
284 |
lora_scale: float = 0.5,
|
285 |
+
b1:float = 1.0,
|
286 |
+
b2:float = 1.0,
|
287 |
+
s1:float = 1.0,
|
288 |
+
s2:float = 1.0,
|
289 |
+
freeu: bool = False,
|
290 |
progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
|
291 |
):
|
292 |
torch.set_default_device('cuda')
|
|
|
313 |
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
314 |
uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
|
315 |
batch_options = options.copy()
|
316 |
+
if freeu:
|
317 |
+
register_free_upblock2d(pipe, b1=b1, b2=b2, s1=s1, s2=s2)
|
318 |
+
register_free_crossattn_upblock2d(pipe, b1=b1, b2=b2, s1=s1, s2=s2)
|
319 |
rv_image = pipe(**batch_options).images[0]
|
320 |
sd_image_path = f"rv50_A_{timestamp}.png"
|
321 |
rv_image.save(sd_image_path,optimize=False,compress_level=0)
|
|
|
346 |
num_inference_steps: int = 250,
|
347 |
use_resolution_binning: bool = True,
|
348 |
lora_scale: float = 0.5,
|
349 |
+
b1:float = 1.0,
|
350 |
+
b2:float = 1.0,
|
351 |
+
s1:float = 1.0,
|
352 |
+
s2:float = 1.0,
|
353 |
+
freeu: bool = False,
|
354 |
progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
|
355 |
):
|
356 |
#torch.set_default_device('cuda')
|
|
|
377 |
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
378 |
uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
|
379 |
batch_options = options.copy()
|
380 |
+
if freeu:
|
381 |
+
register_free_upblock2d(pipe, b1=b1, b2=b2, s1=s1, s2=s2)
|
382 |
+
register_free_crossattn_upblock2d(pipe, b1=b1, b2=b2, s1=s1, s2=s2)
|
383 |
rv_image = pipe(**batch_options).images[0]
|
384 |
sd_image_path = f"rv50_A_{timestamp}.png"
|
385 |
rv_image.save(sd_image_path,optimize=False,compress_level=0)
|
|
|
410 |
num_inference_steps: int = 250,
|
411 |
use_resolution_binning: bool = True,
|
412 |
lora_scale: float = 0.5,
|
413 |
+
b1:float = 1.0,
|
414 |
+
b2:float = 1.0,
|
415 |
+
s1:float = 1.0,
|
416 |
+
s2:float = 1.0,
|
417 |
+
freeu: bool = False,
|
418 |
progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
|
419 |
):
|
420 |
#torch.set_default_device('cuda')
|
|
|
441 |
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
442 |
uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
|
443 |
batch_options = options.copy()
|
444 |
+
if freeu:
|
445 |
+
register_free_upblock2d(pipe, b1=b1, b2=b2, s1=s1, s2=s2)
|
446 |
+
register_free_crossattn_upblock2d(pipe, b1=b1, b2=b2, s1=s1, s2=s2)
|
447 |
rv_image = pipe(**batch_options).images[0]
|
448 |
sd_image_path = f"rv50_A_{timestamp}.png"
|
449 |
rv_image.save(sd_image_path,optimize=False,compress_level=0)
|
|
|
567 |
step=10,
|
568 |
value=180,
|
569 |
)
|
570 |
+
with gr.Row():
|
571 |
+
freeu = gr.Checkbox(label="Use FreeU", value=False)
|
572 |
+
|
573 |
+
b1 = gr.Slider(label='b1: backbone factor of the first stage block of decoder',
|
574 |
+
minimum=1,
|
575 |
+
maximum=1.6,
|
576 |
+
step=0.01,
|
577 |
+
value=1.1
|
578 |
+
)
|
579 |
+
b2 = gr.Slider(label='b2: backbone factor of the second stage block of decoder',
|
580 |
+
minimum=1,
|
581 |
+
maximum=1.6,
|
582 |
+
step=0.01,
|
583 |
+
value=1.2
|
584 |
+
)
|
585 |
+
with gr.Row():
|
586 |
+
s1 = gr.Slider(label='s1: skip factor of the first stage block of decoder',
|
587 |
+
minimum=0,
|
588 |
+
maximum=1,
|
589 |
+
step=0.1,
|
590 |
+
value=0.2
|
591 |
+
)
|
592 |
+
s2 = gr.Slider(label='s2: skip factor of the second stage block of decoder',
|
593 |
+
minimum=0,
|
594 |
+
maximum=1,
|
595 |
+
step=0.1,
|
596 |
+
value=0.2
|
597 |
+
)
|
598 |
|
599 |
gr.Examples(
|
600 |
examples=examples,
|
|
|
626 |
guidance_scale,
|
627 |
num_inference_steps,
|
628 |
lora_scale,
|
629 |
+
b1,
|
630 |
+
b2,
|
631 |
+
s1,
|
632 |
+
s2,
|
633 |
+
freeu,
|
634 |
],
|
635 |
outputs=[result],
|
636 |
)
|
|
|
652 |
guidance_scale,
|
653 |
num_inference_steps,
|
654 |
lora_scale,
|
655 |
+
b1,
|
656 |
+
b2,
|
657 |
+
s1,
|
658 |
+
s2,
|
659 |
+
freeu,
|
660 |
],
|
661 |
outputs=[result],
|
662 |
)
|
|
|
678 |
guidance_scale,
|
679 |
num_inference_steps,
|
680 |
lora_scale,
|
681 |
+
b1,
|
682 |
+
b2,
|
683 |
+
s1,
|
684 |
+
s2,
|
685 |
+
freeu,
|
686 |
],
|
687 |
outputs=[result],
|
688 |
)
|