Hugging Face Spaces · Running on Zero — commit "Update app.py" (Browse files) — file changed: app.py
```diff
@@ -273,7 +273,8 @@ def generate_30(
     num_inference_steps: int = 125,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
-    num_images: int = 1,
+    num_images: int = 1,
+    strength: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     torch.backends.cudnn.benchmark = False
@@ -288,6 +289,7 @@ def generate_30(
         "prompt": [prompt] * num_images,
         "negative_prompt": [negative_prompt],
         "negative_prompt_2": [neg_prompt_2],
+        "strength": denoise,
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
@@ -339,6 +341,7 @@ def generate_60(
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
     num_images: int = 1,
+    strength: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     torch.backends.cudnn.benchmark = True
@@ -353,6 +356,7 @@ def generate_60(
         "prompt": [prompt] * num_images,
         "negative_prompt": [negative_prompt],
         "negative_prompt_2": [neg_prompt_2],
+        "strength": denoise,
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
@@ -403,7 +407,8 @@ def generate_90(
     num_inference_steps: int = 250,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
-    num_images: int = 1,
+    num_images: int = 1,
+    strength: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     torch.backends.cudnn.benchmark = True
@@ -418,6 +423,7 @@ def generate_90(
         "prompt": [prompt] * num_images,
         "negative_prompt": [negative_prompt],
         "negative_prompt_2": [neg_prompt_2],
+        "strength": denoise,
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
@@ -561,6 +567,13 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             step=0.1,
             value=3.8,
         )
+        denoise = gr.Slider(
+            label="Denoise Strength",
+            minimum=0.0,
+            maximum=1.0,
+            step=0.01,
+            value=0.3,
+        )
         num_inference_steps = gr.Slider(
             label="Number of inference steps",
             minimum=10,
@@ -601,6 +614,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             num_inference_steps,
             randomize_seed,
             num_images,
+            denoise
         ],
         outputs=[result, seed],
     )
@@ -624,6 +638,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             num_inference_steps,
             randomize_seed,
             num_images,
+            denoise
         ],
         outputs=[result, seed],
     )
@@ -647,6 +662,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
            num_inference_steps,
            randomize_seed,
            num_images,
+           denoise
        ],
        outputs=[result, seed],
    )
```

NOTE (review): indentation inside hunk bodies is reconstructed — the extraction lost it; verify against the original app.py. Also, the diff adds a keyword parameter `strength: float = 0.3` to each `generate_*` function but the pipeline options dict reads `"strength": denoise` — inside those functions `denoise` would presumably resolve to the module-level `gr.Slider` component rather than the slider's float value; this looks like it should be `"strength": strength` — confirm against the running app.