Update app.py
app.py CHANGED
@@ -552,6 +552,9 @@ def process_input(input_image, upscale_factor, **kwargs):
 
     return input_image.resize((w, h)), w_original, h_original, was_resized
 
+from PIL import Image
+import numpy as np
+
 @spaces.GPU
 def infer_upscale(
     seed,
@@ -573,7 +576,7 @@ def infer_upscale(
 
         # rescale with upscale factor
         w, h = input_image.size
-        control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
+        control_image = input_image.resize((w * upscale_factor, h * upscale_factor), Image.LANCZOS)
 
         generator = torch.Generator(device=device).manual_seed(seed)
 
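Note: the only change in this hunk is the explicit resampling filter; LANCZOS generally preserves more detail than Pillow's default filter when scaling. A minimal standalone sketch of the same call, with a throwaway 64x64 image and an assumed factor of 4 standing in for input_image and upscale_factor:

import math  # not required; shown code uses only PIL
from PIL import Image

img = Image.new("RGB", (64, 64))  # hypothetical stand-in for input_image
upscale_factor = 4                # assumed factor
w, h = img.size
# explicit LANCZOS resampling, as added above
control_image = img.resize((w * upscale_factor, h * upscale_factor), Image.LANCZOS)
print(control_image.size)  # (256, 256)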
@@ -588,24 +591,29 @@ def infer_upscale(
         # Convert to tensor and add batch dimension
         control_image = torch.from_numpy(np.array(control_image)).permute(2, 0, 1).float().unsqueeze(0).to(device) / 255.0
 
-
-
-
-
-
-
-
-
+        with torch.no_grad():
+            image = pipe_upscale(
+                prompt="",
+                control_image=control_image,
+                controlnet_conditioning_scale=controlnet_conditioning_scale,
+                num_inference_steps=num_inference_steps,
+                guidance_scale=3.5,
+                generator=generator,
+            ).images[0]
+
+        # Convert the image back to PIL Image
+        if isinstance(image, torch.Tensor):
+            image = image.cpu().permute(1, 2, 0).numpy()
+
+        # Ensure the image data is in the correct range
+        image = np.clip(image * 255, 0, 255).astype(np.uint8)
+        image = Image.fromarray(image)
 
         if was_resized:
             gr.Info(
                 f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
             )
-
-        # resize to target desired size
-        image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
-
-        # image is already a PIL Image, no need for further conversion
+            image = image.resize((w_original * upscale_factor, h_original * upscale_factor), Image.LANCZOS)
 
         return image, seed, num_inference_steps, upscale_factor, controlnet_conditioning_scale, gr.update(), gr.update(visible=False)
     except Exception as e:
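The added block wraps the pipeline call in torch.no_grad() (gradients are not needed at inference time, which saves memory) and then coerces the result into a uint8 PIL image; note the clip/fromarray step assumes the tensor path, since a PIL result would not support the arithmetic. A minimal sketch of that tensor-to-PIL round-trip, assuming a CHW float tensor in [0, 1] and omitting the pipeline call itself:

import numpy as np
import torch
from PIL import Image

pil_in = Image.new("RGB", (32, 32))  # hypothetical stand-in input

# PIL -> NCHW float tensor in [0, 1], mirroring the control_image conversion above
t = torch.from_numpy(np.array(pil_in)).permute(2, 0, 1).float().unsqueeze(0) / 255.0

# ... the pipeline would run here, under torch.no_grad() ...
out = t.squeeze(0)  # pretend output: a CHW tensor in [0, 1]

# tensor -> PIL: move to CPU, back to HWC, rescale and clamp to uint8
arr = out.cpu().permute(1, 2, 0).numpy()
pil_out = Image.fromarray(np.clip(arr * 255, 0, 255).astype(np.uint8))
print(pil_out.size)  # (32, 32)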
@@ -715,10 +723,10 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as a
         controlnet_conditioning_scale = gr.Slider(
             label="Controlnet Conditioning Scale",
             minimum=0.1,
-            maximum=1.
-            step=0.
-            value=0.
-        )
+            maximum=1.0,
+            step=0.05,
+            value=0.5,  # default lowered to 0.5
+        )
         upscale_seed = gr.Slider(
             label="Seed for Upscaling",
             minimum=0,
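For reference, the removed slider arguments render truncated in this view (maximum=1., step=0., value=0.), so their original values are not recoverable here; the replacement block completes them. A minimal standalone sketch of the corrected slider, assuming a bare Blocks context:

import gradio as gr

with gr.Blocks() as demo:
    controlnet_conditioning_scale = gr.Slider(
        label="Controlnet Conditioning Scale",
        minimum=0.1,
        maximum=1.0,
        step=0.05,
        value=0.5,  # default lowered to 0.5
    )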