Update app.py

app.py CHANGED

@@ -7,32 +7,8 @@ from gradio_imageslider import ImageSlider
 from huggingface_hub import hf_hub_download
 from controlnet_union import ControlNetModel_Union
 from pipeline_fill_sd_xl import StableDiffusionXLFillPipeline
-from gradio_image_prompter import ImagePrompter
 from PIL import Image, ImageDraw
 import numpy as np
-# from sam2.sam2_image_predictor import SAM2ImagePredictor
-# from sam2_mask import create_sam2_tab
-import subprocess
-import os
-
-subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
-HF_TOKEN = os.environ["HF_TOKEN"]
-
-# class SAM2PredictorSingleton:
-#     _instance = None
-
-#     def __new__(cls):
-#         if cls._instance is None:
-#             cls._instance = super(SAM2PredictorSingleton, cls).__new__(cls)
-#             cls._instance._initialize_predictor()
-#         return cls._instance
-
-#     def _initialize_predictor(self):
-#         MODEL = "facebook/sam2-hiera-large"
-#         DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-#         self.predictor = SAM2ImagePredictor.from_pretrained(MODEL, device=DEVICE)
-
-PREDICTOR = None
 
 MODELS = {
     "RealVisXL V5.0 Lightning": "SG161222/RealVisXL_V5.0_Lightning",
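
Note: the removed SAM2PredictorSingleton reduces to lazy one-time initialization of a module-level predictor. A minimal sketch of that pattern, assuming the sam2 package and the same checkpoint the comments name (the helper name is illustrative, not from the app):

import torch
from sam2.sam2_image_predictor import SAM2ImagePredictor

_PREDICTOR = None

def get_predictor():
    # Load the SAM2 predictor once, then reuse it across requests.
    global _PREDICTOR
    if _PREDICTOR is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        _PREDICTOR = SAM2ImagePredictor.from_pretrained("facebook/sam2-hiera-large", device=device)
    return _PREDICTOR
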
@@ -81,59 +57,6 @@ def load_default_pipeline():
     ).to("cuda")
     return gr.update(value="Default pipeline loaded!")
 
-# @spaces.GPU()
-# def predict_masks(prompts):
-
-#     DEVICE = torch.device("cuda")
-#     SAM_MODEL = "facebook/sam2.1-hiera-large"
-#     # if PREDICTOR is None:
-#     #     PREDICTOR = SAM2ImagePredictor.from_pretrained(SAM_MODEL, device=DEVICE)
-#     # else:
-#     #     PREDICTOR = PREDICTOR
-#     PREDICTOR = SAM2ImagePredictor.from_pretrained(SAM_MODEL, device=DEVICE)
-
-#     """Predict a single mask from the image based on selected points."""
-#     image = np.array(prompts["image"])  # Convert the image to a numpy array
-#     points = prompts["points"]  # Get the points from prompts
-
-#     if not points:
-#         return image  # Return the original image if no points are selected
-
-#     # Debugging: Print the structure of points
-#     print(f"Points structure: {points}")
-
-#     # Ensure points is a list of lists with at least two elements
-#     if isinstance(points, list) and all(isinstance(point, list) and len(point) >= 2 for point in points):
-#         input_points = [[point[0], point[1]] for point in points]
-#     else:
-#         return image  # Return the original image if points structure is unexpected
-
-#     input_labels = [1] * len(input_points)
-
-#     with torch.inference_mode():
-#         PREDICTOR.set_image(image)
-#         masks, _, _ = PREDICTOR.predict(
-#             point_coords=input_points, point_labels=input_labels, multimask_output=False
-#         )
-
-#     # Prepare the overlay image
-#     red_mask = np.zeros_like(image)
-#     if masks and len(masks) > 0:
-#         red_mask[:, :, 0] = masks[0].astype(np.uint8) * 255  # Apply the red channel
-#         red_mask = PILImage.fromarray(red_mask)
-#         original_image = PILImage.fromarray(image)
-#         blended_image = PILImage.blend(original_image, red_mask, alpha=0.5)
-#         return np.array(blended_image)
-#     else:
-#         return image
-
-# def update_mask(prompts):
-#     """Update the mask based on the prompts."""
-#     image = prompts["image"]
-#     points = prompts["points"]
-#     return predict_masks(image, points)
-
-
 @spaces.GPU(duration=12)
 def fill_image(prompt, image, model_selection, paste_back):
     print(f"Received image: {image}")
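
Note: the commented-out predict_masks above follows SAM2's point-prompt flow (set_image, then predict with point coordinates and labels). As written it also had two bugs: `masks and len(masks) > 0` is ambiguous for a NumPy array, and update_mask passed two arguments to a one-argument function. A minimal working sketch of the same flow, assuming the sam2 package; reusing get_predictor from the earlier sketch would avoid reloading weights on every call:

import numpy as np
import torch
from PIL import Image
from sam2.sam2_image_predictor import SAM2ImagePredictor

def predict_mask_overlay(prompts):
    """Blend a red SAM2 mask over the image for the selected points."""
    image = np.array(prompts["image"])
    points = prompts.get("points") or []
    input_points = [[p[0], p[1]] for p in points if isinstance(p, (list, tuple)) and len(p) >= 2]
    if not input_points:
        return image  # nothing selected: return the image unchanged

    # The original re-created the predictor per call; a cached instance works too.
    predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2.1-hiera-large")
    with torch.inference_mode():
        predictor.set_image(image)
        masks, _, _ = predictor.predict(
            point_coords=np.array(input_points),
            point_labels=np.ones(len(input_points), dtype=np.int32),
            multimask_output=False,
        )

    if masks is None or len(masks) == 0:
        return image
    red_mask = np.zeros_like(image)
    red_mask[:, :, 0] = masks[0].astype(np.uint8) * 255  # red channel only
    blended = Image.blend(Image.fromarray(image), Image.fromarray(red_mask), alpha=0.5)
    return np.array(blended)
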
@@ -193,8 +116,6 @@ def prepare_image_and_mask(image, width, height, overlap_percentage, resize_opti
         resize_percentage = 100
     elif resize_option == "80%":
         resize_percentage = 80
-    elif resize_option == "67%":
-        resize_percentage = 67
     elif resize_option == "50%":
         resize_percentage = 50
     elif resize_option == "33%":
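
Note: with the "67%" branch gone, the remaining ladder maps labels to numbers one-to-one, so a lookup table is an equally valid shape for it. A hedged sketch; only the labels visible in this hunk are certain, and the default mirrors the ladder's first branch:

RESIZE_MAP = {"80%": 80, "50%": 50, "33%": 33}

def resolve_resize_percentage(resize_option, default=100):
    # Unknown or unlisted labels fall back to the default percentage.
    return RESIZE_MAP.get(resize_option, default)
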
@@ -387,7 +308,6 @@ def clear_cache():
     torch.cuda.empty_cache()
     return gr.update(value="Cache cleared!")
 
-
 css = """
 .nulgradio-container {
     width: 86vw !important;
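
Note: clear_cache only returns CUDA blocks that PyTorch has already freed; tensors still referenced from Python keep their memory. A fuller sketch of the pattern (the gc and ipc_collect steps are assumptions, not something this app does):

import gc
import gradio as gr
import torch

def clear_cache():
    gc.collect()              # drop unreachable tensors first
    torch.cuda.empty_cache()  # return freed blocks to the driver
    torch.cuda.ipc_collect()  # reclaim memory from dead CUDA IPC handles
    return gr.update(value="Cache cleared!")
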
@@ -399,9 +319,6 @@ css = """
 div#component-17 {
     height: auto !important;
 }
-div#component-46 {
-    height: 100% !important;
-}
 """
 
 title = """<h1 align="center">Diffusers Image Outpaint</h1>
@@ -414,7 +331,6 @@ title = """<h1 align="center">Diffusers Image Outpaint</h1>
     </p>
 </div>
 """
-sam_block = gr.load(name="spaces/LPX55/SAM2_1-Image-Predictor-Masking-Tool-CPU")
 
 with gr.Blocks(css=css, fill_height=True) as demo:
     gr.Markdown("# Diffusers Inpaint and Outpaint")
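
Note: the dropped sam_block line relied on gr.load, which recreates a hosted Space inside the current app from its API. A minimal sketch of the mechanism, kept for reference only since this commit removes the external dependency:

import gradio as gr

# Mirrors the remote Space; the module-level load happens once at startup.
sam_block = gr.load(name="spaces/LPX55/SAM2_1-Image-Predictor-Masking-Tool-CPU")

if __name__ == "__main__":
    sam_block.launch()
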
@@ -556,6 +472,15 @@ with gr.Blocks(css=css, fill_height=True) as demo:
             )
         with gr.Column():
             preview_button = gr.Button("Preview alignment and mask")
+            gr.Examples(
+                examples=[
+                    ["./examples/example_1.webp", 1280, 720, "Middle"],
+                    ["./examples/example_2.jpg", 1440, 810, "Left"],
+                    ["./examples/example_3.jpg", 1024, 1024, "Top"],
+                    ["./examples/example_3.jpg", 1024, 1024, "Bottom"],
+                ],
+                inputs=[input_image_outpaint, width_slider, height_slider, alignment_dropdown],
+            )
         with gr.Column():
             result_outpaint = ImageSlider(
                 interactive=False,
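
Note: gr.Examples with no fn argument simply fills the listed inputs when a row is clicked; generation still requires pressing the run button. A standalone sketch of the same wiring (component names and ranges here are illustrative, not the app's):

import gradio as gr

with gr.Blocks() as demo:
    image = gr.Image(type="filepath")
    width = gr.Slider(256, 2048, value=1024, label="Width")
    height = gr.Slider(256, 2048, value=1024, label="Height")
    alignment = gr.Dropdown(["Left", "Middle", "Top", "Bottom"], label="Alignment")
    gr.Examples(
        examples=[["./examples/example_1.webp", 1280, 720, "Middle"]],
        inputs=[image, width, height, alignment],  # clicking a row populates these
    )
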
@@ -564,34 +489,6 @@ with gr.Blocks(css=css, fill_height=True) as demo:
             use_as_input_button_outpaint = gr.Button("Use as Input Image", visible=False)
             history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)
             preview_image = gr.Image(label="Preview")
-    # with gr.TabItem("SAM2 Masking"):
-    #     input_image, points_map, output_result_mask = create_sam2_tab()
-    # with gr.TabItem("SAM2 Mask"):
-    #     gr.Markdown("# Object Segmentation with SAM2")
-    #     gr.Markdown(
-    #         """
-    #         This application utilizes **Segment Anything V2 (SAM2)** to allow you to upload an image and interactively generate a segmentation mask based on multiple points you select on the image.
-    #         """
-    #     )
-    #     with gr.Row():
-    #         with gr.Column():
-    #             image_input = gr.State()
-    #             # Input: ImagePrompter for uploaded image
-    #             upload_image_input = ImagePrompter(show_label=False)
-    #         with gr.Column():
-    #             image_output = gr.Image(label="Segmented Image", type="pil", height=400)
-    #     with gr.Row():
-    #         # Button to trigger the prediction
-    #         predict_button = gr.Button("Predict Mask")
-
-    #     # Define the action triggered by the predict button
-    #     predict_button.click(
-    #         fn=predict_masks,
-    #         inputs=[upload_image_input],
-    #         outputs=[image_output],
-    #         show_progress=True,
-    #     )
-
     with gr.TabItem("Misc"):
         with gr.Column():
             clear_cache_button = gr.Button("Clear CUDA Cache")
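
Note: the removed tab wired an ImagePrompter (whose value is a dict of {"image": ..., "points": [...]}) to the mask predictor. A live version of that wiring, assuming a one-argument predict function like the predict_mask_overlay sketched earlier:

import gradio as gr
from gradio_image_prompter import ImagePrompter

with gr.Blocks() as sam_tab:
    with gr.Row():
        prompter = ImagePrompter(show_label=False)  # image upload plus clicked points
        segmented = gr.Image(label="Segmented Image", type="pil", height=400)
    predict_button = gr.Button("Predict Mask")
    predict_button.click(fn=predict_mask_overlay, inputs=[prompter], outputs=[segmented])
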
@@ -608,11 +505,6 @@ with gr.Blocks(css=css, fill_height=True) as demo:
         inputs=None,
         outputs=load_default_message,
     )
-
-    # upload_image_input.change(
-    #     fn=lambda img: img, inputs=upload_image_input, outputs=image_input
-    # )
-
     target_ratio.change(
         fn=preload_presets,
        inputs=[target_ratio, width_slider, height_slider],