Update app.py
app.py CHANGED

@@ -6,6 +6,10 @@ from lang_sam import LangSAM
 from color_matcher import ColorMatcher
 from color_matcher.normalizer import Normalizer
 import torch
+import warnings
+
+# Suppress specific warnings if desired
+warnings.filterwarnings("ignore", category=UserWarning)
 
 # Load the LangSAM model
 model = LangSAM()  # Use the default model or specify custom checkpoint if necessary
@@ -15,7 +19,7 @@ def extract_masks(image_pil, prompts):
     masks_dict = {}
     for prompt in prompts_list:
         masks, boxes, phrases, logits = model.predict(image_pil, prompt)
-        if masks:
+        if masks is not None and len(masks) > 0:
             masks_np = masks[0].cpu().numpy()
             mask = (masks_np > 0).astype(np.uint8) * 255  # Binary mask
             masks_dict[prompt] = mask
@@ -142,13 +146,13 @@ def gradio_interface():
             initial_image = gr.Image(type="pil", label="Upload Image")
             prompts = gr.Textbox(lines=1, placeholder="Enter prompts separated by commas (e.g., sky, grass)", label="Prompts")
             segment_button = gr.Button("Segment Image")
-            segment_dropdown = gr.Dropdown(label="Select Segment", choices=[])
+            segment_dropdown = gr.Dropdown(label="Select Segment", choices=[], allow_custom_value=True)
             replacement_image = gr.Image(type="pil", label="Replacement Image (optional)")
             color_ref_image = gr.Image(type="pil", label="Color Reference Image (optional)")
             apply_replacement = gr.Checkbox(label="Apply Replacement", value=False)
             apply_color_grading = gr.Checkbox(label="Apply Color Grading", value=False)
             apply_color_to_full_image = gr.Checkbox(label="Apply Color Correction to Full Image", value=False)
-            blending_amount = gr.Slider(minimum=0, maximum=
+            blending_amount = gr.Slider(minimum=0, maximum=50, step=1, label="Blending Amount", value=0)
             apply_button = gr.Button("Apply Changes")
             undo_button = gr.Button("Undo")
         with gr.Column():
@@ -160,9 +164,9 @@ def gradio_interface():
         if initial_image_pil is not None:
             image_history = [initial_image_pil]
             current_image_pil = initial_image_pil
-            return current_image_pil, image_history, initial_image_pil, {}, [], "Image loaded."
+            return current_image_pil, image_history, initial_image_pil, {}, gr.Dropdown.update(choices=[], value=None), "Image loaded."
         else:
-            return None, [], None, {}, [], "No image loaded."
+            return None, [], None, {}, gr.Dropdown.update(choices=[], value=None), "No image loaded."
 
     # When the initial image is uploaded, initialize the image history
     initial_image.upload(fn=initialize_image, inputs=initial_image, outputs=[current_image_pil, image_history, current_image_display, masks_dict, segment_dropdown, status])
@@ -170,10 +174,10 @@ def gradio_interface():
     # Segment button click
     def segment_image_wrapper(current_image_pil, prompts):
         if current_image_pil is None:
-            return "No image uploaded.", {}, []
+            return "No image uploaded.", {}, gr.Dropdown.update(choices=[], value=None)
         masks = extract_masks(current_image_pil, prompts)
         if not masks:
-            return "No masks detected for the given prompts.", {}, []
+            return "No masks detected for the given prompts.", {}, gr.Dropdown.update(choices=[], value=None)
         dropdown_choices = list(masks.keys())
         return "Segmentation completed.", masks, gr.Dropdown.update(choices=dropdown_choices, value=dropdown_choices[0])
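
The click handler that consumes the three values returned by segment_image_wrapper lies outside this diff. A minimal sketch of how that wiring presumably looks further down in gradio_interface(), using only the component names visible above (the exact call in app.py may differ):

# Assumed wiring (not shown in this diff): status receives the message text,
# masks_dict receives the mask dictionary, and segment_dropdown receives the
# gr.Dropdown.update(...) object returned by segment_image_wrapper.
segment_button.click(
    fn=segment_image_wrapper,
    inputs=[current_image_pil, prompts],
    outputs=[status, masks_dict, segment_dropdown],
)

Note that gr.Dropdown.update(...) is the Gradio 3.x idiom; on Gradio 4.x the same effect comes from returning gr.update(choices=..., value=...) or a new gr.Dropdown(...) instance.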