Update app.py
app.py CHANGED
@@ -65,25 +65,16 @@ def inference_point(input_img, evt: gr.SelectData,):
     x, y = evt.index[0], evt.index[1]
     points = [[x, y]]
     print(f"Selected point: {points}")
-    import time
-    start_time = time.time()
     mp.set_start_method("spawn", force=True)
     config_file = './configs/ground-truth-warmup/mask-adapter/mask_adapter_convnext_large_cocopan_eval_ade20k.yaml'
     cfg = setup_cfg(config_file)
 
     demo = SAMPointVisualizationDemo(cfg, 0.8, sam2_model, clip_model,mask_adapter)
-    end_time = time.time()
-    print("init time",end_time - start_time)
 
-    start_time = time.time()
     img = read_image(input_img, format="BGR")
 
-    # Assume 'points' is a list of (x, y) coordinates to specify where the user clicks
-    # Process the image and points to create a segmentation map accordingly
     text_features = torch.from_numpy(np.load("./text_embedding/lvis_coco_text_embedding.npy"))
     _, visualized_output = demo.run_on_image_with_points(img, points,text_features)
-    end_time = time.time()
-    print("inf time",end_time - start_time)
     return visualized_output
 
 
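The nine removed lines were ad-hoc timing instrumentation: an inline `import time`, `time.time()` pairs around model construction and inference with their `print("init time", ...)` / `print("inf time", ...)` calls, plus two stale comments. If wall-clock timing is wanted again during development, a context manager keeps the bookkeeping out of the handler body. A minimal sketch; the `timed` helper below is hypothetical and not part of this Space:

    import time
    from contextlib import contextmanager

    @contextmanager
    def timed(label):
        # Hypothetical helper: print elapsed wall-clock time for the wrapped block.
        start = time.perf_counter()
        try:
            yield
        finally:
            print(f"{label}: {time.perf_counter() - start:.3f}s")

    # Usage, mirroring the removed instrumentation:
    # with timed("init"):
    #     demo = SAMPointVisualizationDemo(cfg, 0.8, sam2_model, clip_model, mask_adapter)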
@@ -171,8 +162,10 @@ with gr.Blocks() as demo:
         with gr.TabItem("Point Mode"):
             with gr.Row():  # arrange horizontally
                 with gr.Column():
+                    def init_state():
+                        return []
                     input_image = gr.Image(type='filepath', label="Upload Image", interactive=True)  # upload an image, interactive
-                    points_input = gr.State(value=
+                    points_input = gr.State(value=init_state())  # stores the clicked points
 
                 with gr.Column():  # second column: segmentation map output
                     output_image_point = gr.Image(type="pil", label='Segmentation Map')  # output segmentation map
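The second hunk adds an `init_state` helper inside the Point Mode column and seeds `points_input` with its return value, replacing the previous inline default. For context, the sketch below shows the same `gr.State`-plus-`select` wiring in isolation, using standard Gradio APIs; the `on_select` handler and component names are illustrative, not taken from app.py:

    import gradio as gr

    def init_state():
        # Factory for the State default: a fresh, empty list of points.
        return []

    def on_select(points, evt: gr.SelectData):
        # evt.index carries the clicked (x, y) pixel, as in inference_point above.
        points.append([evt.index[0], evt.index[1]])
        return points, points

    with gr.Blocks() as demo:
        image = gr.Image(type="filepath", label="Upload Image", interactive=True)
        points_state = gr.State(value=init_state())
        clicked = gr.JSON(label="Clicked points")
        image.select(on_select, inputs=[points_state], outputs=[points_state, clicked])

    demo.launch()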