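# Gradio demo for Set-of-Mark (SoM) visual prompting:
# SAM generates the marks, and GPT-4V is prompted with the marked image.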
import os
from typing import List, Dict, Tuple, Any, Optional
import cv2
import gradio as gr
import numpy as np
import supervision as sv
import torch
from segment_anything import sam_model_registry
from gpt4v import prompt_image
from utils import postprocess_masks, Visualizer, extract_numbers_in_brackets
from sam_utils import sam_interactive_inference, sam_inference
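# Configuration: device selection, SAM checkpoint and variant, and the keys used
# in the Gradio session state shared by the callbacks below.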
HOME = os.getenv("HOME")
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
SAM_CHECKPOINT = os.path.join(HOME, "app/weights/sam_vit_h_4b8939.pth")
# SAM_CHECKPOINT = "weights/sam_vit_h_4b8939.pth"
SAM_MODEL_TYPE = "vit_h"
ANNOTATED_IMAGE_KEY = "annotated_image"
DETECTIONS_KEY = "detections"
MARKDOWN = """
[arXiv paper](https://arxiv.org/pdf/2310.11441.pdf)
<h1 style='text-align: center'>
<img
src='https://som-gpt4v.github.io/website/img/som_logo.png'
style='height:50px; display:inline-block'
/>
Set-of-Mark (SoM) Prompting Unleashes Extraordinary Visual Grounding in GPT-4V
</h1>
## 🚧 Roadmap
- [ ] Support for alphabetic labels
- [ ] Support for Semantic-SAM (multi-level)
- [ ] Support for mask filtering based on granularity
"""
SAM = sam_model_registry[SAM_MODEL_TYPE](checkpoint=SAM_CHECKPOINT).to(device=DEVICE)
def inference(
image_and_mask: Dict[str, np.ndarray],
annotation_mode: List[str],
mask_alpha: float
) -> Tuple[Tuple[np.ndarray, List[Tuple[np.ndarray, str]]], Dict[str, Any]]:
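    """Run SAM on the uploaded image and draw Set-of-Mark annotations.

    If the user sketched on the image, the non-zero pixels of the sketch are used
    as an interactive prompt for SAM; otherwise SAM runs over the whole image.
    Returns the value for the AnnotatedImage component and a state dict holding
    the annotated image and the detections.
    """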
    image = image_and_mask['image']
    # the sketch tool returns the drawn mask as an RGB image; collapse it to a single channel
    mask = cv2.cvtColor(image_and_mask['mask'], cv2.COLOR_RGB2GRAY)
    # any non-zero pixel means the user sketched a prompt, so run SAM interactively
    is_interactive = not np.all(mask == 0)
visualizer = Visualizer(mask_opacity=mask_alpha)
if is_interactive:
detections = sam_interactive_inference(
image=image,
mask=mask,
model=SAM)
else:
detections = sam_inference(
image=image,
model=SAM
)
    # clean up the raw SAM masks before drawing the marks
    detections = postprocess_masks(detections=detections)
bgr_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
annotated_image = visualizer.visualize(
image=bgr_image,
detections=detections,
with_box="Box" in annotation_mode,
with_mask="Mask" in annotation_mode,
with_polygon="Polygon" in annotation_mode,
with_label="Mark" in annotation_mode)
annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
state = {
ANNOTATED_IMAGE_KEY: annotated_image,
DETECTIONS_KEY: detections
}
return (annotated_image, []), state
def prompt(
message: str,
history: List[List[str]],
state: Dict[str, Any],
api_key: Optional[str]
) -> str:
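    """Send the chat message together with the SoM-annotated image to GPT-4V."""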
    if not api_key:
        return "⚠️ Please set your OpenAI API key first"
if state is None or ANNOTATED_IMAGE_KEY not in state:
return "⚠️ Please generate SoM visual prompt first"
    return prompt_image(
        api_key=api_key,
        # the annotated image is already stored as RGB (see inference), so pass it through unchanged
        image=state[ANNOTATED_IMAGE_KEY],
        prompt=message
    )
def on_image_input_clear():
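    """Reset the annotated output and the shared state when the input image is cleared."""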
return None, {}
def highlight(
state: Dict[str, Any],
history: List[List[str]]
) -> Optional[Tuple[np.ndarray, List[Tuple[np.ndarray, str]]]]:
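    """Highlight the marks whose IDs appear in brackets in the last GPT-4V response."""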
if DETECTIONS_KEY not in state or ANNOTATED_IMAGE_KEY not in state:
return None
detections: sv.Detections = state[DETECTIONS_KEY]
annotated_image: np.ndarray = state[ANNOTATED_IMAGE_KEY]
    if not history:
        return None
    response = history[-1][-1]
    detections_ids = extract_numbers_in_brackets(text=response)
    highlighted_detections = [
        (detections.mask[detection_id], str(detection_id))
        for detection_id
        in detections_ids
        # skip IDs mentioned by the model that do not correspond to an existing mark
        if detection_id < len(detections.mask)
    ]
return annotated_image, highlighted_detections
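# Gradio components, rendered inside the Blocks layout defined further down.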
image_input = gr.Image(
label="Input",
type="numpy",
tool="sketch",
interactive=True,
brush_radius=20.0,
brush_color="#FFFFFF"
)
checkbox_annotation_mode = gr.CheckboxGroup(
choices=["Mark", "Polygon", "Mask", "Box"],
value=['Mark'],
label="Annotation Mode")
slider_mask_alpha = gr.Slider(
minimum=0,
maximum=1,
value=0.05,
label="Mask Alpha")
image_output = gr.AnnotatedImage(
label="SoM Visual Prompt",
color_map={
str(i): sv.ColorPalette.default().by_idx(i).as_hex()
for i in range(64)
}
)
openai_api_key = gr.Textbox(
show_label=False,
placeholder="Before you start chatting, set your OpenAI API key here",
lines=1,
type="password")
chatbot = gr.Chatbot(
label="GPT-4V + SoM",
height=256)
generate_button = gr.Button("Generate Marks")
highlight_button = gr.Button("Highlight Marks")
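# Layout: image input and prompt settings on the left; SoM output, action buttons,
# API key field, and the GPT-4V chat on the right.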
with gr.Blocks() as demo:
gr.Markdown(MARKDOWN)
inference_state = gr.State({})
with gr.Row():
with gr.Column():
image_input.render()
with gr.Accordion(
label="Detailed prompt settings (e.g., mark type)",
open=False):
with gr.Row():
checkbox_annotation_mode.render()
with gr.Row():
slider_mask_alpha.render()
with gr.Column():
image_output.render()
generate_button.render()
highlight_button.render()
with gr.Row():
openai_api_key.render()
with gr.Row():
gr.ChatInterface(
chatbot=chatbot,
fn=prompt,
additional_inputs=[inference_state, openai_api_key])
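    # Event wiring: generate marks, reset outputs when the image is cleared,
    # and highlight the marks referenced in the latest chat response.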
generate_button.click(
fn=inference,
inputs=[image_input, checkbox_annotation_mode, slider_mask_alpha],
outputs=[image_output, inference_state])
image_input.clear(
fn=on_image_input_clear,
outputs=[image_output, inference_state]
)
highlight_button.click(
fn=highlight,
inputs=[inference_state, chatbot],
outputs=[image_output])
demo.queue().launch(debug=False, show_error=True)