Change YOLOS input image to PIL type
Trying to see if this fixes an error with the YOLOS Image Processor
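For context, the Gradio side of this change: gr.Image(type="pil") makes the component hand the uploaded picture to the callback as a PIL.Image.Image instead of its default NumPy array. A minimal sketch of that behavior, assuming illustrative component names and a show_info handler that are not taken from this Space's code:

```python
import gradio as gr
from PIL import Image

def show_info(img: Image.Image) -> str:
    # With type="pil" the handler receives a PIL image, so .size and .mode exist.
    return f"{img.size[0]}x{img.size[1]} {img.mode}"

with gr.Blocks() as demo:
    inp = gr.Image(type="pil")            # deliver the upload as a PIL image
    out = gr.Textbox(label="Image info")
    gr.Button("Submit").click(show_info, [inp], out)

demo.launch()
```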
app.py CHANGED

@@ -1,8 +1,9 @@
 from threading import Thread
 
+from PIL import Image
 import gradio as gr
 import torch
-from transformers import PreTrainedModel # for type hint
+from transformers import PreTrainedModel, AutoImageProcessor # for type hint
 from transformers import TextIteratorStreamer, AutoModelForCausalLM, AutoTokenizer # Moondream
 from transformers import YolosImageProcessor, YolosForObjectDetection # YOLOS-small-300
 
@@ -49,11 +50,11 @@ def answer_question(img, prompt):
     yield buffer.strip()
 
 
-def detect_objects(img):
-    inputs = yolos_processor(
+def detect_objects(img: Image):
+    inputs = yolos_processor(img, return_tensors="pt")
     outputs = yolos_model(**inputs)
 
-    target_sizes = torch.tensor([img.size
+    target_sizes = torch.tensor([tuple(reversed(img.size))])
     results = yolos_processor.post_process_object_detection(outputs, threshold=0.7, target_sizes=target_sizes)[0]
 
     for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
@@ -76,7 +77,7 @@ if __name__ == "__main__":
 
     with gr.Tab("Object Detection"):
         with gr.Row():
-            yolos_input = gr.Image()
+            yolos_input = gr.Image(type="pil")
             yolos_output = gr.Image()
             yolos_button = gr.Button("Submit")
 
@@ -89,6 +90,6 @@ if __name__ == "__main__":
     moon_output = gr.TextArea(label="Output")
 
     moon_submit.click(answer_question, [moon_img, moon_prompt], moon_output)
-    yolos_button.click(detect_objects, [yolos_input]
+    yolos_button.click(detect_objects, [yolos_input])
 
     app.queue().launch()
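The reworked detect_objects path can also be exercised outside the Space. The sketch below assumes the hustvl/yolos-small-300 checkpoint (the diff only shows the "YOLOS-small-300" comment) and a local example.jpg; both are illustrative rather than taken from this code. It also shows why the diff reverses img.size: PIL reports (width, height), while post_process_object_detection expects target_sizes in (height, width) order.

```python
# Minimal standalone sketch of the detect_objects flow above.
# Checkpoint name and input file are assumptions for illustration.
from PIL import Image
import torch
from transformers import YolosImageProcessor, YolosForObjectDetection

yolos_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small-300")
yolos_model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small-300")

img = Image.open("example.jpg")  # in the Space, gr.Image(type="pil") supplies this

inputs = yolos_processor(img, return_tensors="pt")
with torch.no_grad():
    outputs = yolos_model(**inputs)

# PIL gives (width, height); post-processing wants (height, width).
target_sizes = torch.tensor([tuple(reversed(img.size))])
results = yolos_processor.post_process_object_detection(
    outputs, threshold=0.7, target_sizes=target_sizes
)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(yolos_model.config.id2label[label.item()], round(score.item(), 3), box.tolist())
```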