Spaces: Running on Zero

import gradio as gr
import spaces
import supervision as sv
import PIL.Image as Image
from ultralytics import YOLO, YOLOv10
from huggingface_hub import hf_hub_download


def download_models(model_id):
    # Pull the requested checkpoint from the atalaydenknalbant/asl-models Hub
    # repository into the working directory and return its local path.
    hf_hub_download("atalaydenknalbant/asl-models", filename=model_id, local_dir="./")
    return f"./{model_id}"
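
# Example (sketch, not executed): download_models("yolov10s.pt") is expected to
# return "./yolov10s.pt" once that file has been pulled from the Hub.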


box_annotator = sv.BoxAnnotator()

# Class index -> ASL letter for the 26 alphabet classes.
category_dict = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I',
                 9: 'J', 10: 'K', 11: 'L', 12: 'M', 13: 'N', 14: 'O', 15: 'P', 16: 'Q',
                 17: 'R', 18: 'S', 19: 'T', 20: 'U', 21: 'V', 22: 'W', 23: 'X', 24: 'Y', 25: 'Z'}


@spaces.GPU  # assumed from the spaces import and the "Running on Zero" badge: request ZeroGPU hardware for this call
def yolo_inference(image, model_id, conf_threshold, iou_threshold, max_detection):
    model_path = download_models(model_id)
    # YOLOv10 checkpoints go through the dedicated YOLOv10 loader; YOLOv8/YOLOv9
    # checkpoints load through the generic YOLO class.
    if model_id[:7] == 'yolov10':
        model = YOLOv10(model_path)
    else:
        model = YOLO(model_path)
    results = model(source=image, imgsz=416, iou=iou_threshold, conf=conf_threshold, verbose=False, max_det=max_detection)[0]
    detections = sv.Detections.from_ultralytics(results)
    # Label each detection as "<letter> <confidence>", e.g. "A 0.87".
    labels = [
        f"{category_dict[class_id]} {confidence:.2f}"
        for class_id, confidence in zip(detections.class_id, detections.confidence)
    ]
    # Note: passing labels here relies on a supervision version whose
    # BoxAnnotator.annotate still accepts a labels argument.
    annotated_image = box_annotator.annotate(image, detections=detections, labels=labels)
    return annotated_image
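
# Minimal standalone sketch (not executed): run the bundled example image "a.jpg"
# with the default thresholds used by the UI below.
# annotated = yolo_inference(Image.open("a.jpg"), "yolov10s.pt", 0.25, 0.45, 1)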


def app():
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                image = gr.Image(type="pil", label="Image", interactive=True)
                model_id = gr.Dropdown(
                    label="Model",
                    choices=[
                        "yolov10x.pt",
                        "yolov10s.pt",
                        "yolov9e.pt",
                        "yolov8x.pt",
                    ],
                    value="yolov10s.pt",
                )
                conf_threshold = gr.Slider(
                    label="Confidence Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.25,
                )
                iou_threshold = gr.Slider(
                    label="IoU Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.45,
                )
                max_detection = gr.Slider(
                    label="Max Detection",
                    minimum=1,
                    step=1,
                    value=1,
                )
                yolov_infer = gr.Button(value="Detect Objects")
            with gr.Column():
                output_image = gr.Image(type="pil", label="Annotated Image", interactive=False)

        yolov_infer.click(
            fn=yolo_inference,
            inputs=[image, model_id, conf_threshold, iou_threshold, max_detection],
            outputs=[output_image],
        )

        gr.Examples(
            examples=[
                ["b.jpg", "yolov10x.pt", 0.25, 0.45, 1],
                ["a.jpg", "yolov10s.pt", 0.25, 0.45, 1],
                ["y.jpg", "yolov10x.pt", 0.25, 0.45, 1],
            ],
            fn=yolo_inference,
            inputs=[image, model_id, conf_threshold, iou_threshold, max_detection],
            outputs=[output_image],
            cache_examples="lazy",
        )


gradio_app = gr.Blocks()
with gradio_app:
    with gr.Row():
        with gr.Column():
            app()

gradio_app.launch(debug=True)
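
# Running this file directly (python app.py) launches the demo locally; on the
# Hugging Face Space the same Blocks app is served automatically. Implied
# dependencies, judging from the imports: gradio, spaces, supervision,
# huggingface_hub, Pillow, and an ultralytics build that exposes YOLOv10.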