import time
from pathlib import Path

import cv2
import gradio as gr
from sahi import AutoDetectionModel
from sahi.predict import get_sliced_prediction

# Load the trained YOLO11 weights through SAHI's Ultralytics wrapper.
detection_model = AutoDetectionModel.from_pretrained(
    model_type="ultralytics",
    model_path="/workspace/runs/detect/train2/weights/last.pt",  # Replace with your model path
    confidence_threshold=0.01,
    device="cpu",  # Change to "cuda:0" if you have a GPU
)

OUTPUT_PATH = "/workspace/pred_image.jpg"
TEMP_PNG_PATH = "/workspace/pred_image.png"


def run_inference(image):
    # Gradio delivers an RGB numpy array; convert to BGR before saving with OpenCV.
    input_path = "/workspace/input_image.jpg"
    cv2.imwrite(input_path, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))

    # Sliced inference: the image is split into 256x256 tiles with 20% overlap.
    result = get_sliced_prediction(
        input_path,
        detection_model,
        slice_height=256,
        slice_width=256,
        overlap_height_ratio=0.2,
        overlap_width_ratio=0.2,
    )

    # SAHI appends ".png" to file_name, so pass the stem rather than the full filename.
    result.export_visuals(
        export_dir=str(Path(TEMP_PNG_PATH).parent),
        file_name=Path(TEMP_PNG_PATH).stem,
    )
    time.sleep(2)  # Give the export a moment to finish writing to disk.

    if not Path(TEMP_PNG_PATH).exists():
        raise FileNotFoundError(f"SAHI did not save the PNG file at {TEMP_PNG_PATH}")

    # Re-encode the exported PNG as JPEG and remove the temporary file.
    processed_image = cv2.imread(TEMP_PNG_PATH)
    cv2.imwrite(OUTPUT_PATH, processed_image)
    Path(TEMP_PNG_PATH).unlink()

    return OUTPUT_PATH


demo = gr.Interface(
    fn=run_inference,
    inputs=gr.Image(type="numpy"),
    outputs=gr.Image(type="filepath"),  # "file" is not a valid Image type; "filepath" matches the returned path
    title="YOLO11 Object Detection",
    description="Upload a DDR image to run inference using YOLO11",
)

demo.launch()
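
# --- Optional: in-memory variant (a sketch, not part of the original script) ---
# If the disk round trip (temp JPEG input, exported PNG, re-encoded JPEG) proves
# fragile, SAHI can also work fully in memory: get_sliced_prediction accepts a
# numpy array, and the visualize_object_predictions helper draws the predictions
# and returns the annotated array. The function and interface below are a hedged
# sketch under that assumption; adapt names and arguments to your installed
# SAHI/Gradio versions.
#
# import numpy as np
# from sahi.utils.cv import visualize_object_predictions
#
# def run_inference_in_memory(image: np.ndarray) -> np.ndarray:
#     # No temporary input file: pass the numpy image straight to SAHI.
#     result = get_sliced_prediction(
#         image,
#         detection_model,
#         slice_height=256,
#         slice_width=256,
#         overlap_height_ratio=0.2,
#         overlap_width_ratio=0.2,
#     )
#     # Draw the predictions onto a copy of the input; the helper returns a dict
#     # whose "image" entry is the annotated array.
#     visual = visualize_object_predictions(
#         image=np.ascontiguousarray(image),
#         object_prediction_list=result.object_prediction_list,
#     )
#     return visual["image"]
#
# # With this variant the output stays a numpy image, so no filepath handling is needed:
# # gr.Interface(fn=run_inference_in_memory, inputs=gr.Image(type="numpy"),
# #              outputs=gr.Image(type="numpy"), title="YOLO11 Object Detection")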