File size: 1,739 Bytes
ac831c4 af6e415 52b0cb8 1f35d81 52b0cb8 af6e415 52b0cb8 af6e415 52b0cb8 af6e415 93307f9 af6e415 ac831c4 af6e415 5a61493 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 |
import os
from collections import Counter

import cv2
import gradio as gr
import numpy as np
import supervision as sv

from inference import get_roboflow_model
# Roboflow-hosted people-detection model.
# SECURITY: the API key is read from the ROBOFLOW_API_KEY environment
# variable instead of being committed to source control; the original
# placeholder literal is kept as the fallback so behavior is unchanged
# when the variable is unset.
model = get_roboflow_model(
    model_id="people-detection-general/5",
    api_key=os.getenv("ROBOFLOW_API_KEY", "API_KEY"),
)
def callback(image_slice: np.ndarray) -> sv.Detections:
    """Run the detector on one image tile for the InferenceSlicer.

    Parameters
    ----------
    image_slice : np.ndarray
        A single BGR tile produced by the slicer.

    Returns
    -------
    sv.Detections
        Detections found in this tile.
    """
    # model.infer returns a list of responses; the first entry is the
    # result for the single image we passed in.
    inference_response = model.infer(image_slice)[0]
    return sv.Detections.from_inference(inference_response)
# Slicer performs tiled (SAHI-style) inference: it splits the input image
# into overlapping tiles, runs `callback` on each tile, and merges the
# per-tile detections back into full-image coordinates.
slicer = sv.InferenceSlicer(callback=callback)
def detect_objects(image):
    """Run sliced people detection on an input image and annotate it.

    Parameters
    ----------
    image : PIL.Image.Image
        RGB image as delivered by the Gradio image widget.

    Returns
    -------
    tuple
        ``(result_image, class_counts, total_count)`` where
        ``result_image`` is the annotated RGB numpy image,
        ``class_counts`` maps class name to number of detections, and
        ``total_count`` is the total number of detections.
    """
    # Gradio supplies RGB; the OpenCV-based annotators work in BGR.
    bgr_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    # Tiled inference over the whole image.
    sliced_detections = slicer(image=bgr_image)

    # Annotate a copy so the converted input stays untouched.
    annotated_image = sv.BoxAnnotator().annotate(
        scene=bgr_image.copy(), detections=sliced_detections
    )
    annotated_image = sv.LabelAnnotator().annotate(
        scene=annotated_image, detections=sliced_detections
    )

    # BUG FIX: iterating an sv.Detections object yields plain tuples
    # (xyxy, mask, confidence, class_id, tracker_id, data), which have no
    # `.class_name` attribute — the original loop raised AttributeError on
    # the first detection. Class names are stored in the detections' `data`
    # mapping (populated by Detections.from_inference); fall back to class
    # ids if the key is ever absent.
    class_names = sliced_detections.data.get("class_name")
    if class_names is None:
        class_names = [str(cid) for cid in sliced_detections.class_id]
    class_counts = dict(Counter(str(name) for name in class_names))

    # Total detections across all classes.
    total_count = len(sliced_detections)

    # Convert back to RGB for display in Gradio.
    result_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
    return result_image, class_counts, total_count
# Wire the detector into a Gradio UI: one image in; three outputs out
# (annotated image, per-class counts as JSON, total detection count).
# `live=True` re-runs detection whenever the input image changes.
detector_input = gr.Image(type="pil")
detector_outputs = [
    gr.Image(type="pil"),
    gr.JSON(),
    gr.Number(label="Total Objects Detected"),
]

iface = gr.Interface(
    fn=detect_objects,
    inputs=detector_input,
    outputs=detector_outputs,
    live=True,
)

# Start the web server (blocking call).
iface.launch()
|