import gradio as gr
import supervision as sv
import numpy as np
import cv2
from inference import get_roboflow_model
# Load the Roboflow people-detection model (replace "API_KEY" with your own Roboflow API key)
model = get_roboflow_model(model_id="people-detection-general/5", api_key="API_KEY")
def callback(image_slice: np.ndarray) -> sv.Detections:
    # Run the model on a single image slice and convert the result to a Detections object
    results = model.infer(image_slice)[0]
    return sv.Detections.from_inference(results)
# Define the slicer
slicer = sv.InferenceSlicer(callback=callback)
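# Note: the slicer above uses the library defaults; slice size can be tuned if needed,
# e.g. sv.InferenceSlicer(callback=callback, slice_wh=(640, 640)). Parameter names may
# vary slightly across supervision versions, so check the installed version's docs.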
def detect_objects(image):
image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR) # Convert from RGB (Gradio) to BGR (OpenCV)
# Run inference
sliced_detections = slicer(image=image)
# Annotating the image with boxes and labels
label_annotator = sv.LabelAnnotator()
box_annotator = sv.BoxAnnotator()
annotated_image = box_annotator.annotate(scene=image.copy(), detections=sliced_detections)
annotated_image = label_annotator.annotate(scene=annotated_image, detections=sliced_detections)
    # Count detected objects per class using the class names attached by from_inference
    class_counts = {}
    for class_name in sliced_detections.data.get("class_name", []):
        class_counts[str(class_name)] = class_counts.get(str(class_name), 0) + 1
# Total objects detected
total_count = sum(class_counts.values())
    # Convert back to RGB for Gradio before returning the results
    result_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
return result_image, class_counts, total_count
# Create a Gradio interface
iface = gr.Interface(
fn=detect_objects,
    inputs=gr.Image(type="pil", label="Input Image"),
    outputs=[
        gr.Image(type="numpy", label="Annotated Image"),
        gr.JSON(label="Objects Per Class"),
        gr.Number(label="Total Objects Detected"),
    ],
live=True
)
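# Note: live=True re-runs detection every time the input image changes; set live=False
# to run inference only when the user clicks Submit (useful for slower models).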
# Launch the Gradio interface
iface.launch()