# Gradio app: sliced (SAHI-style) object detection with a hosted Roboflow model,
# annotated and counted with supervision.
import os
import tempfile

import cv2
import gradio as gr
import numpy as np
import supervision as sv
from dotenv import load_dotenv
from roboflow import Roboflow

# Read the Roboflow credentials and model settings from a .env file, e.g.:
#   ROBOFLOW_API_KEY=...
#   ROBOFLOW_WORKSPACE=...
#   ROBOFLOW_PROJECT=...
#   ROBOFLOW_MODEL_VERSION=1
load_dotenv()

api_key = os.getenv("ROBOFLOW_API_KEY")
workspace = os.getenv("ROBOFLOW_WORKSPACE")
project_name = os.getenv("ROBOFLOW_PROJECT")
model_version = int(os.getenv("ROBOFLOW_MODEL_VERSION"))

# Connect to the Roboflow project and load the requested model version.
rf = Roboflow(api_key=api_key)
project = rf.workspace(workspace).project(project_name)
model = project.version(model_version).model
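
# NOTE: project.version(...).model is a client for Roboflow's hosted inference
# API, so each model.predict(...) call below makes a network request. With
# sliced inference that means one request per tile; large images will be slow.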


def detect_objects(image):
    # Gradio hands us a PIL image; write it to a temporary JPEG so it can be
    # re-read with OpenCV and kept around as a fallback output on errors.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
        image.save(temp_file, format="JPEG")
        temp_file_path = temp_file.name

    # OpenCV loads the image in BGR order; it is annotated and written back
    # with OpenCV as well, so the channel order stays consistent.
    original_image = cv2.imread(temp_file_path)
    try:
        # The hosted Roboflow model is not a SAHI DetectionModel, so it cannot be
        # passed to sahi.predict directly. supervision's InferenceSlicer provides
        # the same SAHI-style sliced inference: it tiles the image, runs the
        # callback on every tile, and merges the per-tile detections with NMS.
        class_name_to_id = {}

        def slice_callback(image_slice: np.ndarray) -> sv.Detections:
            # Each tile is sent to the Roboflow model; the hosted API returns
            # center-x/center-y/width/height boxes in tile coordinates.
            result = model.predict(image_slice).json()
            predictions = result.get("predictions", [])
            if not predictions:
                return sv.Detections.empty()
            xyxy = np.array(
                [
                    [
                        p["x"] - p["width"] / 2,
                        p["y"] - p["height"] / 2,
                        p["x"] + p["width"] / 2,
                        p["y"] + p["height"] / 2,
                    ]
                    for p in predictions
                ]
            )
            confidence = np.array([p["confidence"] for p in predictions])
            # Assign a stable integer id per class name so detections from
            # different tiles can be merged together.
            class_id = np.array(
                [class_name_to_id.setdefault(p["class"], len(class_name_to_id)) for p in predictions]
            )
            return sv.Detections(xyxy=xyxy, confidence=confidence, class_id=class_id)

        slicer = sv.InferenceSlicer(
            callback=slice_callback,
            slice_wh=(800, 800),
            # Newer supervision releases replace overlap_ratio_wh with overlap_wh (pixels).
            overlap_ratio_wh=(0.2, 0.2),
        )
        detections = slicer(original_image)

        # Recover the class name for each merged detection.
        id_to_class_name = {class_id: name for name, class_id in class_name_to_id.items()}
        class_names = [id_to_class_name[class_id] for class_id in detections.class_id]

        # Draw boxes first, then the class/confidence labels on top.
        box_annotator = sv.BoxAnnotator()
        label_annotator = sv.LabelAnnotator()
        labels = [
            f"{class_name} {confidence:.2f}"
            for class_name, confidence in zip(class_names, detections.confidence)
        ]

        annotated_image = box_annotator.annotate(scene=original_image.copy(), detections=detections)
        annotated_image = label_annotator.annotate(scene=annotated_image, detections=detections, labels=labels)
        # Count detections per class for the text summary.
        class_count = {}
        total_count = len(detections)
        for class_name in class_names:
            class_count[class_name] = class_count.get(class_name, 0) + 1

        result_text = "Detected Objects:\n\n"
        for class_name, count in class_count.items():
            result_text += f"{class_name}: {count}\n"
        result_text += f"\nTotal objects detected: {total_count}"

        # Write the annotated image somewhere Gradio can read it back from.
        output_image_path = os.path.join(tempfile.gettempdir(), "prediction.jpg")
        cv2.imwrite(output_image_path, annotated_image)

    except Exception as err:
        result_text = f"An error occurred: {err}"
        # Fall back to showing the unannotated upload.
        output_image_path = temp_file_path

    # Only delete the temporary upload if it is not the file being returned.
    if output_image_path != temp_file_path:
        os.remove(temp_file_path)

    return output_image_path, result_text
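
# Example (hypothetical, for quick testing without the UI): detect_objects takes
# a PIL image and returns (annotated_image_path, summary_text).
#   from PIL import Image
#   print(detect_objects(Image.open("example.jpg"))[1])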


with gr.Blocks() as iface:
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil", label="Input Image")
        with gr.Column():
            output_image = gr.Image(label="Detected Image")
        with gr.Column():
            output_text = gr.Textbox(label="Object Count Results")

    detect_button = gr.Button("Detect")

    detect_button.click(
        fn=detect_objects,
        inputs=input_image,
        outputs=[output_image, output_text],
    )

iface.launch()
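
# To run locally (assuming this file is saved as app.py and gradio, roboflow,
# supervision, opencv-python, numpy, and python-dotenv are installed):
#   python app.py
# Gradio then serves the UI at http://127.0.0.1:7860 by default.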