import gradio as gr
import cv2
import numpy as np
import tempfile
import os
from ultralytics import YOLO
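
# Gradio app: run YOLO object detection on an uploaded video and return an
# annotated copy of the video for playback.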
def stream_object_detection(video_path, conf_threshold):
    # Load the YOLO model
    model = YOLO("weights/best.pt")

    cap = cv2.VideoCapture(video_path)

    # Get video properties; process at half resolution to speed up inference
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30  # fall back to 30 if the source reports no frame rate
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) // 2)
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) // 2)

    # Temporary file for the processed video
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
    temp_file_path = temp_file.name
    temp_file.close()

    # VideoWriter to save processed frames
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(temp_file_path, fourcc, fps, (width, height))

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.resize(frame, (width, height))

        # Run YOLO predictions with the user-selected confidence threshold
        results = model.predict(frame, conf=conf_threshold)

        # Annotate the frame with detection results
        annotated_frame = results[0].plot()

        # Write the annotated frame to the output video
        out.write(annotated_frame)

    cap.release()
    out.release()
    return temp_file_path
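
# Build the UI: video upload and confidence slider on the left, processed video on the right.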
with gr.Blocks() as app:
    with gr.Row():
        with gr.Column():
            video_input = gr.Video(label="Upload Video")
            conf_threshold = gr.Slider(
                label="Confidence Threshold",
                minimum=0.0,
                maximum=1.0,
                step=0.05,
                value=0.30,
            )
        with gr.Column():
            video_output = gr.Video(label="Processed Video")

    with gr.Row():
        with gr.Column():
            detect_button = gr.Button("Start Detection", variant="primary")

    detect_button.click(
        fn=stream_object_detection,
        inputs=[video_input, conf_threshold],
        outputs=video_output,
    )
if __name__ == "__main__":
    app.launch()