# app.py — Streamlit motorbike violation detection app (YOLOv8).
# Provenance: Hugging Face Space by TheKnight115, commit 759bcfc ("Update app.py").
import os
import tempfile
import time

import cv2
import numpy as np
import streamlit as st
from huggingface_hub import hf_hub_download
from ultralytics import YOLO
def run_yolo(image):
    """Run the module-level YOLO model on *image* and return its raw results."""
    # `model` is assigned globally in main() before any caller reaches here.
    return model(image)
# Per-class annotation colors, keyed by YOLO class index.
# NOTE: OpenCV draws in BGR channel order, so (255, 0, 0) is blue, not red.
class_colors = {
    0: (0, 255, 0),    # Helmet -> green
    1: (255, 0, 0),    # License Plate -> blue
    2: (0, 0, 255),    # MotorbikeDelivery -> red
    3: (255, 255, 0),  # MotorbikeSport -> cyan
    4: (255, 0, 255),  # No Helmet -> magenta
    5: (0, 255, 255),  # Person -> yellow
}
def process_results(results, image):
    """Draw each detection's bounding box and label onto *image*.

    The image is annotated in place and also returned for convenience.
    `results` is the list produced by a YOLO call; only the first entry
    is inspected, matching a single-image inference.
    """
    for detection in results[0].boxes:
        # Corner coordinates arrive as a tensor row; cast to plain ints for cv2.
        x1, y1, x2, y2 = (int(coord) for coord in detection.xyxy[0])
        confidence = detection.conf[0]
        class_id = int(detection.cls[0])
        class_name = model.names[class_id]
        # Unknown class indices fall back to white.
        color = class_colors.get(class_id, (255, 255, 255))
        cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
        cv2.putText(
            image,
            f"{class_name} {confidence:.2f}",
            (x1, y1 - 10),  # Place the label just above the box.
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            color,
            2,
        )
    return image
def process_video(uploaded_file):
    """Run YOLO on every frame of an uploaded video, then display and offer it for download.

    The upload is spooled to a temporary file (cv2.VideoCapture needs a path),
    processed frame by frame, and written straight to the output file instead
    of buffering every frame in memory. A progress bar, percentage text, and
    elapsed-time readout are updated as frames are processed.
    """
    # Persist the uploaded bytes to disk so OpenCV can open them by path.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_file:
        temp_file.write(uploaded_file.read())
        temp_file_path = temp_file.name

    video = cv2.VideoCapture(temp_file_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    # Preserve the source frame rate; fall back to 30 fps when the container
    # does not report one (a hard-coded 30 would change playback speed).
    fps = video.get(cv2.CAP_PROP_FPS) or 30.0

    output_path = 'processed_video.mp4'
    out = None  # Created lazily once the first frame reveals the dimensions.

    progress_bar = st.progress(0)
    progress_text = st.empty()  # Placeholder for the percentage text
    timer_text = st.empty()     # Placeholder for the elapsed-time readout
    current_frame = 0
    start_time = time.time()

    try:
        while True:
            ret, frame = video.read()
            if not ret:
                break  # No frames left.
            results = run_yolo(frame)
            processed_frame = process_results(results, frame)
            if out is None:
                height, width, _ = processed_frame.shape
                out = cv2.VideoWriter(
                    output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height)
                )
            # Stream each frame to disk instead of holding the whole video in RAM.
            out.write(processed_frame)
            current_frame += 1

            # Guard against a zero/unknown frame count to avoid ZeroDivisionError.
            if total_frames > 0:
                progress_percentage = (current_frame / total_frames) * 100
                progress_bar.progress(min(progress_percentage / 100, 1.0))
                progress_text.text(f'Processing: {progress_percentage:.2f}%')
            elapsed_time = time.time() - start_time
            timer_text.text(f'Elapsed Time: {elapsed_time:.2f} seconds')
    finally:
        video.release()
        if out is not None:
            out.release()
        # Best-effort cleanup of the temporary upload copy.
        try:
            os.remove(temp_file_path)
        except OSError:
            pass

    if out is None:
        # Not a single frame could be decoded — report instead of crashing.
        st.error('No frames could be read from the uploaded video.')
        return

    # Complete the progress indicators and show the final timing.
    progress_bar.progress(100)
    progress_text.text('Processing: 100%')
    st.success('Video processing complete!')
    final_elapsed_time = time.time() - start_time
    timer_text.text(f'Total Elapsed Time: {final_elapsed_time:.2f} seconds')

    # Display the processed video and offer it for download.
    st.video(output_path)
    with open(output_path, 'rb') as f:
        video_bytes = f.read()
    st.download_button(
        label='Download Processed Video',
        data=video_bytes,
        file_name='processed_video.mp4',
        mime='video/mp4',
    )
def live_video_feed():
    """Stream annotated webcam frames into the app until 'Stop' is pressed."""
    stframe = st.empty()  # Placeholder the frames are rendered into.
    # Create the button ONCE, before the loop: the original called st.button
    # inside the loop, which registers a duplicate widget on every iteration
    # and raises Streamlit's DuplicateWidgetID error. Clicking the button
    # triggers a script rerun, which is what actually interrupts this loop.
    stop_requested = st.button("Stop")
    video = cv2.VideoCapture(0)  # Default webcam.
    try:
        while not stop_requested:
            ret, frame = video.read()
            if not ret:
                break  # Camera unavailable or stream ended.
            results = run_yolo(frame)
            processed_frame = process_results(results, frame)
            # channels="BGR" because OpenCV frames are in BGR order.
            stframe.image(processed_frame, channels="BGR", use_column_width=True)
    finally:
        # Always free the camera, even if Streamlit reruns mid-loop.
        video.release()
def main():
    """Streamlit entry point: download/load the YOLO model, then dispatch on input type."""
    model_file = hf_hub_download(repo_id="TheKnight115/Yolov8m", filename="yolov8_Medium.pt")
    # The helpers (run_yolo / process_results) look the model up as a module global.
    global model
    model = YOLO(model_file)

    st.title("Motorbike Violation Detection")
    input_type = st.selectbox("Select Input Type", ("Image", "Video", "Live Feed"))

    if input_type == "Image":
        uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
        if uploaded_file is not None:
            # Decode the raw upload bytes into a BGR ndarray
            # (cv2.IMREAD_COLOR is the named form of the magic constant 1).
            image = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), cv2.IMREAD_COLOR)
            if image is None:
                # imdecode returns None for corrupt/unsupported data instead of raising.
                st.error("Could not decode the uploaded image.")
                return
            results = run_yolo(image)
            processed_image = process_results(results, image)
            # channels="BGR" so OpenCV's channel order renders correctly
            # (matches the live-feed path; without it red/blue are swapped).
            st.image(processed_image, caption='Detected Image', channels="BGR", use_column_width=True)
    elif input_type == "Video":
        uploaded_file = st.file_uploader("Choose a video...", type=["mp4", "mov"])
        if uploaded_file is not None:
            process_video(uploaded_file)
    elif input_type == "Live Feed":
        st.write("Live video feed from webcam. Press 'Stop' to stop the feed.")
        live_video_feed()
# Run the app when executed directly (e.g. `streamlit run app.py`).
if __name__ == "__main__":
    main()