File size: 6,423 Bytes
eaa4e30
 
7d35b07
 
486b028
 
ad414f5
 
7d35b07
9ec5726
eaa4e30
 
 
759bcfc
 
 
 
 
 
 
 
 
 
9ec5726
 
 
 
 
 
 
 
 
 
759bcfc
 
 
 
 
 
9ec5726
 
 
3f85f8f
8be0123
3f85f8f
 
 
 
 
8be0123
3f85f8f
99d8cd7
8be0123
 
215a6c6
99d8cd7
a2e51ed
215a6c6
99d8cd7
 
215a6c6
99d8cd7
8be0123
 
 
 
 
 
 
 
 
 
 
99d8cd7
 
a2e51ed
 
 
 
 
8be0123
215a6c6
 
 
 
8be0123
 
 
 
abdcbed
 
8be0123
 
 
 
 
99d8cd7
a2e51ed
 
 
99d8cd7
215a6c6
 
 
 
abdcbed
215a6c6
 
 
abdcbed
 
 
 
8be0123
759bcfc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7d35b07
6e47efd
 
2c9be27
6e47efd
759bcfc
7d35b07
eaa4e30
759bcfc
 
7d35b07
759bcfc
 
 
 
7d35b07
 
 
 
9ec5726
 
 
 
 
7d35b07
759bcfc
 
 
7d35b07
759bcfc
 
 
 
 
7d35b07
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
import os
import tempfile
import time

import cv2
import numpy as np
import streamlit as st
from huggingface_hub import hf_hub_download
from ultralytics import YOLO

def run_yolo(image):
    """Run the YOLO model on *image* and return the raw ultralytics results.

    NOTE: relies on the module-level global ``model``, which is assigned in
    ``main()`` before any call reaches this function.
    """
    return model(image)

# Per-class box/label colors, keyed by YOLO class index.
# Tuples are in OpenCV BGR order (not RGB) — e.g. (255, 0, 0) renders blue.
class_colors = {
    0: (0, 255, 0),    # Green (Helmet)
    1: (255, 0, 0),    # Blue (License Plate)
    2: (0, 0, 255),    # Red (MotorbikeDelivery)
    3: (255, 255, 0),  # Cyan (MotorbikeSport)
    4: (255, 0, 255),  # Magenta (No Helmet)
    5: (0, 255, 255),  # Yellow (Person)
}

def process_results(results, image):
    """Draw bounding boxes and class labels from *results* onto *image*.

    Args:
        results: Ultralytics results list as returned by ``run_yolo``.
        image: BGR image (numpy array). Annotated **in place**.

    Returns:
        The same ``image`` array, with boxes and labels drawn on it.
    """
    detection = results[0]
    # Take class names from the results object itself instead of the global
    # `model` — same mapping, but this removes the hidden global dependency.
    names = detection.names
    for box in detection.boxes:
        # Box corners come back as a tensor; convert to plain ints for cv2.
        x1, y1, x2, y2 = map(int, box.xyxy[0])
        conf = float(box.conf[0])  # tensor -> float so f"{conf:.2f}" is clean
        cls = int(box.cls[0])
        label = names[cls]

        # White fallback for any class index without a configured color.
        color = class_colors.get(cls, (255, 255, 255))

        cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
        cv2.putText(image, f"{label} {conf:.2f}", (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    return image


def process_video(uploaded_file):
    """Run YOLO over an uploaded video, showing progress, then offer playback
    and a download of the annotated result.

    Frames are written to the output video as they are processed (streaming),
    instead of being accumulated in a list — the previous approach held every
    frame in memory and could exhaust RAM on long videos.

    Args:
        uploaded_file: Streamlit UploadedFile containing the video bytes.
    """
    # Persist the upload to disk so cv2.VideoCapture can open it.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_file:
        temp_file.write(uploaded_file.read())
        temp_file_path = temp_file.name

    video = cv2.VideoCapture(temp_file_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Preserve the source frame rate (was hard-coded to 30, which sped up or
    # slowed down the output); fall back to 30 if the metadata is missing.
    fps = video.get(cv2.CAP_PROP_FPS) or 30.0

    output_path = 'processed_video.mp4'
    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'),
                          fps, (width, height))

    # Progress bar, percentage text, and elapsed-time readout.
    progress_bar = st.progress(0)
    progress_text = st.empty()
    timer_text = st.empty()

    current_frame = 0
    start_time = time.time()

    try:
        while True:
            ret, frame = video.read()
            if not ret:
                break  # no frames left

            results = run_yolo(frame)
            out.write(process_results(results, frame))
            current_frame += 1

            # Guard against a zero frame count (corrupt/streaming metadata),
            # and clamp so st.progress never receives a value above 1.0.
            if total_frames > 0:
                fraction = min(current_frame / total_frames, 1.0)
                progress_bar.progress(fraction)
                progress_text.text(f'Processing: {fraction * 100:.2f}%')

            elapsed_time = time.time() - start_time
            timer_text.text(f'Elapsed Time: {elapsed_time:.2f} seconds')
    finally:
        # Always release handles and remove the temp upload, even on error.
        video.release()
        out.release()
        os.remove(temp_file_path)

    progress_bar.progress(1.0)
    progress_text.text('Processing: 100%')
    st.success('Video processing complete!')

    final_elapsed_time = time.time() - start_time
    timer_text.text(f'Total Elapsed Time: {final_elapsed_time:.2f} seconds')

    # Show the annotated video and offer it for download.
    st.video(output_path)
    with open(output_path, 'rb') as f:
        video_bytes = f.read()
    st.download_button(label='Download Processed Video', data=video_bytes,
                       file_name='processed_video.mp4', mime='video/mp4')

def live_video_feed():
    """Stream the webcam through YOLO and display annotated frames.

    The Stop button is created ONCE, before the loop: calling ``st.button``
    inside the loop (as before) raises a DuplicateWidgetID error on the
    second iteration. Clicking Stop triggers a Streamlit rerun, on which
    ``stop`` is True and the loop is skipped.
    """
    stframe = st.empty()  # placeholder the frames are rendered into
    stop = st.button("Stop")
    video = cv2.VideoCapture(0)  # default webcam

    try:
        while not stop:
            ret, frame = video.read()
            if not ret:
                break  # camera unavailable or stream ended

            results = run_yolo(frame)
            processed_frame = process_results(results, frame)

            # Frames are BGR (OpenCV); tell Streamlit so colors render right.
            stframe.image(processed_frame, channels="BGR", use_column_width=True)
    finally:
        video.release()

def main():
    """Entry point: load the model, then dispatch on the selected input type
    (single image, uploaded video, or live webcam feed)."""
    model_file = hf_hub_download(repo_id="TheKnight115/Yolov8m", filename="yolov8_Medium.pt")

    # run_yolo/process display paths read this module-level global.
    global model
    model = YOLO(model_file)

    st.title("Motorbike Violation Detection")

    input_type = st.selectbox("Select Input Type", ("Image", "Video", "Live Feed"))

    if input_type == "Image":
        uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
        if uploaded_file is not None:
            # cv2.imdecode already returns an ndarray (BGR); the previous
            # np.array(...) wrapper was redundant.
            image = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), 1)
            results = run_yolo(image)
            processed_image = process_results(results, image)
            # channels="BGR" so Streamlit doesn't swap red/blue — this path
            # previously omitted it while the live-feed path passed it.
            st.image(processed_image, channels="BGR",
                     caption='Detected Image', use_column_width=True)

    elif input_type == "Video":
        uploaded_file = st.file_uploader("Choose a video...", type=["mp4", "mov"])
        if uploaded_file is not None:
            process_video(uploaded_file)

    elif input_type == "Live Feed":
        st.write("Live video feed from webcam. Press 'Stop' to stop the feed.")
        live_video_feed()

if __name__ == "__main__":
    main()