import tempfile
import time

import streamlit as st
import cv2
import numpy as np
from ultralytics import YOLO
from huggingface_hub import hf_hub_download


def run_yolo(image):
    # Run the model on the image and get results
    results = model(image)
    return results

def process_results(results, image):
    # Draw bounding boxes and labels on the image
    boxes = results[0].boxes  # Get boxes from results
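    # Each entry in Boxes carries xyxy pixel coordinates, a confidence score, and a class index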
    for box in boxes:
        # Get the box coordinates and label
        x1, y1, x2, y2 = map(int, box.xyxy[0])  # Convert to integer coordinates
        conf = box.conf[0]  # Confidence score
        cls = int(box.cls[0])  # Class index
        label = model.names[cls]  # Get class name from index
        
        # Draw rectangle and label on the image
        cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2)  # Blue box
        cv2.putText(image, f"{label} {conf:.2f}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

    return image


def process_video(uploaded_file):
    # Create a temporary file to save the uploaded video
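    # (cv2.VideoCapture needs a real file path, so the uploaded bytes are written to disk first)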
    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_file:
        temp_file.write(uploaded_file.read())
        temp_file_path = temp_file.name  # Get the path of the temporary file
    
    # Read the video file
    video = cv2.VideoCapture(temp_file_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))  # Get the total number of frames
    fps = video.get(cv2.CAP_PROP_FPS) or 30  # Source frame rate (fall back to 30 if unavailable)
    frames = []
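    # NOTE: every processed frame is kept in memory before writing; long or high-resolution
    # videos can consume a lot of RAM with this approach.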
    
    # Create a Streamlit progress bar, text for percentage, and timer
    progress_bar = st.progress(0)
    progress_text = st.empty()  # Placeholder for percentage text
    timer_text = st.empty()  # Placeholder for the timer
    
    current_frame = 0
    start_time = time.time()  # Start the timer
    
    while True:
        ret, frame = video.read()
        if not ret:
            break  # Break the loop if there are no frames left

        # Run YOLO model on the current frame
        results = run_yolo(frame)
        
        # Process the results and draw boxes on the current frame
        processed_frame = process_results(results, frame)
        frames.append(processed_frame)  # Save the processed frame
        
        current_frame += 1
        
        # Calculate and display the progress (clamped in case the reported frame count is inexact)
        progress_percentage = (current_frame / max(total_frames, 1)) * 100
        progress_bar.progress(min(progress_percentage / 100, 1.0))  # Update the progress bar
        progress_text.text(f'Processing: {progress_percentage:.2f}%')  # Update the percentage text

        # Calculate and display the elapsed time
        elapsed_time = time.time() - start_time
        timer_text.text(f'Elapsed Time: {elapsed_time:.2f} seconds')  # Update the timer text
    
    video.release()
    
    # Abort if no frames could be read (e.g. an unsupported or corrupt file)
    if not frames:
        st.error('No frames could be read from the uploaded video.')
        return
    # Create a video writer to save the processed frames
    height, width, _ = frames[0].shape
    output_path = 'processed_video.mp4'
    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
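    # NOTE: the 'mp4v' codec is widely available in OpenCV builds, but some browsers cannot
    # play it inline via st.video; re-encoding to H.264 is a common workaround if playback fails.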

    for frame in frames:
        out.write(frame)  # Write each processed frame to the video

    out.release()
    
    # Complete the progress bar and show final message
    progress_bar.progress(100)
    progress_text.text('Processing: 100%')
    st.success('Video processing complete!')

    # Display the final elapsed time
    final_elapsed_time = time.time() - start_time
    timer_text.text(f'Total Elapsed Time: {final_elapsed_time:.2f} seconds')
    
    # Display the processed video
    st.video(output_path)

    # Create a download button for the processed video
    with open(output_path, 'rb') as f:
        video_bytes = f.read()
    st.download_button(label='Download Processed Video', data=video_bytes, file_name='processed_video.mp4', mime='video/mp4')

def main():
    global model  # expose the loaded model to run_yolo() and process_results()
    model_file = hf_hub_download(repo_id="TheKnight115/Yolov8m", filename="yolov8_Medium.pt")
    
    # Load the YOLO model
    model = YOLO(model_file)
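    # NOTE: Streamlit re-runs main() on every interaction, so the model is reloaded each time;
    # wrapping the download/load in a function decorated with @st.cache_resource would cache it.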

    st.title("Motorbike Violation Detection")

    # Upload file
    uploaded_file = st.file_uploader("Choose an image or video...", type=["jpg", "jpeg", "png", "mp4"])

    if uploaded_file is not None:
        if uploaded_file.type in ["image/jpeg", "image/png", "image/jpg"]:
            # Process the image
            image = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), cv2.IMREAD_COLOR)
            results = run_yolo(image)
            
            # Process the results and draw boxes on the image
            processed_image = process_results(results, image)
            
            # Display the processed image (convert from OpenCV's BGR to RGB so colors render correctly)
            st.image(cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB), caption='Detected Image', use_column_width=True)

        elif uploaded_file.type == "video/mp4":
            # Process the video
            process_video(uploaded_file)  # Process the video and save the output
            

if __name__ == "__main__":
    main()