Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -4,100 +4,111 @@ import numpy as np
|
|
4 |
import gradio as gr
|
5 |
import tempfile
|
6 |
import os
|
|
|
|
|
7 |
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
|
8 |
|
9 |
-
# Function to preprocess each frame
|
10 |
def preprocess_frame(frame):
|
11 |
-
resized_frame = cv2.resize(frame, (224, 224))
|
12 |
normalized_frame = resized_frame / 255.0
|
13 |
-
return np.expand_dims(normalized_frame, axis=0)
|
14 |
|
15 |
-
# Function to draw pretty label on the frame
|
16 |
def draw_label(frame, label, position=(50, 50), font_scale=1, thickness=2):
|
17 |
-
# Define label properties
|
18 |
if label == 'Drowsy':
|
19 |
color = (0, 0, 255) # Red for Drowsy
|
20 |
bg_color = (0, 0, 100) # Darker background for Drowsy
|
21 |
else:
|
22 |
color = (0, 255, 0) # Green for Alert
|
23 |
bg_color = (0, 100, 0) # Darker background for Alert
|
24 |
-
|
25 |
font = cv2.FONT_HERSHEY_SIMPLEX
|
26 |
text_size = cv2.getTextSize(label, font, font_scale, thickness)[0]
|
27 |
|
28 |
-
# Define rectangle background dimensions
|
29 |
text_x, text_y = position
|
30 |
-
rect_start = (text_x, text_y - text_size[1] -
|
31 |
-
rect_end = (text_x + text_size[0] +
|
32 |
|
33 |
-
# Draw rectangle background
|
34 |
cv2.rectangle(frame, rect_start, rect_end, bg_color, -1)
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
cv2.putText(frame,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
|
42 |
@spaces.GPU(duration=120)
|
43 |
def predict_drowsiness(video_path):
|
44 |
-
# Open the video file
|
45 |
import tensorflow as tf
|
46 |
print(tf.config.list_physical_devices("GPU"))
|
47 |
model = tf.keras.models.load_model('cnn.keras')
|
|
|
48 |
cap = cv2.VideoCapture(video_path)
|
49 |
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
50 |
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
51 |
fps = int(cap.get(cv2.CAP_PROP_FPS))
|
|
|
52 |
|
53 |
-
|
54 |
-
skip_interval = int(fps * 0.5) # Skip frames to achieve 1 frame every 0.5 seconds
|
55 |
|
56 |
-
# Create a temporary file for the output video
|
57 |
with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_output:
|
58 |
temp_output_path = temp_output.name
|
59 |
|
60 |
-
# Output video settings
|
61 |
out = cv2.VideoWriter(temp_output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (frame_width, frame_height))
|
62 |
|
63 |
frame_count = 0
|
|
|
|
|
|
|
64 |
while cap.isOpened():
|
65 |
ret, frame = cap.read()
|
66 |
if not ret:
|
67 |
break
|
68 |
|
69 |
-
|
|
|
|
|
|
|
|
|
|
|
70 |
if frame_count % skip_interval == 0:
|
71 |
-
# Preprocess frame
|
72 |
preprocessed_frame = preprocess_frame(frame)
|
73 |
-
|
74 |
-
# Use the model to predict drowsiness
|
75 |
prediction = model.predict(preprocessed_frame)
|
76 |
drowsiness = np.argmax(prediction)
|
77 |
|
78 |
-
# Add label to frame with improved visibility
|
79 |
label = 'Drowsy' if drowsiness == 0 else 'Alert'
|
80 |
-
draw_label(frame, label, position=(50, 50))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
81 |
|
82 |
-
# Write the frame (whether labeled or not) to the output video
|
83 |
out.write(frame)
|
84 |
frame_count += 1
|
85 |
|
86 |
-
# Release resources
|
87 |
cap.release()
|
88 |
out.release()
|
89 |
|
90 |
-
return temp_output_path
|
91 |
|
92 |
-
# Gradio interface
|
93 |
interface = gr.Interface(
|
94 |
fn=predict_drowsiness,
|
95 |
-
inputs=gr.Video(),
|
96 |
-
outputs="video",
|
97 |
-
title="Drowsiness Detection in Video",
|
98 |
-
description="Upload a video or record one
|
99 |
)
|
100 |
|
101 |
-
# Launch the app
|
102 |
if __name__ == "__main__":
|
103 |
interface.launch()
|
|
|
4 |
import gradio as gr
|
5 |
import tempfile
|
6 |
import os
|
7 |
+
from datetime import timedelta
|
8 |
+
|
9 |
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
|
10 |
|
|
|
11 |
def preprocess_frame(frame, size=(224, 224)):
    """Resize a BGR frame and scale its pixels for model input.

    Args:
        frame: Image array (H, W, C) as read by OpenCV.
        size: Target (width, height) for resizing. Defaults to the
            224x224 input the CNN was built for; parameterized so other
            model input sizes can reuse this helper.

    Returns:
        Float array of shape (1, size[1], size[0], C) with values in
        [0, 1], ready for ``model.predict``.
    """
    resized_frame = cv2.resize(frame, size)
    # Scale uint8 pixels to [0, 1]; assumes the model was trained on
    # normalized inputs -- TODO confirm against the training pipeline.
    normalized_frame = resized_frame / 255.0
    # Add the leading batch dimension Keras expects.
    return np.expand_dims(normalized_frame, axis=0)
|
15 |
|
|
|
16 |
def draw_label(frame, label, position=(50, 50), font_scale=1, thickness=2):
    """Draw a status label with a filled background box on *frame*.

    A thicker white pass is rendered beneath the colored text so the
    label stays legible regardless of the underlying frame content.
    Mutates *frame* in place.
    """
    # Color scheme keyed on the label text (BGR tuples).
    if label == 'Drowsy':
        color, bg_color = (0, 0, 255), (0, 0, 100)  # red on dark red
    else:
        color, bg_color = (0, 255, 0), (0, 100, 0)  # green on dark green

    font = cv2.FONT_HERSHEY_SIMPLEX
    (text_w, text_h), _baseline = cv2.getTextSize(label, font, font_scale, thickness)

    x, y = position
    # Background rectangle with a small margin around the text.
    top_left = (x - 5, y - text_h - 15)
    bottom_right = (x + text_w + 5, y + 5)
    cv2.rectangle(frame, top_left, bottom_right, bg_color, -1)

    # White outline pass first, colored text on top.
    cv2.putText(frame, label, (x, y), font, font_scale,
                (255, 255, 255), thickness + 2, lineType=cv2.LINE_AA)
    cv2.putText(frame, label, (x, y), font, font_scale,
                color, thickness, lineType=cv2.LINE_AA)
|
34 |
+
|
35 |
+
def add_timestamp(frame, timestamp):
    """Overlay the elapsed-time string near the bottom-left corner.

    Mutates *frame* in place.
    """
    origin = (10, frame.shape[0] - 10)
    cv2.putText(frame, timestamp, origin, cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (255, 255, 255), 1, cv2.LINE_AA)
|
38 |
+
|
39 |
+
def draw_progress_bar(frame, progress):
    """Render a thin progress bar along the top edge of *frame*.

    *progress* is the fraction of the video processed so far (0..1).
    Mutates *frame* in place.
    """
    bar_height = 5
    width = frame.shape[1]
    filled = int(width * progress)
    # Green fill for the completed portion, white outline for the track.
    cv2.rectangle(frame, (0, 0), (filled, bar_height), (0, 255, 0), -1)
    cv2.rectangle(frame, (0, 0), (width, bar_height), (255, 255, 255), 1)
|
45 |
|
46 |
@spaces.GPU(duration=120)
def predict_drowsiness(video_path):
    """Annotate a video with per-frame drowsiness predictions.

    Runs the CNN classifier on roughly two frames per second, draws the
    Drowsy/Alert label, a progress bar, a timestamp and running counts
    onto every frame, and writes the result to a temporary MP4 file.

    Args:
        video_path: Path to the input video file.

    Returns:
        Path to the annotated output video (a temp file; the caller is
        responsible for cleanup).
    """
    # TensorFlow is imported lazily so it initializes inside the GPU
    # context set up by the @spaces.GPU decorator.
    import tensorflow as tf
    print(tf.config.list_physical_devices("GPU"))
    model = tf.keras.models.load_model('cnn.keras')

    cap = cv2.VideoCapture(video_path)
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # Some containers report 0 fps / 0 frames; without these guards the
    # loop below would divide (and take a modulo) by zero.
    if fps <= 0:
        fps = 25  # reasonable fallback playback rate
    skip_interval = max(1, int(fps * 0.5))  # predict ~2x per second
    total_frames = max(1, total_frames)

    # Create a temporary file for the output video.
    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_output:
        temp_output_path = temp_output.name

    out = cv2.VideoWriter(temp_output_path, cv2.VideoWriter_fourcc(*'mp4v'),
                          fps, (frame_width, frame_height))

    frame_count = 0
    drowsy_count = 0
    alert_count = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Per-frame overlays: progress along the top, timestamp bottom-left.
        draw_progress_bar(frame, frame_count / total_frames)
        timestamp = str(timedelta(seconds=int(frame_count / fps)))
        add_timestamp(frame, timestamp)

        # Only run the (expensive) model every skip_interval frames.
        if frame_count % skip_interval == 0:
            preprocessed_frame = preprocess_frame(frame)
            prediction = model.predict(preprocessed_frame)
            drowsiness = np.argmax(prediction)

            # Class 0 is treated as Drowsy -- TODO confirm label order
            # matches the training data.
            label = 'Drowsy' if drowsiness == 0 else 'Alert'
            draw_label(frame, label, position=(50, 50))

            if label == 'Drowsy':
                drowsy_count += 1
            else:
                alert_count += 1

        # Add drowsiness statistics
        stats_text = f"Drowsy: {drowsy_count} | Alert: {alert_count}"
        cv2.putText(frame, stats_text, (frame_width - 200, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1,
                    cv2.LINE_AA)

        # Write the frame (whether labeled or not) to the output video.
        out.write(frame)
        frame_count += 1

    # Release resources.
    cap.release()
    out.release()

    return temp_output_path
|
104 |
|
|
|
105 |
# Gradio UI: a single uploaded/recorded video in, the annotated video out.
interface = gr.Interface(
    fn=predict_drowsiness,
    inputs=gr.Video(),
    outputs="video",
    title="Enhanced Drowsiness Detection in Video",
    description="Upload a video or record one to detect drowsiness with improved visuals and statistics.",
)

# Start the app only when executed as a script (not on import).
if __name__ == "__main__":
    interface.launch()
|