import cv2
import spaces  # Hugging Face Spaces GPU helper (used by ZeroGPU Spaces)
import numpy as np
import gradio as gr
import tempfile
import os

# Restrict TensorFlow to the first GPU
os.environ['CUDA_VISIBLE_DEVICES'] = "0"


# Function to preprocess each frame
def preprocess_frame(frame):
    resized_frame = cv2.resize(frame, (224, 224))  # Adjust size based on your model's input shape
    normalized_frame = resized_frame / 255.0
    return np.expand_dims(normalized_frame, axis=0)  # Add batch dimension


# Function to draw a styled label on the frame
def draw_label(frame, label, position=(50, 50), font_scale=1, thickness=2):
    # Define label colors
    if label == 'Drowsy':
        color = (0, 0, 255)     # Red for Drowsy
        bg_color = (0, 0, 100)  # Darker background for Drowsy
    else:
        color = (0, 255, 0)     # Green for Alert
        bg_color = (0, 100, 0)  # Darker background for Alert

    font = cv2.FONT_HERSHEY_SIMPLEX
    text_size = cv2.getTextSize(label, font, font_scale, thickness)[0]

    # Define rectangle background dimensions
    text_x, text_y = position
    rect_start = (text_x, text_y - text_size[1] - 10)  # Adjust y to account for text height
    rect_end = (text_x + text_size[0] + 10, text_y + 10)

    # Draw rectangle background
    cv2.rectangle(frame, rect_start, rect_end, bg_color, -1)

    # Draw a white border around the text, then the main colored text on top
    cv2.putText(frame, label, position, font, font_scale, (255, 255, 255), thickness + 2, lineType=cv2.LINE_AA)
    cv2.putText(frame, label, position, font, font_scale, color, thickness, lineType=cv2.LINE_AA)


def predict_drowsiness(video_path):
    # Import TensorFlow and load the model lazily, at request time
    import tensorflow as tf
    print(tf.config.list_physical_devices("GPU"))
    model = tf.keras.models.load_model('cnn.keras')

    # Open the video file
    cap = cv2.VideoCapture(video_path)
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))

    # Run the model on roughly one frame every 0.5 seconds (at least every frame)
    skip_interval = max(1, int(fps * 0.5))

    # Create a temporary file for the output video
    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_output:
        temp_output_path = temp_output.name

    # Output video settings
    out = cv2.VideoWriter(temp_output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (frame_width, frame_height))

    frame_count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Only run the model on frames at the specified interval
        if frame_count % skip_interval == 0:
            # Preprocess the frame and predict drowsiness
            preprocessed_frame = preprocess_frame(frame)
            prediction = model.predict(preprocessed_frame)
            drowsiness = np.argmax(prediction)

            # Add label to the frame with improved visibility
            label = 'Drowsy' if drowsiness == 0 else 'Alert'
            draw_label(frame, label, position=(50, 50))

        # Write the frame (whether labeled or not) to the output video
        out.write(frame)
        frame_count += 1

    # Release resources
    cap.release()
    out.release()
    return temp_output_path  # Return the path to the temporary output video


# Gradio interface
interface = gr.Interface(
    fn=predict_drowsiness,
    inputs=gr.Video(),  # Video input from webcam or upload
    outputs="video",    # Return a playable video with predictions
    title="Drowsiness Detection in Video",
    description="Upload a video or record one, and this tool will detect if the person is drowsy.",
)

# Launch the app
if __name__ == "__main__":
    interface.launch()