import gradio as gr
import cv2
import numpy as np
import torch
from torchvision import models, transforms
from torchvision.models.detection import FasterRCNN_ResNet50_FPN_Weights
from PIL import Image
import mediapipe as mp
from fer import FER  # Facial emotion recognition

# -----------------------------
# Configuration
# -----------------------------
# For image processing, always run the analysis (no frame skipping)
SKIP_RATE = 1

# Use GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Desired input size for faster inference
DESIRED_SIZE = (640, 480)

# -----------------------------
# Global caches for overlay info and frame counters
# -----------------------------
posture_cache = {"landmarks": None, "text": "Initializing...", "counter": 0}
emotion_cache = {"text": "Initializing...", "counter": 0}
objects_cache = {"boxes": None, "text": "Initializing...", "counter": 0}
faces_cache = {"boxes": None, "text": "Initializing...", "counter": 0}

# -----------------------------
# Initialize Models and Helpers
# -----------------------------
mp_pose = mp.solutions.pose
pose = mp_pose.Pose()
mp_drawing = mp.solutions.drawing_utils

mp_face_detection = mp.solutions.face_detection
face_detection = mp_face_detection.FaceDetection(min_detection_confidence=0.5)

object_detection_model = models.detection.fasterrcnn_resnet50_fpn(
    weights=FasterRCNN_ResNet50_FPN_Weights.DEFAULT
)
object_detection_model.eval().to(device)  # Move model to GPU (if available)
obj_transform = transforms.Compose([transforms.ToTensor()])

# Initialize the FER emotion detector
emotion_detector = FER(mtcnn=True)


# -----------------------------
# Overlay Drawing Functions
# -----------------------------
def draw_posture_overlay(raw_frame, landmarks):
    # Draw circles for each landmark using lime green (BGR: (50, 205, 50))
    for (x, y) in landmarks:
        cv2.circle(raw_frame, (x, y), 4, (50, 205, 50), -1)
    return raw_frame


def draw_boxes_overlay(raw_frame, boxes, color):
    for (x1, y1, x2, y2) in boxes:
        cv2.rectangle(raw_frame, (x1, y1), (x2, y2), color, 2)
    return raw_frame


# -----------------------------
# Heavy (Synchronous) Detection Functions
# -----------------------------
def compute_posture_overlay(image):
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    h, w, _ = frame_bgr.shape
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    pose_results = pose.process(frame_rgb_small)
    if pose_results.pose_landmarks:
        landmarks = []
        for lm in pose_results.pose_landmarks.landmark:
            # Landmarks are normalized, so scaling by the original
            # dimensions maps them straight back to the full-size frame
            x = int(lm.x * w)
            y = int(lm.y * h)
            landmarks.append((x, y))
        text = "Posture detected"
    else:
        landmarks = []
        text = "No posture detected"
    return landmarks, text


def compute_emotion_overlay(image):
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    emotions = emotion_detector.detect_emotions(frame_rgb_small)
    if emotions:
        top_emotion, score = max(emotions[0]["emotions"].items(), key=lambda x: x[1])
        text = f"{top_emotion} ({score:.2f})"
    else:
        text = "No face detected"
    return text


def compute_objects_overlay(image):
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    image_pil = Image.fromarray(frame_rgb_small)
    img_tensor = obj_transform(image_pil).to(device)
    with torch.no_grad():
        detections = object_detection_model([img_tensor])[0]
    threshold = 0.8
    # Detections are in resized-frame coordinates; scale the boxes back up
    # so they line up with the full-size image that gets annotated later
    h, w, _ = frame_bgr.shape
    scale_x = w / DESIRED_SIZE[0]
    scale_y = h / DESIRED_SIZE[1]
    boxes = []
    for box, score in zip(detections["boxes"], detections["scores"]):
        if score > threshold:
            x1, y1, x2, y2 = box.cpu().numpy()
            boxes.append(
                (int(x1 * scale_x), int(y1 * scale_y), int(x2 * scale_x), int(y2 * scale_y))
            )
    text = f"Detected {len(boxes)} object(s)" if boxes else "No objects detected"
    return boxes, text


def compute_faces_overlay(image):
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    h, w, _ = frame_bgr.shape
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    face_results = face_detection.process(frame_rgb_small)
    boxes = []
    if face_results.detections:
        for detection in face_results.detections:
            bbox = detection.location_data.relative_bounding_box
            # The bounding box is normalized, so scale by the original
            # dimensions (not the resized ones) to match the output frame
            x = int(bbox.xmin * w)
            y = int(bbox.ymin * h)
            box_w = int(bbox.width * w)
            box_h = int(bbox.height * h)
            boxes.append((x, y, x + box_w, y + box_h))
        text = f"Detected {len(boxes)} face(s)"
    else:
        text = "No faces detected"
    return boxes, text


# -----------------------------
# Main Analysis Functions for Single Image
# -----------------------------
def analyze_posture_current(image):
    global posture_cache
    posture_cache["counter"] += 1
    current_frame = np.array(image)
    if posture_cache["counter"] % SKIP_RATE == 0 or posture_cache["landmarks"] is None:
        landmarks, text = compute_posture_overlay(image)
        posture_cache["landmarks"] = landmarks
        posture_cache["text"] = text
    output = current_frame.copy()
    if posture_cache["landmarks"]:
        output = draw_posture_overlay(output, posture_cache["landmarks"])
    return output, f"Posture Analysis: {posture_cache['text']}"
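

# -----------------------------
# Remaining Analysis Functions and Gradio Wiring
# -----------------------------
# NOTE: the original source was cut off above, so everything from here down
# to the Markdown description is a reconstruction sketch. Each handler
# mirrors analyze_posture_current; names such as analyze_emotion_current,
# the overlay colors, the make_interface helper, and the tab titles are
# assumptions, not taken from the original.
def analyze_emotion_current(image):
    global emotion_cache
    emotion_cache["counter"] += 1
    current_frame = np.array(image)
    if emotion_cache["counter"] % SKIP_RATE == 0 or emotion_cache["text"] is None:
        emotion_cache["text"] = compute_emotion_overlay(image)
    return current_frame, f"Emotion Analysis: {emotion_cache['text']}"


def analyze_objects_current(image):
    global objects_cache
    objects_cache["counter"] += 1
    current_frame = np.array(image)
    if objects_cache["counter"] % SKIP_RATE == 0 or objects_cache["boxes"] is None:
        boxes, text = compute_objects_overlay(image)
        objects_cache["boxes"] = boxes
        objects_cache["text"] = text
    output = current_frame.copy()
    if objects_cache["boxes"]:
        output = draw_boxes_overlay(output, objects_cache["boxes"], (255, 255, 0))
    return output, f"Object Detection: {objects_cache['text']}"


def analyze_faces_current(image):
    global faces_cache
    faces_cache["counter"] += 1
    current_frame = np.array(image)
    if faces_cache["counter"] % SKIP_RATE == 0 or faces_cache["boxes"] is None:
        boxes, text = compute_faces_overlay(image)
        faces_cache["boxes"] = boxes
        faces_cache["text"] = text
    output = current_frame.copy()
    if faces_cache["boxes"]:
        output = draw_boxes_overlay(output, faces_cache["boxes"], (255, 0, 0))
    return output, f"Face Detection: {faces_cache['text']}"


# One gr.Interface per analysis, collected into tabs (hypothetical helper,
# introduced here only to keep the reconstruction short)
def make_interface(fn, title):
    return gr.Interface(
        fn=fn,
        inputs=gr.Image(label="Upload an Image"),
        outputs=[gr.Image(label="Annotated Image"), gr.Textbox(label="Result")],
        title=title,
    )


tabbed_interface = gr.TabbedInterface(
    interface_list=[
        make_interface(analyze_posture_current, "Posture Analysis"),
        make_interface(analyze_emotion_current, "Emotion Analysis"),
        make_interface(analyze_objects_current, "Object Detection"),
        make_interface(analyze_faces_current, "Face Detection"),
    ],
    tab_names=["Posture", "Emotion", "Objects", "Faces"],
)

# Wrap the tabbed interface in a Blocks app with a short usage note on top
demo = gr.Blocks()
with demo:
    gr.Markdown(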
        "Upload an image to run analysis for posture, emotions, objects, and faces."
" ) tabbed_interface.render() if __name__ == "__main__": demo.launch()