import gradio as gr
import cv2
import numpy as np
import torch
from torchvision import models, transforms
from torchvision.models.detection import FasterRCNN_ResNet50_FPN_Weights
from PIL import Image
import mediapipe as mp
from fer import FER
from transformers import AutoFeatureExtractor, AutoModel

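# Global configuration: how often inference is re-run, the compute device,
# and the working resolution used for all model inputs.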
SKIP_RATE = 1
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
DESIRED_SIZE = (640, 480)

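# Per-analysis caches so previously computed overlays can be reused when frames are skipped.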
posture_cache = {"landmarks": None, "text": "Initializing...", "counter": 0}
emotion_cache = {"text": "Initializing...", "counter": 0}
objects_cache = {"boxes": None, "text": "Initializing...", "object_list_text": "", "counter": 0}
faces_cache = {"boxes": None, "text": "Initializing...", "counter": 0}

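# MediaPipe pose and face detectors.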
mp_pose = mp.solutions.pose
# static_image_mode=True because inputs are independent uploaded images rather than a video stream.
pose = mp_pose.Pose(static_image_mode=True)
mp_drawing = mp.solutions.drawing_utils

mp_face_detection = mp.solutions.face_detection
face_detection = mp_face_detection.FaceDetection(min_detection_confidence=0.5)

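# Pretrained Faster R-CNN (COCO) for object detection and FER (with MTCNN) for emotion recognition.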
object_detection_model = models.detection.fasterrcnn_resnet50_fpn(
    weights=FasterRCNN_ResNet50_FPN_Weights.DEFAULT
)
object_detection_model.eval().to(device)
obj_transform = transforms.Compose([transforms.ToTensor()])

emotion_detector = FER(mtcnn=True)

object_categories = FasterRCNN_ResNet50_FPN_Weights.DEFAULT.meta["categories"]

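# DINO ViT-B/16 backbone used to turn a cropped face into an embedding vector.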
facial_recognition_extractor = AutoFeatureExtractor.from_pretrained("facebook/dino-vitb16")
facial_recognition_model = AutoModel.from_pretrained("facebook/dino-vitb16")
facial_recognition_model.to(device)
facial_recognition_model.eval()


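# Drawing helpers: overlay pose landmarks or bounding boxes on a frame (modified in place).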
def draw_posture_overlay(raw_frame, landmarks):
    for connection in mp_pose.POSE_CONNECTIONS:
        start_idx, end_idx = connection
        if start_idx < len(landmarks) and end_idx < len(landmarks):
            start_point = landmarks[start_idx]
            end_point = landmarks[end_idx]
            cv2.line(raw_frame, start_point, end_point, (50, 205, 50), 2)
    for (x, y) in landmarks:
        cv2.circle(raw_frame, (x, y), 4, (50, 205, 50), -1)
    return raw_frame


def draw_boxes_overlay(raw_frame, boxes, color):
    for (x1, y1, x2, y2) in boxes:
        cv2.rectangle(raw_frame, (x1, y1), (x2, y2), color, 2)
    return raw_frame


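# Inference routines. Each one resizes the input to DESIRED_SIZE for speed and maps
# detections back to the original image resolution where coordinates are involved.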
def compute_posture_overlay(image):
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    h, w, _ = frame_bgr.shape
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    pose_results = pose.process(frame_rgb_small)
    if pose_results.pose_landmarks:
        landmarks = []
        for lm in pose_results.pose_landmarks.landmark:
            # Landmarks are normalized to [0, 1], so scale them straight to the original frame size.
            x = int(lm.x * w)
            y = int(lm.y * h)
            landmarks.append((x, y))
        text = "Posture detected"
    else:
        landmarks = []
        text = "No posture detected"
    return landmarks, text


def compute_emotion_overlay(image):
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    emotions = emotion_detector.detect_emotions(frame_rgb_small)
    if emotions:
        top_emotion, score = max(emotions[0]["emotions"].items(), key=lambda x: x[1])
        text = f"{top_emotion} ({score:.2f})"
    else:
        text = "No face detected"
    return text


def compute_objects_overlay(image):
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    h, w, _ = frame_bgr.shape
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    small_h, small_w, _ = frame_bgr_small.shape
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    image_pil = Image.fromarray(frame_rgb_small)
    img_tensor = obj_transform(image_pil).to(device)
    with torch.no_grad():
        detections = object_detection_model([img_tensor])[0]
    threshold = 0.8
    boxes = []
    object_list = []
    for box, score, label in zip(detections["boxes"], detections["scores"], detections["labels"]):
        if score > threshold:
            # Boxes are predicted on the resized frame; scale them back to the original resolution
            # so they line up with the full-size frame they are drawn on.
            x1, y1, x2, y2 = box.cpu().numpy()
            boxes.append((
                int(x1 * w / small_w), int(y1 * h / small_h),
                int(x2 * w / small_w), int(y2 * h / small_h),
            ))
            label_idx = int(label)
            label_name = object_categories[label_idx] if label_idx < len(object_categories) else "Unknown"
            object_list.append(f"{label_name} ({score:.2f})")
    text = f"Detected {len(boxes)} object(s)" if boxes else "No objects detected"
    object_list_text = " | ".join(object_list) if object_list else "None"
    return boxes, text, object_list_text


def compute_faces_overlay(image):
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    h, w, _ = frame_bgr.shape
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    face_results = face_detection.process(frame_rgb_small)
    boxes = []
    if face_results.detections:
        for detection in face_results.detections:
            # The bounding box is relative (0-1), so scale it to the original frame size
            # rather than the resized detection frame.
            bbox = detection.location_data.relative_bounding_box
            x = int(bbox.xmin * w)
            y = int(bbox.ymin * h)
            box_w = int(bbox.width * w)
            box_h = int(bbox.height * h)
            boxes.append((x, y, x + box_w, y + box_h))
        text = f"Detected {len(boxes)} face(s)"
    else:
        text = "No faces detected"
    return boxes, text


def compute_facial_recognition_vector(image):
    """
    Detects a face using MediaPipe, crops it, and computes its embedding vector
    using facebook/dino-vitb16. The raw vector is returned as a string.
    """
    frame_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    frame_bgr_small = cv2.resize(frame_bgr, DESIRED_SIZE)
    frame_rgb_small = cv2.cvtColor(frame_bgr_small, cv2.COLOR_BGR2RGB)
    face_results = face_detection.process(frame_rgb_small)
    if face_results.detections:
        detection = face_results.detections[0]
        bbox = detection.location_data.relative_bounding_box
        h, w, _ = frame_rgb_small.shape
        x = int(bbox.xmin * w)
        y = int(bbox.ymin * h)
        box_w = int(bbox.width * w)
        box_h = int(bbox.height * h)
        # Clamp in case MediaPipe returns a box that starts slightly off-screen.
        x, y = max(x, 0), max(y, 0)
        face_crop = frame_rgb_small[y:y + box_h, x:x + box_w]
        face_image = Image.fromarray(face_crop)
        inputs = facial_recognition_extractor(face_image, return_tensors="pt").to(device)
        with torch.no_grad():
            outputs = facial_recognition_model(**inputs)
        # Mean-pool the token embeddings into a single vector for the face.
        vector = outputs.last_hidden_state.mean(dim=1).squeeze()
        vector_np = vector.cpu().numpy()
        vector_str = np.array2string(vector_np, precision=2, separator=',')
        return face_crop, vector_str
    else:
        return np.array(image), "No face detected"


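# Gradio callbacks: each runs one analysis on the uploaded image and returns an
# annotated frame plus an HTML status string.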
def analyze_posture_current(image):
    global posture_cache
    posture_cache["counter"] += 1
    current_frame = np.array(image)
    if posture_cache["counter"] % SKIP_RATE == 0 or posture_cache["landmarks"] is None:
        landmarks, text = compute_posture_overlay(image)
        posture_cache["landmarks"] = landmarks
        posture_cache["text"] = text
    output = current_frame.copy()
    if posture_cache["landmarks"]:
        output = draw_posture_overlay(output, posture_cache["landmarks"])
    return output, f"<div style='color: lime !important;'>Posture Analysis: {posture_cache['text']}</div>"


def analyze_emotion_current(image):
    global emotion_cache
    emotion_cache["counter"] += 1
    current_frame = np.array(image)
    if emotion_cache["counter"] % SKIP_RATE == 0 or emotion_cache["text"] is None:
        text = compute_emotion_overlay(image)
        emotion_cache["text"] = text
    return current_frame, f"<div style='color: lime !important;'>Emotion Analysis: {emotion_cache['text']}</div>"


def analyze_objects_current(image):
    global objects_cache
    objects_cache["counter"] += 1
    current_frame = np.array(image)
    if objects_cache["counter"] % SKIP_RATE == 0 or objects_cache["boxes"] is None:
        boxes, text, object_list_text = compute_objects_overlay(image)
        objects_cache["boxes"] = boxes
        objects_cache["text"] = text
        objects_cache["object_list_text"] = object_list_text
    output = current_frame.copy()
    if objects_cache["boxes"]:
        output = draw_boxes_overlay(output, objects_cache["boxes"], (255, 255, 0))
    combined_text = f"Object Detection: {objects_cache['text']}<br>Details: {objects_cache['object_list_text']}"
    return output, f"<div style='color: lime !important;'>{combined_text}</div>"


def analyze_faces_current(image):
    global faces_cache
    faces_cache["counter"] += 1
    current_frame = np.array(image)
    if faces_cache["counter"] % SKIP_RATE == 0 or faces_cache["boxes"] is None:
        boxes, text = compute_faces_overlay(image)
        faces_cache["boxes"] = boxes
        faces_cache["text"] = text
    output = current_frame.copy()
    if faces_cache["boxes"]:
        output = draw_boxes_overlay(output, faces_cache["boxes"], (0, 0, 255))
    return output, f"<div style='color: lime !important;'>Face Detection: {faces_cache['text']}</div>"


def analyze_facial_recognition(image):
    face_crop, vector_str = compute_facial_recognition_vector(image)
    return face_crop, f"<div style='color: lime !important;'>Facial Vector: {vector_str}</div>"


def analyze_all(image):
    current_frame = np.array(image).copy()

    landmarks, posture_text = compute_posture_overlay(image)
    if landmarks:
        current_frame = draw_posture_overlay(current_frame, landmarks)

    emotion_text = compute_emotion_overlay(image)

    boxes_obj, objects_text, object_list_text = compute_objects_overlay(image)
    if boxes_obj:
        current_frame = draw_boxes_overlay(current_frame, boxes_obj, (255, 255, 0))

    boxes_face, faces_text = compute_faces_overlay(image)
    if boxes_face:
        current_frame = draw_boxes_overlay(current_frame, boxes_face, (0, 0, 255))

    combined_text = (
        f"<b>Posture Analysis:</b> {posture_text}<br>"
        f"<b>Emotion Analysis:</b> {emotion_text}<br>"
        f"<b>Object Detection:</b> {objects_text}<br>"
        f"<b>Detected Objects:</b> {object_list_text}<br>"
        f"<b>Face Detection:</b> {faces_text}"
    )
    if object_list_text and object_list_text != "None":
        description_text = f"Image Description: The scene features {object_list_text}."
    else:
        description_text = "Image Description: No prominent objects detected."
    combined_text += f"<br><br><div style='border:1px solid lime; padding:10px; box-shadow: 0 0 10px lime;'><b>{description_text}</b></div>"
    combined_text_html = f"<div style='color: lime !important;'>{combined_text}</div>"
    return current_frame, combined_text_html


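# Neon-green "Orbitron" theme for the Gradio UI.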
custom_css = """ |
|
@import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap'); |
|
body { |
|
background-color: #0e0e0e; |
|
font-family: 'Orbitron', sans-serif; |
|
color: #32CD32; |
|
} |
|
.gradio-container { |
|
background: linear-gradient(135deg, #1a1a1a, #333333); |
|
border: 2px solid #32CD32; |
|
box-shadow: 0 0 15px #32CD32; |
|
border-radius: 10px; |
|
padding: 20px; |
|
max-width: 1200px; |
|
margin: auto; |
|
} |
|
.gradio-title, .gradio-description, .tab-item, .tab-item * { |
|
color: #32CD32 !important; |
|
text-shadow: 0 0 10px #32CD32; |
|
} |
|
input, button, .output { |
|
border: 1px solid #32CD32; |
|
box-shadow: 0 0 8px #32CD32; |
|
color: #32CD32; |
|
} |
|
""" |
|
|
|
|
|
|
|
|
|
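# One gr.Interface per analysis; they are combined into a tabbed layout below.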
posture_interface = gr.Interface(
    fn=analyze_posture_current,
    inputs=gr.Image(label="Upload an Image for Posture Analysis"),
    outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.HTML(label="Posture Analysis")],
    title="Posture",
    description="Detects your posture using MediaPipe with connector lines.",
    live=False
)

emotion_interface = gr.Interface(
    fn=analyze_emotion_current,
    inputs=gr.Image(label="Upload an Image for Emotion Analysis"),
    outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.HTML(label="Emotion Analysis")],
    title="Emotion",
    description="Detects facial emotions using FER.",
    live=False
)

objects_interface = gr.Interface(
    fn=analyze_objects_current,
    inputs=gr.Image(label="Upload an Image for Object Detection"),
    outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.HTML(label="Object Detection")],
    title="Objects",
    description="Detects objects using a pretrained Faster R-CNN.",
    live=False
)

faces_interface = gr.Interface(
    fn=analyze_faces_current,
    inputs=gr.Image(label="Upload an Image for Face Detection"),
    outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.HTML(label="Face Detection")],
    title="Faces",
    description="Detects faces using MediaPipe.",
    live=False
)

facial_recognition_interface = gr.Interface(
    fn=analyze_facial_recognition,
    inputs=gr.Image(label="Upload a Face Image for Facial Recognition"),
    outputs=[gr.Image(type="numpy", label="Cropped Face"), gr.HTML(label="Facial Recognition")],
    title="Facial Recognition",
    description="Extracts and outputs the facial vector using facebook/dino-vitb16.",
    live=False
)

all_interface = gr.Interface(
    fn=analyze_all,
    inputs=gr.Image(label="Upload an Image for All Inferences"),
    outputs=[gr.Image(type="numpy", label="Annotated Output"), gr.HTML(label="Combined Analysis")],
    title="All Inferences",
    description="Runs posture, emotion, object, and face detection all at once.",
    live=False
)

tabbed_interface = gr.TabbedInterface(
    interface_list=[
        posture_interface,
        emotion_interface,
        objects_interface,
        faces_interface,
        facial_recognition_interface,
        all_interface
    ],
    tab_names=[
        "Posture",
        "Emotion",
        "Objects",
        "Faces",
        "Facial Recognition",
        "All Inferences"
    ]
)

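# Wrap the tabbed interface in a Blocks layout so the custom CSS applies to the whole app.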
demo = gr.Blocks(css=custom_css)
with demo:
    gr.Markdown("<h1 class='gradio-title' style='color: #32CD32;'>Multi-Analysis Image App</h1>")
    gr.Markdown("<p class='gradio-description' style='color: #32CD32;'>Upload an image to run high-tech analysis for posture, emotions, objects, faces, and facial embeddings.</p>")
    tabbed_interface.render()

if __name__ == "__main__":
    demo.launch()