import math
import os
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from facenet_pytorch import InceptionResnetV1, MTCNN
import tensorflow as tf
import mediapipe as mp
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.patches import Rectangle
from moviepy.editor import VideoFileClip
from PIL import Image, ImageDraw, ImageFont
import gradio as gr
import tempfile
import shutil
import time
matplotlib.rcParams['figure.dpi'] = 400
matplotlib.rcParams['savefig.dpi'] = 400
# Initialize models and other global variables
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
FIXED_FPS = 5
mtcnn = MTCNN(keep_all=False, device=device, thresholds=[0.95, 0.95, 0.95], min_face_size=80)
model = InceptionResnetV1(pretrained='vggface2').eval().to(device)
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=1, min_detection_confidence=0.8)
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils
pose = mp_pose.Pose(static_image_mode=False, min_detection_confidence=0.8, min_tracking_confidence=0.8)
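# Timecode helpers: convert between frame indices, seconds, and "HH:MM:SS(.mmm)" strings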
def frame_to_timecode(frame_num, total_frames, duration):
total_seconds = (frame_num / total_frames) * duration
hours = int(total_seconds // 3600)
minutes = int((total_seconds % 3600) // 60)
seconds = int(total_seconds % 60)
milliseconds = int((total_seconds - int(total_seconds)) * 1000)
return f"{hours:02d}:{minutes:02d}:{seconds:02d}.{milliseconds:03d}"
def seconds_to_timecode(seconds):
hours = int(seconds // 3600)
minutes = int((seconds % 3600) // 60)
seconds = int(seconds % 60)
return f"{hours:02d}:{minutes:02d}:{seconds:02d}"
def timecode_to_seconds(timecode):
h, m, s = map(int, timecode.split(':'))
return h * 3600 + m * 60 + s
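# Normalize a 160x160 face crop to [-1, 1] and run it through InceptionResnetV1 to get a 512-D embedding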
def get_face_embedding(face_img):
face_tensor = torch.tensor(face_img).permute(2, 0, 1).unsqueeze(0).float() / 255
face_tensor = (face_tensor - 0.5) / 0.5
face_tensor = face_tensor.to(device)
with torch.no_grad():
embedding = model(face_tensor)
return embedding.cpu().numpy().flatten()
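# Rotate the image so the eyes are level, using MediaPipe Face Mesh eye landmarks
# (currently bypassed in process_frames, where the raw crop is used instead)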
def alignFace(img):
img_raw = img.copy()
results = face_mesh.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
if not results.multi_face_landmarks:
return None
landmarks = results.multi_face_landmarks[0].landmark
left_eye = np.array([[landmarks[33].x, landmarks[33].y], [landmarks[160].x, landmarks[160].y],
[landmarks[158].x, landmarks[158].y], [landmarks[144].x, landmarks[144].y],
[landmarks[153].x, landmarks[153].y], [landmarks[145].x, landmarks[145].y]])
right_eye = np.array([[landmarks[362].x, landmarks[362].y], [landmarks[385].x, landmarks[385].y],
[landmarks[387].x, landmarks[387].y], [landmarks[263].x, landmarks[263].y],
[landmarks[373].x, landmarks[373].y], [landmarks[380].x, landmarks[380].y]])
left_eye_center = left_eye.mean(axis=0).astype(np.int32)
right_eye_center = right_eye.mean(axis=0).astype(np.int32)
dY = right_eye_center[1] - left_eye_center[1]
dX = right_eye_center[0] - left_eye_center[0]
angle = np.degrees(np.arctan2(dY, dX))
desired_angle = 0
angle_diff = desired_angle - angle
height, width = img_raw.shape[:2]
center = (width // 2, height // 2)
rotation_matrix = cv2.getRotationMatrix2D(center, angle_diff, 1)
new_img = cv2.warpAffine(img_raw, rotation_matrix, (width, height))
return new_img
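# Combine shoulder/hip/knee angles, vertical alignment, head tilt and head position
# from MediaPipe Pose into a single weighted posture score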
def calculate_posture_score(frame):
image_height, image_width, _ = frame.shape
results = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
if not results.pose_landmarks:
return None, None
landmarks = results.pose_landmarks.landmark
# Use only body landmarks
left_shoulder = landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value]
right_shoulder = landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value]
left_hip = landmarks[mp_pose.PoseLandmark.LEFT_HIP.value]
right_hip = landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value]
left_knee = landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value]
right_knee = landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value]
# Calculate angles
shoulder_angle = abs(math.degrees(math.atan2(right_shoulder.y - left_shoulder.y, right_shoulder.x - left_shoulder.x)))
hip_angle = abs(math.degrees(math.atan2(right_hip.y - left_hip.y, right_hip.x - left_hip.x)))
knee_angle = abs(math.degrees(math.atan2(right_knee.y - left_knee.y, right_knee.x - left_knee.x)))
# Calculate vertical alignment
shoulder_hip_alignment = abs((left_shoulder.y + right_shoulder.y) / 2 - (left_hip.y + right_hip.y) / 2)
hip_knee_alignment = abs((left_hip.y + right_hip.y) / 2 - (left_knee.y + right_knee.y) / 2)
# Add head landmarks
nose = landmarks[mp_pose.PoseLandmark.NOSE.value]
left_ear = landmarks[mp_pose.PoseLandmark.LEFT_EAR.value]
right_ear = landmarks[mp_pose.PoseLandmark.RIGHT_EAR.value]
# Calculate head tilt
head_tilt = abs(math.degrees(math.atan2(right_ear.y - left_ear.y, right_ear.x - left_ear.x)))
# Calculate head position relative to shoulders
head_position = abs((nose.y - (left_shoulder.y + right_shoulder.y) / 2) /
((left_shoulder.y + right_shoulder.y) / 2 - (left_hip.y + right_hip.y) / 2))
# Combine metrics into a single posture score (you may need to adjust these weights)
posture_score = (
(1 - abs(shoulder_angle - hip_angle) / 90) * 0.3 +
(1 - abs(hip_angle - knee_angle) / 90) * 0.2 +
(1 - shoulder_hip_alignment) * 0.1 +
(1 - hip_knee_alignment) * 0.1 +
(1 - abs(head_tilt - 90) / 90) * 0.15 +
(1 - head_position) * 0.15
)
return posture_score, results.pose_landmarks
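# Sample the video at approximately desired_fps and save each sampled frame as a JPEG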
def extract_frames(video_path, output_folder, desired_fps, progress_callback=None):
os.makedirs(output_folder, exist_ok=True)
clip = VideoFileClip(video_path)
original_fps = clip.fps
duration = clip.duration
total_frames = int(duration * original_fps)
step = max(1, original_fps / desired_fps)
total_frames_to_extract = int(total_frames / step)
frame_count = 0
for t in np.arange(0, duration, step / original_fps):
frame = clip.get_frame(t)
img = Image.fromarray(frame)
img.save(os.path.join(output_folder, f"frame_{frame_count:04d}.jpg"))
frame_count += 1
if progress_callback:
progress = min(100, (frame_count / total_frames_to_extract) * 100)
            progress_callback(progress, f"Extracting frame {frame_count} of {total_frames_to_extract}")
if frame_count >= total_frames_to_extract:
break
clip.close()
return frame_count, original_fps
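# Treat a face as frontal if the angle between the nose-to-left-chin and
# nose-to-right-chin vectors is within `threshold` degrees of 180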
def is_frontal_face(landmarks, threshold=40):
nose_tip = landmarks[4]
left_chin = landmarks[234]
right_chin = landmarks[454]
nose_to_left = [left_chin.x - nose_tip.x, left_chin.y - nose_tip.y]
nose_to_right = [right_chin.x - nose_tip.x, right_chin.y - nose_tip.y]
dot_product = nose_to_left[0] * nose_to_right[0] + nose_to_left[1] * nose_to_right[1]
magnitude_left = math.sqrt(nose_to_left[0] ** 2 + nose_to_left[1] ** 2)
magnitude_right = math.sqrt(nose_to_right[0] ** 2 + nose_to_right[1] ** 2)
cos_angle = dot_product / (magnitude_left * magnitude_right)
angle = math.acos(cos_angle)
angle_degrees = math.degrees(angle)
return abs(180 - angle_degrees) < threshold
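# For each batch of frames: score posture, detect faces with MTCNN, keep high-confidence
# frontal faces, save the 160x160 crops and store their FaceNet embeddings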
def process_frames(frames_folder, aligned_faces_folder, frame_count, progress, batch_size):
embeddings_by_frame = {}
emotions_by_frame = {}
posture_scores_by_frame = {}
posture_landmarks_by_frame = {}
aligned_face_paths = []
frame_files = sorted([f for f in os.listdir(frames_folder) if f.endswith('.jpg')])
for i in range(0, len(frame_files), batch_size):
batch_files = frame_files[i:i + batch_size]
batch_frames = []
batch_nums = []
for frame_file in batch_files:
frame_num = int(frame_file.split('_')[1].split('.')[0])
frame_path = os.path.join(frames_folder, frame_file)
frame = cv2.imread(frame_path)
if frame is not None:
batch_frames.append(frame)
batch_nums.append(frame_num)
if batch_frames:
batch_boxes, batch_probs = mtcnn.detect(batch_frames)
for j, (frame, frame_num, boxes, probs) in enumerate(
zip(batch_frames, batch_nums, batch_boxes, batch_probs)):
# Calculate posture score for the full frame
posture_score, posture_landmarks = calculate_posture_score(frame)
posture_scores_by_frame[frame_num] = posture_score
posture_landmarks_by_frame[frame_num] = posture_landmarks
if boxes is not None and len(boxes) > 0 and probs[0] >= 0.99:
x1, y1, x2, y2 = [int(b) for b in boxes[0]]
face = frame[y1:y2, x1:x2]
if face.size > 0:
results = face_mesh.process(cv2.cvtColor(face, cv2.COLOR_BGR2RGB))
if results.multi_face_landmarks and is_frontal_face(results.multi_face_landmarks[0].landmark):
#aligned_face = alignFace(face)
aligned_face = face
if aligned_face is not None:
aligned_face_resized = cv2.resize(aligned_face, (160, 160))
output_path = os.path.join(aligned_faces_folder, f"frame_{frame_num}_face.jpg")
cv2.imwrite(output_path, aligned_face_resized)
aligned_face_paths.append(output_path)
embedding = get_face_embedding(aligned_face_resized)
embeddings_by_frame[frame_num] = embedding
progress((i + len(batch_files)) / len(frame_files),
f"Processing frames {i + 1} to {min(i + len(batch_files), len(frame_files))} of {len(frame_files)}")
return embeddings_by_frame, posture_scores_by_frame, posture_landmarks_by_frame, aligned_face_paths
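# Cluster face embeddings into identities with DBSCAN (cosine metric); fall back to a
# single cluster when there are too few faces or everything is labelled as noise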
def cluster_faces(embeddings):
if len(embeddings) < 2:
print("Not enough faces for clustering. Assigning all to one cluster.")
return np.zeros(len(embeddings), dtype=int)
X = np.stack(embeddings)
dbscan = DBSCAN(eps=0.5, min_samples=5, metric='cosine')
clusters = dbscan.fit_predict(X)
if np.all(clusters == -1):
print("DBSCAN assigned all to noise. Considering as one cluster.")
return np.zeros(len(embeddings), dtype=int)
return clusters
def organize_faces_by_person(embeddings_by_frame, clusters, aligned_faces_folder, organized_faces_folder):
for (frame_num, embedding), cluster in zip(embeddings_by_frame.items(), clusters):
person_folder = os.path.join(organized_faces_folder, f"person_{cluster}")
os.makedirs(person_folder, exist_ok=True)
src = os.path.join(aligned_faces_folder, f"frame_{frame_num}_face.jpg")
dst = os.path.join(person_folder, f"frame_{frame_num}_face.jpg")
shutil.copy(src, dst)
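# Keep only the largest identity cluster, save its embeddings to .npy and build a DataFrame
# of frame numbers, timecodes and raw embedding columns (no CSV is written despite the name)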
def save_person_data_to_csv(embeddings_by_frame, clusters, desired_fps, original_fps, output_folder, video_duration):
person_data = {}
for (frame_num, embedding), cluster in zip(embeddings_by_frame.items(), clusters):
if cluster not in person_data:
person_data[cluster] = []
person_data[cluster].append((frame_num, embedding))
largest_cluster = max(person_data, key=lambda k: len(person_data[k]))
data = person_data[largest_cluster]
data.sort(key=lambda x: x[0])
frames, embeddings = zip(*data)
embeddings_array = np.array(embeddings)
np.save(os.path.join(output_folder, 'face_embeddings.npy'), embeddings_array)
total_frames = max(frames)
timecodes = [frame_to_timecode(frame, total_frames, video_duration) for frame in frames]
df_data = {
'Frame': frames,
'Timecode': timecodes,
'Embedding_Index': range(len(embeddings))
}
for i in range(len(embeddings[0])):
df_data[f'Raw_Embedding_{i}'] = [embedding[i] for embedding in embeddings]
df = pd.DataFrame(df_data)
return df, largest_cluster
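# Fully connected autoencoder (input -> 32-D bottleneck -> input) used for
# reconstruction-error based anomaly detection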
class Autoencoder(nn.Module):
def __init__(self, input_size):
super(Autoencoder, self).__init__()
self.encoder = nn.Sequential(
nn.Linear(input_size, 256),
nn.ReLU(),
nn.Linear(256, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 32)
)
self.decoder = nn.Sequential(
nn.Linear(32, 64),
nn.ReLU(),
nn.Linear(64, 128),
nn.ReLU(),
nn.Linear(128, 256),
nn.ReLU(),
nn.Linear(256, input_size)
)
def forward(self, x):
batch_size, seq_len, _ = x.size()
x = x.view(batch_size * seq_len, -1)
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded.view(batch_size, seq_len, -1)
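# Flag values more than `threshold` standard deviations above the mean as anomalies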
def determine_anomalies(mse_values, threshold):
mean = np.mean(mse_values)
std = np.std(mse_values)
anomalies = mse_values > (mean + threshold * std)
return anomalies
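# Train one autoencoder on the facial embeddings and one on the min-max scaled posture
# scores, then return the per-frame reconstruction MSE for both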
def anomaly_detection(X_embeddings, X_posture, epochs=200, batch_size=8, patience=5):
# Normalize posture
scaler_posture = MinMaxScaler()
X_posture_scaled = scaler_posture.fit_transform(X_posture.reshape(-1, 1))
# Process facial embeddings
X_embeddings = torch.FloatTensor(X_embeddings).to(device)
if X_embeddings.dim() == 2:
X_embeddings = X_embeddings.unsqueeze(0)
# Process posture
X_posture_scaled = torch.FloatTensor(X_posture_scaled).to(device)
if X_posture_scaled.dim() == 2:
X_posture_scaled = X_posture_scaled.unsqueeze(0)
model_embeddings = Autoencoder(input_size=X_embeddings.shape[2]).to(device)
model_posture = Autoencoder(input_size=X_posture_scaled.shape[2]).to(device)
criterion = nn.MSELoss()
optimizer_embeddings = optim.Adam(model_embeddings.parameters())
optimizer_posture = optim.Adam(model_posture.parameters())
# Train models
for epoch in range(epochs):
for model, optimizer, X in [(model_embeddings, optimizer_embeddings, X_embeddings),
(model_posture, optimizer_posture, X_posture_scaled)]:
model.train()
optimizer.zero_grad()
output = model(X)
loss = criterion(output, X)
loss.backward()
optimizer.step()
# Compute MSE for embeddings and posture
model_embeddings.eval()
model_posture.eval()
with torch.no_grad():
reconstructed_embeddings = model_embeddings(X_embeddings).cpu().numpy()
reconstructed_posture = model_posture(X_posture_scaled).cpu().numpy()
mse_embeddings = np.mean(np.power(X_embeddings.cpu().numpy() - reconstructed_embeddings, 2), axis=2).squeeze()
mse_posture = np.mean(np.power(X_posture_scaled.cpu().numpy() - reconstructed_posture, 2), axis=2).squeeze()
return mse_embeddings, mse_posture
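# Plot per-frame MSE over time with a rolling mean/std band, mark values above
# mean + anomaly_threshold * std, and shade groups of anomalies that are close in time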
def plot_mse(df, mse_values, title, color='navy', time_threshold=3, anomaly_threshold=4):
    # Create the figure once via plt.subplots; a separate plt.figure call would just leak an unused figure
    fig, ax = plt.subplots(figsize=(16, 8), dpi=400)
if 'Seconds' not in df.columns:
df['Seconds'] = df['Timecode'].apply(
lambda x: sum(float(t) * 60 ** i for i, t in enumerate(reversed(x.split(':')))))
# Ensure df and mse_values have the same length and remove NaN values
min_length = min(len(df), len(mse_values))
df = df.iloc[:min_length]
mse_values = mse_values[:min_length]
# Remove NaN values
mask = ~np.isnan(mse_values)
df = df[mask]
mse_values = mse_values[mask]
mean = pd.Series(mse_values).rolling(window=10).mean()
std = pd.Series(mse_values).rolling(window=10).std()
median = np.median(mse_values)
ax.scatter(df['Seconds'], mse_values, color=color, alpha=0.3, s=5)
ax.plot(df['Seconds'], mean, color=color, linewidth=0.5)
ax.fill_between(df['Seconds'], mean - std, mean + std, color=color, alpha=0.1)
# Add median line
ax.axhline(y=median, color='black', linestyle='--', label='Median Baseline')
# Add threshold line
threshold = np.mean(mse_values) + anomaly_threshold * np.std(mse_values)
ax.axhline(y=threshold, color='red', linestyle='--', label=f'Threshold: {anomaly_threshold:.1f}')
ax.text(ax.get_xlim()[1], threshold, f'Threshold: {anomaly_threshold:.1f}', verticalalignment='center', horizontalalignment='left', color='red')
anomalies = determine_anomalies(mse_values, anomaly_threshold)
anomaly_frames = df['Frame'].iloc[anomalies].tolist()
ax.scatter(df['Seconds'].iloc[anomalies], mse_values[anomalies], color='red', s=20, zorder=5)
anomaly_data = list(zip(df['Timecode'].iloc[anomalies],
df['Seconds'].iloc[anomalies],
mse_values[anomalies]))
anomaly_data.sort(key=lambda x: x[1])
grouped_anomalies = []
current_group = []
for timecode, sec, mse in anomaly_data:
if not current_group or sec - current_group[-1][1] <= time_threshold:
current_group.append((timecode, sec, mse))
else:
grouped_anomalies.append(current_group)
current_group = [(timecode, sec, mse)]
if current_group:
grouped_anomalies.append(current_group)
for group in grouped_anomalies:
start_sec = group[0][1]
end_sec = group[-1][1]
rect = Rectangle((start_sec, ax.get_ylim()[0]), end_sec - start_sec, ax.get_ylim()[1] - ax.get_ylim()[0],
facecolor='red', alpha=0.2, zorder=1)
ax.add_patch(rect)
for group in grouped_anomalies:
highest_mse_anomaly = max(group, key=lambda x: x[2])
timecode, sec, mse = highest_mse_anomaly
ax.annotate(timecode, (sec, mse), textcoords="offset points", xytext=(0, 10),
ha='center', fontsize=6, color='red')
max_seconds = df['Seconds'].max()
num_ticks = 100
tick_locations = np.linspace(0, max_seconds, num_ticks)
tick_labels = [seconds_to_timecode(int(s)) for s in tick_locations]
ax.set_xticks(tick_locations)
ax.set_xticklabels(tick_labels, rotation=90, ha='center', fontsize=6)
ax.set_xlabel('Timecode')
ax.set_ylabel('Mean Squared Error')
ax.set_title(title)
ax.grid(True, linestyle='--', alpha=0.7)
ax.legend()
plt.tight_layout()
plt.close()
return fig, anomaly_frames
def plot_mse_histogram(mse_values, title, anomaly_threshold, color='blue'):
    fig, ax = plt.subplots(figsize=(16, 4), dpi=400)
ax.hist(mse_values, bins=100, edgecolor='black', color=color, alpha=0.7)
ax.set_xlabel('Mean Squared Error')
ax.set_ylabel('Number of Samples')
ax.set_title(title)
mean = np.mean(mse_values)
std = np.std(mse_values)
threshold = mean + anomaly_threshold * std
ax.axvline(x=threshold, color='red', linestyle='--', linewidth=2)
# Move annotation to the bottom and away from the line
ax.annotate(f'Threshold: {anomaly_threshold:.1f}',
xy=(threshold, ax.get_ylim()[0]),
xytext=(0, -20),
textcoords='offset points',
ha='center', va='top',
bbox=dict(boxstyle='round,pad=0.5', fc='white', ec='none', alpha=0.7),
color='red')
plt.tight_layout()
plt.close()
return fig
def plot_posture(df, posture_scores, color='blue', anomaly_threshold=4):
    fig, ax = plt.subplots(figsize=(16, 8), dpi=400)
df['Seconds'] = df['Timecode'].apply(
lambda x: sum(float(t) * 60 ** i for i, t in enumerate(reversed(x.split(':')))))
posture_data = [(frame, score) for frame, score in posture_scores.items() if score is not None]
posture_frames, posture_scores = zip(*posture_data)
# Create a new dataframe for posture data
posture_df = pd.DataFrame({'Frame': posture_frames, 'Score': posture_scores})
posture_df = posture_df.merge(df[['Frame', 'Seconds']], on='Frame', how='inner')
ax.scatter(posture_df['Seconds'], posture_df['Score'], color=color, alpha=0.3, s=5)
mean = posture_df['Score'].rolling(window=10).mean()
ax.plot(posture_df['Seconds'], mean, color=color, linewidth=0.5)
ax.set_xlabel('Timecode')
ax.set_ylabel('Posture Score')
ax.set_title("Body Posture Over Time")
ax.grid(True, linestyle='--', alpha=0.7)
max_seconds = df['Seconds'].max()
num_ticks = 80
tick_locations = np.linspace(0, max_seconds, num_ticks)
tick_labels = [seconds_to_timecode(int(s)) for s in tick_locations]
ax.set_xticks(tick_locations)
ax.set_xticklabels(tick_labels, rotation=90, ha='center', fontsize=6)
plt.tight_layout()
plt.close()
return fig
def plot_mse_heatmap(mse_values, title, df):
    fig, ax = plt.subplots(figsize=(20, 5), dpi=400)
# Reshape MSE values to 2D array for heatmap
mse_2d = mse_values.reshape(1, -1)
# Create heatmap
sns.heatmap(mse_2d, cmap='YlOrRd', cbar_kws={'label': 'MSE'}, ax=ax)
# Set x-axis ticks to timecodes
num_ticks = 60
tick_locations = np.linspace(0, len(mse_values) - 1, num_ticks).astype(int)
tick_labels = [df['Timecode'].iloc[i] for i in tick_locations]
ax.set_xticks(tick_locations)
ax.set_xticklabels(tick_labels, rotation=90, ha='center', va='top') # Adjusted rotation and alignment
ax.set_title(title)
# Remove y-axis labels
ax.set_yticks([])
plt.tight_layout() # Ensure all elements fit within the figure
plt.close()
return fig
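# Draw a subset of pose landmarks and skeleton connections, plus ear-to-ear and
# nose-to-shoulder-midpoint lines to visualise head tilt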
def draw_pose_landmarks(frame, landmarks):
annotated_frame = frame.copy()
# Include relevant landmarks for head position and body
body_landmarks = [
mp_pose.PoseLandmark.NOSE,
mp_pose.PoseLandmark.LEFT_SHOULDER,
mp_pose.PoseLandmark.RIGHT_SHOULDER,
mp_pose.PoseLandmark.LEFT_EAR,
mp_pose.PoseLandmark.RIGHT_EAR,
mp_pose.PoseLandmark.LEFT_ELBOW,
mp_pose.PoseLandmark.RIGHT_ELBOW,
mp_pose.PoseLandmark.LEFT_WRIST,
mp_pose.PoseLandmark.RIGHT_WRIST,
mp_pose.PoseLandmark.LEFT_HIP,
mp_pose.PoseLandmark.RIGHT_HIP,
mp_pose.PoseLandmark.LEFT_KNEE,
mp_pose.PoseLandmark.RIGHT_KNEE,
mp_pose.PoseLandmark.LEFT_ANKLE,
mp_pose.PoseLandmark.RIGHT_ANKLE
]
# Connections for head position and body
body_connections = [
(mp_pose.PoseLandmark.LEFT_EAR, mp_pose.PoseLandmark.LEFT_SHOULDER),
(mp_pose.PoseLandmark.RIGHT_EAR, mp_pose.PoseLandmark.RIGHT_SHOULDER),
(mp_pose.PoseLandmark.NOSE, mp_pose.PoseLandmark.LEFT_SHOULDER),
(mp_pose.PoseLandmark.NOSE, mp_pose.PoseLandmark.RIGHT_SHOULDER),
(mp_pose.PoseLandmark.LEFT_SHOULDER, mp_pose.PoseLandmark.RIGHT_SHOULDER),
(mp_pose.PoseLandmark.LEFT_SHOULDER, mp_pose.PoseLandmark.LEFT_ELBOW),
(mp_pose.PoseLandmark.RIGHT_SHOULDER, mp_pose.PoseLandmark.RIGHT_ELBOW),
(mp_pose.PoseLandmark.LEFT_ELBOW, mp_pose.PoseLandmark.LEFT_WRIST),
(mp_pose.PoseLandmark.RIGHT_ELBOW, mp_pose.PoseLandmark.RIGHT_WRIST),
(mp_pose.PoseLandmark.LEFT_SHOULDER, mp_pose.PoseLandmark.LEFT_HIP),
(mp_pose.PoseLandmark.RIGHT_SHOULDER, mp_pose.PoseLandmark.RIGHT_HIP),
(mp_pose.PoseLandmark.LEFT_HIP, mp_pose.PoseLandmark.RIGHT_HIP),
(mp_pose.PoseLandmark.LEFT_HIP, mp_pose.PoseLandmark.LEFT_KNEE),
(mp_pose.PoseLandmark.RIGHT_HIP, mp_pose.PoseLandmark.RIGHT_KNEE),
(mp_pose.PoseLandmark.LEFT_KNEE, mp_pose.PoseLandmark.LEFT_ANKLE),
(mp_pose.PoseLandmark.RIGHT_KNEE, mp_pose.PoseLandmark.RIGHT_ANKLE)
]
# Draw landmarks
for landmark in body_landmarks:
        # A membership test against the landmark protos always fails; check the index range instead
        if landmark.value < len(landmarks.landmark):
lm = landmarks.landmark[landmark]
h, w, _ = annotated_frame.shape
cx, cy = int(lm.x * w), int(lm.y * h)
cv2.circle(annotated_frame, (cx, cy), 5, (245, 117, 66), -1)
# Draw connections
for connection in body_connections:
start_lm = landmarks.landmark[connection[0]]
end_lm = landmarks.landmark[connection[1]]
h, w, _ = annotated_frame.shape
start_point = (int(start_lm.x * w), int(start_lm.y * h))
end_point = (int(end_lm.x * w), int(end_lm.y * h))
cv2.line(annotated_frame, start_point, end_point, (245, 66, 230), 2)
# Highlight head tilt
left_ear = landmarks.landmark[mp_pose.PoseLandmark.LEFT_EAR]
right_ear = landmarks.landmark[mp_pose.PoseLandmark.RIGHT_EAR]
nose = landmarks.landmark[mp_pose.PoseLandmark.NOSE]
h, w, _ = annotated_frame.shape
left_ear_point = (int(left_ear.x * w), int(left_ear.y * h))
right_ear_point = (int(right_ear.x * w), int(right_ear.y * h))
nose_point = (int(nose.x * w), int(nose.y * h))
# Draw a line between ears to show head tilt
cv2.line(annotated_frame, left_ear_point, right_ear_point, (0, 255, 0), 2)
# Draw a line from nose to the midpoint between shoulders to show head forward/backward tilt
left_shoulder = landmarks.landmark[mp_pose.PoseLandmark.LEFT_SHOULDER]
right_shoulder = landmarks.landmark[mp_pose.PoseLandmark.RIGHT_SHOULDER]
shoulder_mid_x = (left_shoulder.x + right_shoulder.x) / 2
shoulder_mid_y = (left_shoulder.y + right_shoulder.y) / 2
shoulder_mid_point = (int(shoulder_mid_x * w), int(shoulder_mid_y * h))
cv2.line(annotated_frame, nose_point, shoulder_mid_point, (0, 255, 0), 2)
return annotated_frame
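# Collect up to max_samples resized face crops for the most frequent person and for all other clusters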
def get_all_face_samples(organized_faces_folder, output_folder, largest_cluster, max_samples=500):
face_samples = {"most_frequent": [], "others": []}
for cluster_folder in sorted(os.listdir(organized_faces_folder)):
if cluster_folder.startswith("person_"):
person_folder = os.path.join(organized_faces_folder, cluster_folder)
face_files = sorted([f for f in os.listdir(person_folder) if f.endswith('.jpg')])
if face_files:
cluster_id = int(cluster_folder.split('_')[1])
if cluster_id == largest_cluster:
for i, sample in enumerate(face_files[:max_samples]):
face_path = os.path.join(person_folder, sample)
output_path = os.path.join(output_folder, f"face_sample_most_frequent_{i:04d}.jpg")
face_img = cv2.imread(face_path)
if face_img is not None:
small_face = cv2.resize(face_img, (160, 160))
cv2.imwrite(output_path, small_face)
face_samples["most_frequent"].append(output_path)
if len(face_samples["most_frequent"]) >= max_samples:
break
else:
remaining_samples = max_samples - len(face_samples["others"])
if remaining_samples > 0:
for i, sample in enumerate(face_files[:remaining_samples]):
face_path = os.path.join(person_folder, sample)
output_path = os.path.join(output_folder, f"face_sample_other_{cluster_id:02d}_{i:04d}.jpg")
face_img = cv2.imread(face_path)
if face_img is not None:
small_face = cv2.resize(face_img, (160, 160))
cv2.imwrite(output_path, small_face)
face_samples["others"].append(output_path)
if len(face_samples["others"]) >= max_samples:
break
return face_samples
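# End-to-end pipeline: extract frames, detect and cluster faces, score posture, run the
# autoencoder anomaly detection, and build the plots and galleries returned to the UI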
def process_video(video_path, anomaly_threshold, desired_fps, progress=gr.Progress()):
start_time = time.time()
output_folder = "output"
os.makedirs(output_folder, exist_ok=True)
batch_size = 16
GRAPH_COLORS = {
'facial_embeddings': 'navy',
'body_posture': 'purple'
}
with tempfile.TemporaryDirectory() as temp_dir:
aligned_faces_folder = os.path.join(temp_dir, 'aligned_faces')
organized_faces_folder = os.path.join(temp_dir, 'organized_faces')
os.makedirs(aligned_faces_folder, exist_ok=True)
os.makedirs(organized_faces_folder, exist_ok=True)
clip = VideoFileClip(video_path)
video_duration = clip.duration
clip.close()
progress(0, "Starting frame extraction")
frames_folder = os.path.join(temp_dir, 'extracted_frames')
def extraction_progress(percent, message):
            progress(percent / 100, message)
frame_count, original_fps = extract_frames(video_path, frames_folder, desired_fps, extraction_progress)
progress(1, "Frame extraction complete")
progress(0.3, "Processing frames")
embeddings_by_frame, posture_scores_by_frame, posture_landmarks_by_frame, aligned_face_paths = process_frames(
frames_folder, aligned_faces_folder,
frame_count,
progress, batch_size)
if not aligned_face_paths:
raise ValueError("No faces were extracted from the video.")
progress(0.6, "Clustering faces")
embeddings = [embedding for _, embedding in embeddings_by_frame.items()]
clusters = cluster_faces(embeddings)
num_clusters = len(set(clusters))
progress(0.7, "Organizing faces")
organize_faces_by_person(embeddings_by_frame, clusters, aligned_faces_folder, organized_faces_folder)
progress(0.8, "Saving person data")
df, largest_cluster = save_person_data_to_csv(embeddings_by_frame, clusters, desired_fps,
original_fps, temp_dir, video_duration)
# Add 'Seconds' column to df
df['Seconds'] = df['Timecode'].apply(
lambda x: sum(float(t) * 60 ** i for i, t in enumerate(reversed(x.split(':')))))
progress(0.85, "Getting face samples")
face_samples = get_all_face_samples(organized_faces_folder, output_folder, largest_cluster)
progress(0.9, "Performing anomaly detection")
embedding_columns = [col for col in df.columns if col.startswith('Raw_Embedding_')]
X_embeddings = df[embedding_columns].values
try:
            X_posture = np.array([posture_scores_by_frame.get(frame) for frame in df['Frame']])
            # Drop frames with no posture estimate and cast to float before feeding the autoencoder
            X_posture = np.array([s for s in X_posture if s is not None], dtype=np.float32).reshape(-1, 1)
# Ensure X_posture is not empty
if len(X_posture) == 0:
raise ValueError("No valid posture data found")
mse_embeddings, mse_posture = anomaly_detection(X_embeddings, X_posture, batch_size=batch_size)
progress(0.95, "Generating plots")
mse_plot_embeddings, anomaly_frames_embeddings = plot_mse(df, mse_embeddings, "Facial Features",
color=GRAPH_COLORS['facial_embeddings'],
anomaly_threshold=anomaly_threshold)
mse_histogram_embeddings = plot_mse_histogram(mse_embeddings, "MSE Distribution: Facial Features",
anomaly_threshold, color=GRAPH_COLORS['facial_embeddings'])
mse_plot_posture, anomaly_frames_posture = plot_mse(df, mse_posture, "Body Posture",
color=GRAPH_COLORS['body_posture'],
anomaly_threshold=anomaly_threshold)
mse_histogram_posture = plot_mse_histogram(mse_posture, "MSE Distribution: Body Posture",
anomaly_threshold, color=GRAPH_COLORS['body_posture'])
mse_heatmap_embeddings = plot_mse_heatmap(mse_embeddings, "Facial Features MSE Heatmap", df)
mse_heatmap_posture = plot_mse_heatmap(mse_posture, "Body Posture MSE Heatmap", df)
except Exception as e:
print(f"Error details: {str(e)}")
import traceback
traceback.print_exc()
return (f"Error in video processing: {str(e)}",) + (None,) * 14
progress(1.0, "Preparing results")
results = f"Number of persons detected: {num_clusters}\n\n"
results += "Breakdown:\n"
for cluster_id in range(num_clusters):
face_count = len([c for c in clusters if c == cluster_id])
results += f"Person {cluster_id + 1}: {face_count} face frames\n"
end_time = time.time()
execution_time = end_time - start_time
def add_timecode_to_image(image, timecode):
img_pil = Image.fromarray(image)
draw = ImageDraw.Draw(img_pil)
            try:
                font = ImageFont.truetype("arial.ttf", 15)
            except OSError:  # arial.ttf is usually missing on Linux hosts
                font = ImageFont.load_default()
            draw.text((10, 10), timecode, fill=(255, 0, 0), font=font)
return np.array(img_pil)
        # Build galleries of anomaly frames: face crops for facial-feature anomalies, full frames for posture anomalies
anomaly_faces_embeddings = []
for frame in anomaly_frames_embeddings:
face_path = os.path.join(aligned_faces_folder, f"frame_{frame}_face.jpg")
if os.path.exists(face_path):
face_img = cv2.imread(face_path)
if face_img is not None:
face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
timecode = df[df['Frame'] == frame]['Timecode'].iloc[0]
face_img_with_timecode = add_timecode_to_image(face_img, timecode)
anomaly_faces_embeddings.append(face_img_with_timecode)
anomaly_frames_posture_images = []
for frame in anomaly_frames_posture:
frame_path = os.path.join(frames_folder, f"frame_{frame:04d}.jpg")
if os.path.exists(frame_path):
frame_img = cv2.imread(frame_path)
if frame_img is not None:
frame_img = cv2.cvtColor(frame_img, cv2.COLOR_BGR2RGB)
pose_results = pose.process(frame_img)
if pose_results.pose_landmarks:
frame_img = draw_pose_landmarks(frame_img, pose_results.pose_landmarks)
timecode = df[df['Frame'] == frame]['Timecode'].iloc[0]
frame_img_with_timecode = add_timecode_to_image(frame_img, timecode)
anomaly_frames_posture_images.append(frame_img_with_timecode)
return (
execution_time,
results,
df,
mse_embeddings,
mse_posture,
mse_plot_embeddings,
mse_histogram_embeddings,
mse_plot_posture,
mse_histogram_posture,
mse_heatmap_embeddings,
mse_heatmap_posture,
face_samples["most_frequent"],
face_samples["others"],
anomaly_faces_embeddings,
anomaly_frames_posture_images,
aligned_faces_folder,
frames_folder
)
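# Gradio interface: video input and threshold slider on top, tabbed result views below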
with gr.Blocks() as iface:
gr.Markdown("""
# Facial Expression and Body Language Anomaly Detection
This application analyzes videos to detect anomalies in facial features and body language.
It extracts facial embeddings and body posture scores from the video frames, then trains autoencoders
on them and flags frames whose reconstruction error deviates strongly from the person's norm.
For more information, visit: [https://github.com/reab5555/Facial-Expression-Anomaly-Detection](https://github.com/reab5555/Facial-Expression-Anomaly-Detection)
""")
with gr.Row():
video_input = gr.Video()
anomaly_threshold = gr.Slider(minimum=1, maximum=5, step=0.1, value=3, label="Anomaly Detection Threshold")
process_btn = gr.Button("Process Video")
execution_time = gr.Number(label="Execution Time (seconds)")
with gr.Group(visible=False) as results_group:
results_text = gr.TextArea(label="Anomaly Detection Results", lines=4)
with gr.Tab("Facial Features"):
mse_features_plot = gr.Plot(label="MSE: Facial Features")
mse_features_hist = gr.Plot(label="MSE Distribution: Facial Features")
mse_features_heatmap = gr.Plot(label="MSE Heatmap: Facial Features")
anomaly_frames_features = gr.Gallery(label="Anomaly Frames (Facial Features)", columns=6, rows=2, height="auto")
with gr.Tab("Body Posture"):
mse_posture_plot = gr.Plot(label="MSE: Body Posture")
mse_posture_hist = gr.Plot(label="MSE Distribution: Body Posture")
mse_posture_heatmap = gr.Plot(label="MSE Heatmap: Body Posture")
anomaly_frames_posture = gr.Gallery(label="Anomaly Frames (Body Posture)", columns=6, rows=2, height="auto")
with gr.Tab("Face Samples"):
face_samples_most_frequent = gr.Gallery(label="Most Frequent Person Samples (Target)", columns=6, rows=2, height="auto")
face_samples_others = gr.Gallery(label="Other Persons Samples", columns=6, rows=1, height="auto")
# Hidden components to store intermediate results
df_store = gr.State()
mse_features_store = gr.State()
mse_posture_store = gr.State()
aligned_faces_folder_store = gr.State()
frames_folder_store = gr.State()
mse_heatmap_embeddings_store = gr.State()
mse_heatmap_posture_store = gr.State()
    def process_and_show_completion(video_input_path, anomaly_threshold_input, progress=gr.Progress()):
try:
print("Starting video processing...")
            results = process_video(video_input_path, anomaly_threshold_input, FIXED_FPS, progress=progress)
print("Video processing completed.")
if isinstance(results[0], str) and results[0].startswith("Error"):
print(f"Error occurred: {results[0]}")
                return [results[0]] + [None] * 18  # pad to the 19 outputs wired to process_btn.click
exec_time, results_summary, df, mse_embeddings, mse_posture, \
mse_plot_embeddings, mse_histogram_embeddings, \
mse_plot_posture, mse_histogram_posture, \
mse_heatmap_embeddings, mse_heatmap_posture, \
face_samples_frequent, face_samples_other, \
anomaly_faces_embeddings, anomaly_frames_posture_images, \
aligned_faces_folder, frames_folder = results
# Convert numpy arrays to PIL Images for the galleries
anomaly_faces_embeddings_pil = [Image.fromarray(face) for face in anomaly_faces_embeddings]
anomaly_frames_posture_pil = [Image.fromarray(frame) for frame in anomaly_frames_posture_images]
# Ensure face samples are in the correct format for Gradio
face_samples_frequent = [Image.open(path) for path in face_samples_frequent]
face_samples_other = [Image.open(path) for path in face_samples_other]
output = [
exec_time, results_summary,
df, mse_embeddings, mse_posture,
mse_plot_embeddings, mse_plot_posture,
mse_histogram_embeddings, mse_histogram_posture,
mse_heatmap_embeddings, mse_heatmap_posture,
anomaly_faces_embeddings_pil, anomaly_frames_posture_pil,
face_samples_frequent, face_samples_other,
aligned_faces_folder, frames_folder,
mse_embeddings, mse_posture
]
return output
except Exception as e:
error_message = f"An error occurred: {str(e)}"
print(error_message)
import traceback
traceback.print_exc()
return [error_message] + [None] * 18
process_btn.click(
process_and_show_completion,
inputs=[video_input, anomaly_threshold],
outputs=[
execution_time, results_text, df_store,
mse_features_store, mse_posture_store,
mse_features_plot, mse_posture_plot,
mse_features_hist, mse_posture_hist,
mse_features_heatmap, mse_posture_heatmap,
anomaly_frames_features, anomaly_frames_posture,
face_samples_most_frequent, face_samples_others,
aligned_faces_folder_store, frames_folder_store,
mse_heatmap_embeddings_store, mse_heatmap_posture_store
]
).then(
lambda: gr.Group(visible=True),
inputs=None,
outputs=[results_group]
)
if __name__ == "__main__":
iface.launch()