import math
import os
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from facenet_pytorch import InceptionResnetV1, MTCNN
import tensorflow as tf
import mediapipe as mp
from fer import FER
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from moviepy.editor import VideoFileClip
from PIL import Image
import gradio as gr
import tempfile
import shutil
import time
matplotlib.rcParams['figure.dpi'] = 500
matplotlib.rcParams['savefig.dpi'] = 500
# Initialize models and other global variables
device = 'cuda' if torch.cuda.is_available() else 'cpu'
mtcnn = MTCNN(keep_all=False, device=device, thresholds=[0.95, 0.95, 0.95], min_face_size=80)
model = InceptionResnetV1(pretrained='vggface2').eval().to(device)
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=1, min_detection_confidence=0.5)
emotion_detector = FER(mtcnn=False)
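# Convert a frame index to an "HH:MM:SS.mmm" timecode by scaling its position
# in the frame range to the video duration.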
def frame_to_timecode(frame_num, total_frames, duration):
total_seconds = (frame_num / total_frames) * duration
hours = int(total_seconds // 3600)
minutes = int((total_seconds % 3600) // 60)
seconds = int(total_seconds % 60)
milliseconds = int((total_seconds - int(total_seconds)) * 1000)
return f"{hours:02d}:{minutes:02d}:{seconds:02d}.{milliseconds:03d}"
def seconds_to_timecode(seconds):
hours = int(seconds // 3600)
minutes = int((seconds % 3600) // 60)
seconds = int(seconds % 60)
return f"{hours:02d}:{minutes:02d}:{seconds:02d}"
def timecode_to_seconds(timecode):
# Accepts "HH:MM:SS" as well as "HH:MM:SS.mmm" (the format produced by frame_to_timecode).
h, m, s = timecode.split(':')
return int(h) * 3600 + int(m) * 60 + float(s)
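# Compute a FaceNet (InceptionResnetV1, vggface2) embedding and FER emotion scores
# for a single cropped face image; pixel values are scaled to [-1, 1] before embedding.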
def get_face_embedding_and_emotion(face_img):
face_tensor = torch.tensor(face_img).permute(2, 0, 1).unsqueeze(0).float() / 255
face_tensor = (face_tensor - 0.5) / 0.5
face_tensor = face_tensor.to(device)
with torch.no_grad():
embedding = model(face_tensor)
emotions = emotion_detector.detect_emotions(face_img)
if emotions:
emotion_dict = emotions[0]['emotions']
else:
emotion_dict = {e: 0 for e in ['angry', 'disgust', 'fear', 'sad', 'happy']}
return embedding.cpu().numpy().flatten(), emotion_dict
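# Rotate a face crop so the eyes are horizontal, using MediaPipe Face Mesh eye
# landmarks to estimate the roll angle.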
def alignFace(img):
img_raw = img.copy()
results = face_mesh.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
if not results.multi_face_landmarks:
return None
landmarks = results.multi_face_landmarks[0].landmark
left_eye = np.array([[landmarks[33].x, landmarks[33].y], [landmarks[160].x, landmarks[160].y],
[landmarks[158].x, landmarks[158].y], [landmarks[144].x, landmarks[144].y],
[landmarks[153].x, landmarks[153].y], [landmarks[145].x, landmarks[145].y]])
right_eye = np.array([[landmarks[362].x, landmarks[362].y], [landmarks[385].x, landmarks[385].y],
[landmarks[387].x, landmarks[387].y], [landmarks[263].x, landmarks[263].y],
[landmarks[373].x, landmarks[373].y], [landmarks[380].x, landmarks[380].y]])
left_eye_center = left_eye.mean(axis=0).astype(np.int32)
right_eye_center = right_eye.mean(axis=0).astype(np.int32)
dY = right_eye_center[1] - left_eye_center[1]
dX = right_eye_center[0] - left_eye_center[0]
angle = np.degrees(np.arctan2(dY, dX))
desired_angle = 0
angle_diff = desired_angle - angle
height, width = img_raw.shape[:2]
center = (width // 2, height // 2)
rotation_matrix = cv2.getRotationMatrix2D(center, angle_diff, 1)
new_img = cv2.warpAffine(img_raw, rotation_matrix, (width, height))
return new_img
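# Sample frames from the video at roughly the desired FPS and save them as JPEGs;
# returns the number of frames written and the original FPS.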
def extract_frames(video_path, output_folder, desired_fps, progress_callback=None):
os.makedirs(output_folder, exist_ok=True)
clip = VideoFileClip(video_path)
original_fps = clip.fps
duration = clip.duration
total_frames = int(duration * original_fps)
step = max(1, original_fps / desired_fps)
total_frames_to_extract = int(total_frames / step)
frame_count = 0
for t in np.arange(0, duration, step / original_fps):
frame = clip.get_frame(t)
img = Image.fromarray(frame)
img.save(os.path.join(output_folder, f"frame_{frame_count:04d}.jpg"))
frame_count += 1
if progress_callback:
progress = min(100, (frame_count / total_frames_to_extract) * 100)
progress_callback(progress, f"Extracting frame {frame_count} of {total_frames_to_extract}")
if frame_count >= total_frames_to_extract:
break
clip.close()
return frame_count, original_fps
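# Heuristic frontal-pose check: the angle between the nose-to-left-chin and
# nose-to-right-chin vectors should be close to 180 degrees for a frontal face.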
def is_frontal_face(landmarks, threshold=40):
nose_tip = landmarks[4]
left_chin = landmarks[234]
right_chin = landmarks[454]
nose_to_left = [left_chin.x - nose_tip.x, left_chin.y - nose_tip.y]
nose_to_right = [right_chin.x - nose_tip.x, right_chin.y - nose_tip.y]
dot_product = nose_to_left[0] * nose_to_right[0] + nose_to_left[1] * nose_to_right[1]
magnitude_left = math.sqrt(nose_to_left[0] ** 2 + nose_to_left[1] ** 2)
magnitude_right = math.sqrt(nose_to_right[0] ** 2 + nose_to_right[1] ** 2)
cos_angle = dot_product / (magnitude_left * magnitude_right)
angle = math.acos(cos_angle)
angle_degrees = math.degrees(angle)
return abs(180 - angle_degrees) < threshold
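# Detect, align and embed faces in batches of extracted frames. Only high-confidence
# (p >= 0.99), roughly frontal detections are kept; aligned 160x160 crops are saved
# and their embeddings and emotion scores are indexed by frame number.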
def process_frames(frames_folder, aligned_faces_folder, frame_count, progress, batch_size):
embeddings_by_frame = {}
emotions_by_frame = {}
aligned_face_paths = []
frame_files = sorted([f for f in os.listdir(frames_folder) if f.endswith('.jpg')])
for i in range(0, len(frame_files), batch_size):
batch_files = frame_files[i:i + batch_size]
batch_frames = []
batch_nums = []
for frame_file in batch_files:
frame_num = int(frame_file.split('_')[1].split('.')[0])
frame_path = os.path.join(frames_folder, frame_file)
frame = cv2.imread(frame_path)
if frame is not None:
batch_frames.append(frame)
batch_nums.append(frame_num)
if batch_frames:
batch_boxes, batch_probs = mtcnn.detect(batch_frames)
for j, (frame, frame_num, boxes, probs) in enumerate(
zip(batch_frames, batch_nums, batch_boxes, batch_probs)):
if boxes is not None and len(boxes) > 0 and probs[0] >= 0.99:
x1, y1, x2, y2 = [int(b) for b in boxes[0]]
face = frame[y1:y2, x1:x2]
if face.size > 0:
results = face_mesh.process(cv2.cvtColor(face, cv2.COLOR_BGR2RGB))
if results.multi_face_landmarks and is_frontal_face(results.multi_face_landmarks[0].landmark):
aligned_face = alignFace(face)
if aligned_face is not None:
aligned_face_resized = cv2.resize(aligned_face, (160, 160))
output_path = os.path.join(aligned_faces_folder, f"frame_{frame_num}_face.jpg")
cv2.imwrite(output_path, aligned_face_resized)
aligned_face_paths.append(output_path)
embedding, emotion = get_face_embedding_and_emotion(aligned_face_resized)
embeddings_by_frame[frame_num] = embedding
emotions_by_frame[frame_num] = emotion
progress((i + len(batch_files)) / len(frame_files),
f"Processing frames {i + 1} to {min(i + len(batch_files), len(frame_files))} of {len(frame_files)}")
return embeddings_by_frame, emotions_by_frame, aligned_face_paths
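# Group face embeddings into identities with DBSCAN (cosine distance). Falls back to
# a single cluster when there are too few faces or everything is labelled as noise.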
def cluster_faces(embeddings):
if len(embeddings) < 2:
print("Not enough faces for clustering. Assigning all to one cluster.")
return np.zeros(len(embeddings), dtype=int)
X = np.stack(embeddings)
dbscan = DBSCAN(eps=0.5, min_samples=5, metric='cosine')
clusters = dbscan.fit_predict(X)
if np.all(clusters == -1):
print("DBSCAN assigned all to noise. Considering as one cluster.")
return np.zeros(len(embeddings), dtype=int)
return clusters
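# Copy each aligned face crop into a per-identity folder (person_<cluster>).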
def organize_faces_by_person(embeddings_by_frame, clusters, aligned_faces_folder, organized_faces_folder):
for (frame_num, embedding), cluster in zip(embeddings_by_frame.items(), clusters):
person_folder = os.path.join(organized_faces_folder, f"person_{cluster}")
os.makedirs(person_folder, exist_ok=True)
src = os.path.join(aligned_faces_folder, f"frame_{frame_num}_face.jpg")
dst = os.path.join(person_folder, f"frame_{frame_num}_face.jpg")
shutil.copy(src, dst)
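# Build a DataFrame for the most frequent person (largest cluster): frame number,
# timecode, raw embedding dimensions and per-frame emotion scores. The embeddings
# are also saved to face_embeddings.npy.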
def save_person_data_to_csv(embeddings_by_frame, emotions_by_frame, clusters, desired_fps, original_fps, output_folder, video_duration):
emotions = ['angry', 'disgust', 'fear', 'sad', 'happy']
person_data = {}
for (frame_num, embedding), (_, emotion_dict), cluster in zip(embeddings_by_frame.items(), emotions_by_frame.items(), clusters):
if cluster not in person_data:
person_data[cluster] = []
person_data[cluster].append((frame_num, embedding, {e: emotion_dict[e] for e in emotions}))
largest_cluster = max(person_data, key=lambda k: len(person_data[k]))
data = person_data[largest_cluster]
data.sort(key=lambda x: x[0])
frames, embeddings, emotions_data = zip(*data)
embeddings_array = np.array(embeddings)
np.save(os.path.join(output_folder, 'face_embeddings.npy'), embeddings_array)
total_frames = max(frames)
timecodes = [frame_to_timecode(frame, total_frames, video_duration) for frame in frames]
df_data = {
'Frame': frames,
'Timecode': timecodes,
'Embedding_Index': range(len(embeddings))
}
for i in range(len(embeddings[0])):
df_data[f'Raw_Embedding_{i}'] = [embedding[i] for embedding in embeddings]
for emotion in emotions:
df_data[emotion] = [e[emotion] for e in emotions_data]
df = pd.DataFrame(df_data)
return df, largest_cluster
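# Fully connected autoencoder (input -> 512 -> 256 -> 128 -> 64 and back) used to
# reconstruct per-frame features; reconstruction error (MSE) serves as the anomaly score.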
class Autoencoder(nn.Module):
def __init__(self, input_size):
super(Autoencoder, self).__init__()
self.encoder = nn.Sequential(
nn.Linear(input_size, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
nn.Linear(256, 128),
nn.ReLU(),
nn.Linear(128, 64)
)
self.decoder = nn.Sequential(
nn.Linear(64, 128),
nn.ReLU(),
nn.Linear(128, 256),
nn.ReLU(),
nn.Linear(256, 512),
nn.ReLU(),
nn.Linear(512, input_size)
)
def forward(self, x):
batch_size, seq_len, _ = x.size()
x = x.view(batch_size * seq_len, -1)
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded.view(batch_size, seq_len, -1)
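# Flag frames whose MSE exceeds mean + threshold * std of the MSE distribution.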
def determine_anomalies(mse_values, threshold):
mean = np.mean(mse_values)
std = np.std(mse_values)
anomalies = mse_values > (mean + threshold * std)
return anomalies
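# Train one autoencoder on the emotion scores and one on the facial embeddings,
# then return the per-frame reconstruction MSE for each.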
def anomaly_detection(X_emotions, X_embeddings, epochs=200, batch_size=8, patience=3):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Normalize emotions
scaler_emotions = MinMaxScaler()
X_emotions_scaled = scaler_emotions.fit_transform(X_emotions)
# Process emotions
X_emotions_scaled = torch.FloatTensor(X_emotions_scaled).to(device)
if X_emotions_scaled.dim() == 2:
X_emotions_scaled = X_emotions_scaled.unsqueeze(0)
model_emotions = Autoencoder(input_size=X_emotions_scaled.shape[2]).to(device)
criterion = nn.MSELoss()
optimizer_emotions = optim.Adam(model_emotions.parameters())
# Train emotions model
for epoch in range(epochs):
model_emotions.train()
optimizer_emotions.zero_grad()
output_emotions = model_emotions(X_emotions_scaled)
loss_emotions = criterion(output_emotions, X_emotions_scaled)
loss_emotions.backward()
optimizer_emotions.step()
# Process facial embeddings
X_embeddings = torch.FloatTensor(X_embeddings).to(device)
if X_embeddings.dim() == 2:
X_embeddings = X_embeddings.unsqueeze(0)
model_embeddings = Autoencoder(input_size=X_embeddings.shape[2]).to(device)
optimizer_embeddings = optim.Adam(model_embeddings.parameters())
# Train embeddings model
for epoch in range(epochs):
model_embeddings.train()
optimizer_embeddings.zero_grad()
output_embeddings = model_embeddings(X_embeddings)
loss_embeddings = criterion(output_embeddings, X_embeddings)
loss_embeddings.backward()
optimizer_embeddings.step()
# Compute MSE for emotions and embeddings
model_emotions.eval()
model_embeddings.eval()
with torch.no_grad():
reconstructed_emotions = model_emotions(X_emotions_scaled).cpu().numpy()
reconstructed_embeddings = model_embeddings(X_embeddings).cpu().numpy()
mse_emotions = np.mean(np.power(X_emotions_scaled.cpu().numpy() - reconstructed_emotions, 2), axis=2).squeeze()
mse_embeddings = np.mean(np.power(X_embeddings.cpu().numpy() - reconstructed_embeddings, 2), axis=2).squeeze()
return mse_emotions, mse_embeddings
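# Plot per-frame MSE over time with a rolling mean/std band, a median baseline,
# an anomaly threshold line, and red shading over groups of nearby anomalies.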
def plot_mse(df, mse_values, title, color='blue', time_threshold=3, anomaly_threshold=4):
fig, ax = plt.subplots(figsize=(16, 8))  # dpi comes from the global rcParams (500)
if 'Seconds' not in df.columns:
df['Seconds'] = df['Timecode'].apply(
lambda x: sum(float(t) * 60 ** i for i, t in enumerate(reversed(x.split(':')))))
# Ensure df and mse_values have the same length and remove NaN values
min_length = min(len(df), len(mse_values))
df = df.iloc[:min_length]
mse_values = mse_values[:min_length]
# Remove NaN values
mask = ~np.isnan(mse_values)
df = df[mask]
mse_values = mse_values[mask]
mean = pd.Series(mse_values).rolling(window=10).mean()
std = pd.Series(mse_values).rolling(window=10).std()
median = np.median(mse_values)
ax.scatter(df['Seconds'], mse_values, color=color, alpha=0.3, s=5)
ax.plot(df['Seconds'], mean, color=color, linewidth=2)
ax.fill_between(df['Seconds'], mean - std, mean + std, color=color, alpha=0.2)
# Add median line
ax.axhline(y=median, color='black', linestyle='--', label='Baseline')
ax.text(ax.get_xlim()[1], median, 'Baseline', verticalalignment='center', horizontalalignment='left', color='black')
# Add threshold line
threshold = np.mean(mse_values) + anomaly_threshold * np.std(mse_values)
ax.axhline(y=threshold, color='red', linestyle='--', label=f'Threshold: {anomaly_threshold:.1f}')
ax.text(ax.get_xlim()[1], threshold, f'Threshold: {anomaly_threshold:.1f}', verticalalignment='center', horizontalalignment='left', color='red')
anomalies = determine_anomalies(mse_values, anomaly_threshold)
anomaly_frames = df['Frame'].iloc[anomalies].tolist()
ax.scatter(df['Seconds'].iloc[anomalies], mse_values[anomalies], color='red', s=25, zorder=5)
anomaly_data = list(zip(df['Timecode'].iloc[anomalies],
df['Seconds'].iloc[anomalies],
mse_values[anomalies]))
anomaly_data.sort(key=lambda x: x[1])
grouped_anomalies = []
current_group = []
for timecode, sec, mse in anomaly_data:
if not current_group or sec - current_group[-1][1] <= time_threshold:
current_group.append((timecode, sec, mse))
else:
grouped_anomalies.append(current_group)
current_group = [(timecode, sec, mse)]
if current_group:
grouped_anomalies.append(current_group)
for group in grouped_anomalies:
start_sec = group[0][1]
end_sec = group[-1][1]
rect = Rectangle((start_sec, ax.get_ylim()[0]), end_sec - start_sec, ax.get_ylim()[1] - ax.get_ylim()[0],
facecolor='red', alpha=0.3, zorder=1)
ax.add_patch(rect)
for group in grouped_anomalies:
highest_mse_anomaly = max(group, key=lambda x: x[2])
timecode, sec, mse = highest_mse_anomaly
ax.annotate(timecode, (sec, mse), textcoords="offset points", xytext=(0, 10),
ha='center', fontsize=6, color='red')
max_seconds = df['Seconds'].max()
num_ticks = 100
tick_locations = np.linspace(0, max_seconds, num_ticks)
tick_labels = [seconds_to_timecode(int(s)) for s in tick_locations]
ax.set_xticks(tick_locations)
ax.set_xticklabels(tick_labels, rotation=90, ha='center', fontsize=6)
ax.set_xlabel('Timecode')
ax.set_ylabel('Mean Squared Error')
ax.set_title(title)
ax.grid(True, linestyle='--', alpha=0.7)
ax.legend()
plt.tight_layout()
plt.close()
return fig, anomaly_frames
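# Histogram of MSE values with the anomaly threshold marked as a vertical line.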
def plot_mse_histogram(mse_values, title, anomaly_threshold, color='blue'):
fig, ax = plt.subplots(figsize=(16, 8))  # dpi comes from the global rcParams (500)
ax.hist(mse_values, bins=100, edgecolor='black', color=color, alpha=0.7)
ax.set_xlabel('Mean Squared Error')
ax.set_ylabel('Number of Samples')
ax.set_title(title)
mean = np.mean(mse_values)
std = np.std(mse_values)
threshold = mean + anomaly_threshold * std
ax.axvline(x=threshold, color='red', linestyle='--', linewidth=2)
# Move annotation to the bottom and away from the line
ax.annotate(f'Threshold: {anomaly_threshold:.1f}',
xy=(threshold, ax.get_ylim()[0]),
xytext=(0, -20),
textcoords='offset points',
ha='center', va='top',
bbox=dict(boxstyle='round,pad=0.5', fc='white', ec='none', alpha=0.7),
color='red')
plt.tight_layout()
plt.close()
return fig
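# Plot a single emotion's probability over time; the 1-7 anomaly threshold is mapped
# to a 0-1 probability threshold and points above it are highlighted in red.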
def plot_emotion(df, emotion, color, anomaly_threshold):
fig, ax = plt.subplots(figsize=(16, 8))  # dpi comes from the global rcParams (500)
df['Seconds'] = df['Timecode'].apply(
lambda x: sum(float(t) * 60 ** i for i, t in enumerate(reversed(x.split(':')))))
mean = df[emotion].rolling(window=10).mean()
std = df[emotion].rolling(window=10).std()
median = df[emotion].median()
ax.scatter(df['Seconds'], df[emotion], color=color, alpha=0.3, s=5)
ax.plot(df['Seconds'], mean, color=color, linewidth=2)
ax.fill_between(df['Seconds'], mean - std, mean + std, color=color, alpha=0.2)
# Add median line
ax.axhline(y=median, color='black', linestyle='--', label='Baseline')
ax.text(ax.get_xlim()[1], median, 'Baseline', verticalalignment='center', horizontalalignment='left', color='black')
# Convert anomaly threshold to probability
probability_threshold = (anomaly_threshold - 1) / 6 # Convert 1-7 scale to 0-1 probability
# Add threshold line and detect anomalies
ax.axhline(y=probability_threshold, color='red', linestyle='--', label=f'Threshold: {probability_threshold:.2f}')
ax.text(ax.get_xlim()[1], probability_threshold, f'Threshold: {probability_threshold:.2f}',
verticalalignment='center', horizontalalignment='left', color='red')
# Detect and highlight anomalies
anomalies = df[emotion] >= probability_threshold
ax.scatter(df['Seconds'][anomalies], df[emotion][anomalies], color='red', s=25, zorder=5)
max_seconds = df['Seconds'].max()
num_ticks = 100
tick_locations = np.linspace(0, max_seconds, num_ticks)
tick_labels = [seconds_to_timecode(int(s)) for s in tick_locations]
ax.set_xticks(tick_locations)
ax.set_xticklabels(tick_labels, rotation=90, ha='center', fontsize=6)
ax.set_xlabel('Timecode')
ax.set_ylabel('Emotion Probability')
ax.set_title(f"{emotion.capitalize()} Over Time")
ax.grid(True, linestyle='--', alpha=0.7)
ax.legend()
plt.tight_layout()
plt.close()
return fig
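# Collect up to max_samples face crops for the most frequent person and for all
# other clusters, resized to 160x160 for the output galleries.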
def get_all_face_samples(organized_faces_folder, output_folder, largest_cluster, max_samples=500):
face_samples = {"most_frequent": [], "others": []}
for cluster_folder in sorted(os.listdir(organized_faces_folder)):
if cluster_folder.startswith("person_"):
person_folder = os.path.join(organized_faces_folder, cluster_folder)
face_files = sorted([f for f in os.listdir(person_folder) if f.endswith('.jpg')])
if face_files:
cluster_id = int(cluster_folder.split('_')[1])
if cluster_id == largest_cluster:
for i, sample in enumerate(face_files[:max_samples]):
face_path = os.path.join(person_folder, sample)
output_path = os.path.join(output_folder, f"face_sample_most_frequent_{i:04d}.jpg")
face_img = cv2.imread(face_path)
if face_img is not None:
small_face = cv2.resize(face_img, (160, 160))
cv2.imwrite(output_path, small_face)
face_samples["most_frequent"].append(output_path)
if len(face_samples["most_frequent"]) >= max_samples:
break
else:
remaining_samples = max_samples - len(face_samples["others"])
if remaining_samples > 0:
for i, sample in enumerate(face_files[:remaining_samples]):
face_path = os.path.join(person_folder, sample)
output_path = os.path.join(output_folder, f"face_sample_other_{cluster_id:02d}_{i:04d}.jpg")
face_img = cv2.imread(face_path)
if face_img is not None:
small_face = cv2.resize(face_img, (160, 160))
cv2.imwrite(output_path, small_face)
face_samples["others"].append(output_path)
if len(face_samples["others"]) >= max_samples:
break
return face_samples
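# End-to-end pipeline driven by the Gradio button: extract frames, detect and align
# faces, cluster identities, run anomaly detection on the largest cluster, and return
# plots, galleries and intermediate data for the UI.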
def process_video(video_path, anomaly_threshold, desired_fps, progress=gr.Progress()):
start_time = time.time()
output_folder = "output"
os.makedirs(output_folder, exist_ok=True)
batch_size = 16
with tempfile.TemporaryDirectory() as temp_dir:
aligned_faces_folder = os.path.join(temp_dir, 'aligned_faces')
organized_faces_folder = os.path.join(temp_dir, 'organized_faces')
os.makedirs(aligned_faces_folder, exist_ok=True)
os.makedirs(organized_faces_folder, exist_ok=True)
clip = VideoFileClip(video_path)
video_duration = clip.duration
clip.close()
progress(0, "Starting frame extraction")
frames_folder = os.path.join(temp_dir, 'extracted_frames')
def extraction_progress(percent, message):
progress(percent / 100, message)
frame_count, original_fps = extract_frames(video_path, frames_folder, desired_fps, extraction_progress)
progress(1, "Frame extraction complete")
progress(0.3, "Processing frames")
embeddings_by_frame, emotions_by_frame, aligned_face_paths = process_frames(frames_folder, aligned_faces_folder,
frame_count,
progress, batch_size)
if not aligned_face_paths:
return ("No faces were extracted from the video.",) + (None,) * 10
progress(0.6, "Clustering faces")
embeddings = [embedding for _, embedding in embeddings_by_frame.items()]
clusters = cluster_faces(embeddings)
num_clusters = len(set(clusters))
progress(0.7, "Organizing faces")
organize_faces_by_person(embeddings_by_frame, clusters, aligned_faces_folder, organized_faces_folder)
progress(0.8, "Saving person data")
df, largest_cluster = save_person_data_to_csv(embeddings_by_frame, emotions_by_frame, clusters, desired_fps,
original_fps, temp_dir, video_duration)
# Add 'Seconds' column to df
df['Seconds'] = df['Timecode'].apply(
lambda x: sum(float(t) * 60 ** i for i, t in enumerate(reversed(x.split(':')))))
progress(0.85, "Getting face samples")
face_samples = get_all_face_samples(organized_faces_folder, output_folder, largest_cluster)
progress(0.9, "Performing anomaly detection")
emotion_columns = ['angry', 'disgust', 'fear', 'sad', 'happy']
embedding_columns = [col for col in df.columns if col.startswith('Raw_Embedding_')]
X_emotions = df[emotion_columns].values
X_embeddings = df[embedding_columns].values
try:
mse_emotions, mse_embeddings = anomaly_detection(X_emotions, X_embeddings, batch_size=batch_size)
progress(0.95, "Generating plots")
mse_plot_embeddings, anomaly_frames_embeddings = plot_mse(df, mse_embeddings, "Facial Embeddings",
color='green',
anomaly_threshold=anomaly_threshold)
mse_histogram_embeddings = plot_mse_histogram(mse_embeddings, "MSE Distribution: Facial Embeddings",
anomaly_threshold, color='green')
# Add emotion plots
emotion_plots = []
for emotion, color in zip(emotion_columns, ['purple', 'brown', 'green', 'orange', 'darkblue']):
emotion_plot = plot_emotion(df, emotion, color, anomaly_threshold)
emotion_plots.append(emotion_plot)
mse_var_emotions = np.var(mse_emotions)
mse_var_embeddings = np.var(mse_embeddings)
except Exception as e:
print(f"Error details: {str(e)}")
return (f"Error in anomaly detection: {str(e)}",) + (None,) * 15
progress(1.0, "Preparing results")
results = f"Number of persons/clusters detected: {num_clusters}\n\n"
results += f"Breakdown of persons/clusters:\n"
for cluster_id in range(num_clusters):
results += f"Person/Cluster {cluster_id + 1}: {len([c for c in clusters if c == cluster_id])} frames\n"
end_time = time.time()
execution_time = end_time - start_time
# Load anomaly frames as images
anomaly_faces_embeddings = [
cv2.imread(os.path.join(aligned_faces_folder, f"frame_{frame}_face.jpg"))
for frame in anomaly_frames_embeddings
if os.path.exists(os.path.join(aligned_faces_folder, f"frame_{frame}_face.jpg"))
]
anomaly_faces_embeddings = [cv2.cvtColor(face, cv2.COLOR_BGR2RGB) for face in anomaly_faces_embeddings if face is not None]
return (
execution_time,
results,
df,
mse_embeddings,
mse_emotions,
mse_plot_embeddings,
mse_histogram_embeddings,
*emotion_plots,
face_samples["most_frequent"],
face_samples["others"],
anomaly_faces_embeddings,
aligned_faces_folder
)
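# Gradio interface: video input plus threshold/FPS sliders, with outputs for results
# text, anomaly galleries, MSE plots and per-emotion plots.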
with gr.Blocks() as iface:
gr.Markdown("# Facial Expressions Anomaly Detection")
with gr.Row():
video_input = gr.Video()
anomaly_threshold = gr.Slider(minimum=1, maximum=7, step=0.1, value=4.5, label="Anomaly Detection Threshold")
fps_slider = gr.Slider(minimum=10, maximum=20, step=5, value=20, label="Frames Per Second")
process_btn = gr.Button("Process Video")
execution_time = gr.Number(label="Execution Time (seconds)")
results_text = gr.Textbox(label="Anomaly Detection Results")
anomaly_frames_embeddings = gr.Gallery(label="Anomaly Frames (Facial Embeddings)", columns=6, rows=2, height="auto")
mse_embeddings_plot = gr.Plot(label="MSE: Facial Embeddings")
mse_embeddings_hist = gr.Plot(label="MSE Distribution: Facial Embeddings")
# Add emotion plots
emotion_plots = [gr.Plot(label=f"{emotion.capitalize()} Over Time") for emotion in ['angry', 'disgust', 'fear', 'sad', 'happy']]
face_samples_most_frequent = gr.Gallery(label="Most Frequent Person Samples (Target)", columns=6, rows=2, height="auto")
face_samples_others = gr.Gallery(label="Other Persons Samples", columns=6, rows=1, height="auto")
# Hidden components to store intermediate results
df_store = gr.State()
mse_emotions_store = gr.State()
mse_embeddings_store = gr.State()
aligned_faces_folder_store = gr.State()
process_btn.click(
process_video,
inputs=[video_input, anomaly_threshold, fps_slider],
outputs=[
execution_time, results_text, df_store, mse_embeddings_store, mse_emotions_store,
mse_embeddings_plot, mse_embeddings_hist,
*emotion_plots,
face_samples_most_frequent, face_samples_others, anomaly_frames_embeddings,
aligned_faces_folder_store
]
)
if __name__ == "__main__":
iface.launch()