Spaces: Runtime error
Update video_processing.py
Browse files
video_processing.py (CHANGED): +349 -347
@@ -1,347 +1,349 @@
import os
import cv2
import numpy as np
from moviepy.editor import VideoFileClip
import tempfile
import time
from PIL import Image, ImageDraw, ImageFont
import math
from face_analysis import get_face_embedding, cluster_faces, organize_faces_by_person
from pose_analysis import calculate_posture_score, draw_pose_landmarks
from anomaly_detection import anomaly_detection
from visualization import plot_mse, plot_mse_histogram, plot_mse_heatmap
from utils import frame_to_timecode, parse_transcription, get_sentences_before_anomalies
from transcribe import transcribe
import pandas as pd
from facenet_pytorch import MTCNN
import torch
import mediapipe as mp
import spaces  # Hugging Face ZeroGPU helper; provides the @spaces.GPU decorator used on process_video
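
# Module-level model setup, shared across frames: MTCNN (facenet-pytorch) for face
# detection, MediaPipe FaceMesh for landmark checks, and MediaPipe Pose for posture landmarks.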
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
mtcnn = MTCNN(keep_all=False, device=device, thresholds=[0.95, 0.95, 0.95], min_face_size=80)

mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=1, min_detection_confidence=0.7)

mp_pose = mp.solutions.pose
pose = mp_pose.Pose(static_image_mode=False, min_detection_confidence=0.7, min_tracking_confidence=0.7)
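
# extract_frames: samples the clip at roughly `desired_fps`, writes each sampled frame to
# disk as frame_XXXX.jpg, and reports progress through the optional callback.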
def extract_frames(video_path, output_folder, desired_fps, progress_callback=None):
    os.makedirs(output_folder, exist_ok=True)
    clip = VideoFileClip(video_path)
    original_fps = clip.fps
    duration = clip.duration
    total_frames = int(duration * original_fps)
    step = max(1, original_fps / desired_fps)
    total_frames_to_extract = int(total_frames / step)

    frame_count = 0
    for t in np.arange(0, duration, step / original_fps):
        frame = clip.get_frame(t)
        cv2.imwrite(os.path.join(output_folder, f"frame_{frame_count:04d}.jpg"), cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        frame_count += 1
        if progress_callback:
            progress = min(100, (frame_count / total_frames_to_extract) * 100)
            progress_callback(progress, "Extracting frame")
        if frame_count >= total_frames_to_extract:
            break
    clip.close()
    return frame_count, original_fps
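
# process_frames: for each extracted frame, computes a posture score, detects the single
# most confident face with MTCNN, keeps it only if FaceMesh judges it frontal, and stores a
# 160x160 crop plus its embedding (via get_face_embedding) keyed by frame number.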
def process_frames(frames_folder, aligned_faces_folder, frame_count, progress):
    embeddings_by_frame = {}
    posture_scores_by_frame = {}
    posture_landmarks_by_frame = {}
    aligned_face_paths = []
    frame_files = sorted([f for f in os.listdir(frames_folder) if f.endswith('.jpg')])

    for i, frame_file in enumerate(frame_files):
        frame_num = int(frame_file.split('_')[1].split('.')[0])
        frame_path = os.path.join(frames_folder, frame_file)
        frame = cv2.imread(frame_path)

        if frame is not None:
            posture_score, posture_landmarks = calculate_posture_score(frame)
            posture_scores_by_frame[frame_num] = posture_score
            posture_landmarks_by_frame[frame_num] = posture_landmarks

            boxes, probs = mtcnn.detect(frame)

            if boxes is not None and len(boxes) > 0 and probs[0] >= 0.99:
                x1, y1, x2, y2 = [int(b) for b in boxes[0]]
                face = frame[y1:y2, x1:x2]
                if face.size > 0:
                    results = face_mesh.process(cv2.cvtColor(face, cv2.COLOR_BGR2RGB))
                    if results.multi_face_landmarks and is_frontal_face(results.multi_face_landmarks[0].landmark):
                        aligned_face = face

                        if aligned_face is not None:
                            aligned_face_resized = cv2.resize(aligned_face, (160, 160))
                            output_path = os.path.join(aligned_faces_folder, f"frame_{frame_num}_face.jpg")
                            cv2.imwrite(output_path, aligned_face_resized)
                            aligned_face_paths.append(output_path)
                            embedding = get_face_embedding(aligned_face_resized)
                            embeddings_by_frame[frame_num] = embedding

        progress((i + 1) / len(frame_files), f"Processing frame {i + 1} of {len(frame_files)}")

    return embeddings_by_frame, posture_scores_by_frame, posture_landmarks_by_frame, aligned_face_paths
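
# process_video: end-to-end pipeline. Extracts frames, analyses faces and posture, clusters
# face embeddings into persons, runs anomaly detection on embeddings and posture scores,
# generates plots, transcribes the audio, and assembles annotated anomaly images.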
@spaces.GPU(duration=300)  # ZeroGPU allocation for the GPU-heavy entry point
def process_video(video_path, anomaly_threshold, desired_fps, progress=None):
    start_time = time.time()
    output_folder = "output"
    os.makedirs(output_folder, exist_ok=True)

    GRAPH_COLORS = {
        'facial_embeddings': 'navy',
        'body_posture': 'purple'
    }

    with tempfile.TemporaryDirectory() as temp_dir:
        aligned_faces_folder = os.path.join(temp_dir, 'aligned_faces')
        organized_faces_folder = os.path.join(temp_dir, 'organized_faces')
        os.makedirs(aligned_faces_folder, exist_ok=True)
        os.makedirs(organized_faces_folder, exist_ok=True)

        clip = VideoFileClip(video_path)
        video_duration = clip.duration
        clip.close()

        progress(0, "Starting frame extraction")
        frames_folder = os.path.join(temp_dir, 'extracted_frames')

        def extraction_progress(percent, message):
            progress(percent / 100, "Extracting frames")

        frame_count, original_fps = extract_frames(video_path, frames_folder, desired_fps, extraction_progress)

        progress(1, "Frame extraction complete")
        progress(0.3, "Processing frames")
        embeddings_by_frame, posture_scores_by_frame, posture_landmarks_by_frame, aligned_face_paths = process_frames(
            frames_folder, aligned_faces_folder,
            frame_count,
            progress)

        if not aligned_face_paths:
            raise ValueError("No faces were extracted from the video.")

        progress(0.6, "Clustering faces")
        embeddings = [embedding for _, embedding in embeddings_by_frame.items()]
        clusters = cluster_faces(embeddings)
        num_clusters = len(set(clusters))

        progress(0.7, "Organizing faces")
        organize_faces_by_person(embeddings_by_frame, clusters, aligned_faces_folder, organized_faces_folder)

        progress(0.8, "Saving person data")
        df, largest_cluster = save_person_data_to_csv(embeddings_by_frame, clusters, desired_fps,
                                                      original_fps, temp_dir, video_duration)

        df['Seconds'] = df['Timecode'].apply(
            lambda x: sum(float(t) * 60 ** i for i, t in enumerate(reversed(x.split(':')))))

        progress(0.85, "Getting face samples")
        face_samples = get_all_face_samples(organized_faces_folder, output_folder, largest_cluster)

        progress(0.9, "Performing anomaly detection")
        embedding_columns = [col for col in df.columns if col.startswith('Raw_Embedding_')]

        X_embeddings = df[embedding_columns].values

        try:
            X_posture = np.array([posture_scores_by_frame.get(frame, None) for frame in df['Frame']])
            X_posture = X_posture[X_posture != None].reshape(-1, 1)

            if len(X_posture) == 0:
                raise ValueError("No valid posture data found")

            mse_embeddings, mse_posture = anomaly_detection(X_embeddings, X_posture)

            progress(0.95, "Generating plots")
            mse_plot_embeddings, anomaly_frames_embeddings = plot_mse(df, mse_embeddings, "Facial Features",
                                                                      color=GRAPH_COLORS['facial_embeddings'],
                                                                      anomaly_threshold=anomaly_threshold)

            mse_histogram_embeddings = plot_mse_histogram(mse_embeddings, "MSE Distribution: Facial Features",
                                                          anomaly_threshold, color=GRAPH_COLORS['facial_embeddings'])

            mse_plot_posture, anomaly_frames_posture = plot_mse(df, mse_posture, "Body Posture",
                                                                color=GRAPH_COLORS['body_posture'],
                                                                anomaly_threshold=anomaly_threshold)

            mse_histogram_posture = plot_mse_histogram(mse_posture, "MSE Distribution: Body Posture",
                                                       anomaly_threshold, color=GRAPH_COLORS['body_posture'])

            mse_heatmap_posture = plot_mse_heatmap(mse_posture, "Body Posture MSE Heatmap", df)

            mse_heatmap_embeddings = plot_mse_heatmap(mse_embeddings, "Facial Features MSE Heatmap", df)

        except Exception as e:
            print(f"Error details: {str(e)}")
            import traceback
            traceback.print_exc()
            # Keep the error tuple the same length as the 19-element success tuple below.
            return (f"Error in video processing: {str(e)}",) + (None,) * 18

        # Add transcription
        progress(0.96, "Transcribing video")
        transcription_output = transcribe(video_path, transcribe_to_text=True, transcribe_to_srt=False,
                                          target_language='en')

        # Parse the transcription output to get sentences and their timecodes
        sentences_with_timecodes = parse_transcription(transcription_output, video_duration)

        # Get anomaly timecodes
        anomaly_timecodes_features = [df[df['Frame'] == frame]['Timecode'].iloc[0] for frame in
                                      anomaly_frames_embeddings]
        anomaly_timecodes_posture = [df[df['Frame'] == frame]['Timecode'].iloc[0] for frame in anomaly_frames_posture]

        anomaly_sentences_features = get_sentences_before_anomalies(sentences_with_timecodes,
                                                                    anomaly_timecodes_features)
        anomaly_sentences_posture = get_sentences_before_anomalies(sentences_with_timecodes,
                                                                   anomaly_timecodes_posture)
        progress(1.0, "Preparing results")
        results = f"Number of persons detected: {num_clusters}\n\n"
        results += "Breakdown:\n"
        for cluster_id in range(num_clusters):
            face_count = len([c for c in clusters if c == cluster_id])
            results += f"Person {cluster_id + 1}: {face_count} face frames\n"

        end_time = time.time()
        execution_time = end_time - start_time

        def add_timecode_to_image(image, timecode):
            img_pil = Image.fromarray(image)
            draw = ImageDraw.Draw(img_pil)
            try:
                font = ImageFont.truetype("arial.ttf", 15)
            except OSError:
                # arial.ttf is usually unavailable on Linux containers; fall back to PIL's default font.
                font = ImageFont.load_default()
            draw.text((10, 10), timecode, (255, 0, 0), font=font)
            return np.array(img_pil)

        anomaly_faces_embeddings = []
        for frame in anomaly_frames_embeddings:
            face_path = os.path.join(aligned_faces_folder, f"frame_{frame}_face.jpg")
            if os.path.exists(face_path):
                face_img = cv2.imread(face_path)
                if face_img is not None:
                    face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
                    timecode = df[df['Frame'] == frame]['Timecode'].iloc[0]
                    face_img_with_timecode = add_timecode_to_image(face_img, timecode)
                    anomaly_faces_embeddings.append(face_img_with_timecode)

        anomaly_frames_posture_images = []
        for frame in anomaly_frames_posture:
            frame_path = os.path.join(frames_folder, f"frame_{frame:04d}.jpg")
            if os.path.exists(frame_path):
                frame_img = cv2.imread(frame_path)
                if frame_img is not None:
                    frame_img = cv2.cvtColor(frame_img, cv2.COLOR_BGR2RGB)
                    pose_results = pose.process(frame_img)
                    if pose_results.pose_landmarks:
                        frame_img = draw_pose_landmarks(frame_img, pose_results.pose_landmarks)
                    timecode = df[df['Frame'] == frame]['Timecode'].iloc[0]
                    frame_img_with_timecode = add_timecode_to_image(frame_img, timecode)
                    anomaly_frames_posture_images.append(frame_img_with_timecode)

        return (
            execution_time,
            results,
            df,
            mse_embeddings,
            mse_posture,
            mse_plot_embeddings,
            mse_histogram_embeddings,
            mse_plot_posture,
            mse_histogram_posture,
            mse_heatmap_embeddings,
            mse_heatmap_posture,
            face_samples["most_frequent"],
            face_samples["others"],
            anomaly_faces_embeddings,
            anomaly_frames_posture_images,
            aligned_faces_folder,
            frames_folder,
            anomaly_sentences_features,
            anomaly_sentences_posture
        )
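
# is_frontal_face: measures the angle between the nose-to-left-chin and nose-to-right-chin
# vectors (FaceMesh landmarks 4, 234, 454); a roughly frontal face yields an angle near 180 degrees.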
def is_frontal_face(landmarks, threshold=40):
    nose_tip = landmarks[4]
    left_chin = landmarks[234]
    right_chin = landmarks[454]
    nose_to_left = [left_chin.x - nose_tip.x, left_chin.y - nose_tip.y]
    nose_to_right = [right_chin.x - nose_tip.x, right_chin.y - nose_tip.y]
    dot_product = nose_to_left[0] * nose_to_right[0] + nose_to_left[1] * nose_to_right[1]
    magnitude_left = math.sqrt(nose_to_left[0] ** 2 + nose_to_left[1] ** 2)
    magnitude_right = math.sqrt(nose_to_right[0] ** 2 + nose_to_right[1] ** 2)
    cos_angle = dot_product / (magnitude_left * magnitude_right)
    angle = math.acos(cos_angle)
    angle_degrees = math.degrees(angle)
    return abs(180 - angle_degrees) < threshold
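
# save_person_data_to_csv: keeps only the largest face cluster, saves its embeddings to
# face_embeddings.npy, and returns a DataFrame with frame numbers, timecodes and raw
# embedding columns used later for anomaly detection.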
def save_person_data_to_csv(embeddings_by_frame, clusters, desired_fps, original_fps, output_folder, video_duration):
    person_data = {}

    for (frame_num, embedding), cluster in zip(embeddings_by_frame.items(), clusters):
        if cluster not in person_data:
            person_data[cluster] = []
        person_data[cluster].append((frame_num, embedding))

    largest_cluster = max(person_data, key=lambda k: len(person_data[k]))

    data = person_data[largest_cluster]
    data.sort(key=lambda x: x[0])
    frames, embeddings = zip(*data)

    embeddings_array = np.array(embeddings)
    np.save(os.path.join(output_folder, 'face_embeddings.npy'), embeddings_array)

    total_frames = max(frames)
    timecodes = [frame_to_timecode(frame, total_frames, video_duration) for frame in frames]

    df_data = {
        'Frame': frames,
        'Timecode': timecodes,
        'Embedding_Index': range(len(embeddings))
    }

    for i in range(len(embeddings[0])):
        df_data[f'Raw_Embedding_{i}'] = [embedding[i] for embedding in embeddings]

    df = pd.DataFrame(df_data)

    return df, largest_cluster
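
# get_all_face_samples: copies up to max_samples 160x160 face crops per person into the
# output folder, split between the most frequent person and all other clusters.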
def get_all_face_samples(organized_faces_folder, output_folder, largest_cluster, max_samples=100):
    face_samples = {"most_frequent": [], "others": []}
    for cluster_folder in sorted(os.listdir(organized_faces_folder)):
        if cluster_folder.startswith("person_"):
            person_folder = os.path.join(organized_faces_folder, cluster_folder)
            face_files = sorted([f for f in os.listdir(person_folder) if f.endswith('.jpg')])
            if face_files:
                cluster_id = int(cluster_folder.split('_')[1])
                if cluster_id == largest_cluster:
                    for i, sample in enumerate(face_files[:max_samples]):
                        face_path = os.path.join(person_folder, sample)
                        output_path = os.path.join(output_folder, f"face_sample_most_frequent_{i:04d}.jpg")
                        face_img = cv2.imread(face_path)
                        if face_img is not None:
                            small_face = cv2.resize(face_img, (160, 160))
                            cv2.imwrite(output_path, small_face)
                            face_samples["most_frequent"].append(output_path)
                        if len(face_samples["most_frequent"]) >= max_samples:
                            break
                else:
                    remaining_samples = max_samples - len(face_samples["others"])
                    if remaining_samples > 0:
                        for i, sample in enumerate(face_files[:remaining_samples]):
                            face_path = os.path.join(person_folder, sample)
                            output_path = os.path.join(output_folder, f"face_sample_other_{cluster_id:02d}_{i:04d}.jpg")
                            face_img = cv2.imread(face_path)
                            if face_img is not None:
                                small_face = cv2.resize(face_img, (160, 160))
                                cv2.imwrite(output_path, small_face)
                                face_samples["others"].append(output_path)
                            if len(face_samples["others"]) >= max_samples:
                                break
    return face_samples
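
# Minimal usage sketch (assumption: the Gradio wiring lives in the Space's app file, which is
# not part of this diff). process_video expects a progress callable taking (fraction, message):
#
#   outputs = process_video("input.mp4", anomaly_threshold=4, desired_fps=10,
#                           progress=lambda frac, msg: print(f"{frac:.0%} {msg}"))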