Update video_processing.py
video_processing.py  CHANGED  (+7 -7)
@@ -53,7 +53,7 @@ def process_frames(frames_folder, faces_folder, frame_count, progress):
     for i, frame_file in enumerate(frame_files):
         frame_num = int(frame_file.split('_')[1].split('.')[0])
         frame_path = os.path.join(frames_folder, frame_file)
-        frame = cv2.imread(frame_path)
+        frame = cv2.cvtColor(cv2.imread(frame_path), cv2.COLOR_BGR2RGB)
 
         if frame is not None:
             posture_score, posture_landmarks = calculate_posture_score(frame)
@@ -66,10 +66,10 @@ def process_frames(frames_folder, faces_folder, frame_count, progress):
             x1, y1, x2, y2 = [int(b) for b in boxes[0]]
             face = frame[y1:y2, x1:x2]
             if face.size > 0:
-                face_rgb = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
-                face_resized = cv2.resize(face_rgb, (160, 160))
+                face_resized = cv2.resize(face, (160, 160))
                 output_path = os.path.join(faces_folder, f"frame_{frame_num}_face.jpg")
                 cv2.imwrite(output_path, face_resized)
+                cv2.imwrite(output_path, cv2.cvtColor(face_resized, cv2.COLOR_RGB2BGR))
                 face_paths.append(output_path)
                 embedding = get_face_embedding(face_resized)
                 embeddings_by_frame[frame_num] = embedding
@@ -309,10 +309,10 @@ def get_all_face_samples(organized_faces_folder, output_folder, largest_cluster,
         for i, sample in enumerate(face_files[:max_samples]):
             face_path = os.path.join(person_folder, sample)
             output_path = os.path.join(output_folder, f"face_sample_most_frequent_{i:04d}.jpg")
-            face_img = cv2.imread(face_path)
+            face_img = cv2.cvtColor(cv2.imread(face_path), cv2.COLOR_BGR2RGB)
             if face_img is not None:
                 small_face = cv2.resize(face_img, (160, 160))
-                cv2.imwrite(output_path, small_face)
+                cv2.imwrite(output_path, cv2.cvtColor(small_face, cv2.COLOR_RGB2BGR))
                 face_samples["most_frequent"].append(output_path)
                 if len(face_samples["most_frequent"]) >= max_samples:
                     break
@@ -322,10 +322,10 @@ def get_all_face_samples(organized_faces_folder, output_folder, largest_cluster,
            for i, sample in enumerate(face_files[:remaining_samples]):
                 face_path = os.path.join(person_folder, sample)
                 output_path = os.path.join(output_folder, f"face_sample_other_{cluster_id:02d}_{i:04d}.jpg")
-                face_img = cv2.imread(face_path)
+                face_img = cv2.cvtColor(cv2.imread(face_path), cv2.COLOR_BGR2RGB)
                 if face_img is not None:
                     small_face = cv2.resize(face_img, (160, 160))
-                    cv2.imwrite(output_path, small_face)
+                    cv2.imwrite(output_path, cv2.cvtColor(small_face, cv2.COLOR_RGB2BGR))
                     face_samples["others"].append(output_path)
                     if len(face_samples["others"]) >= max_samples:
                         break
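Taken together, the +7/-7 change standardizes one colour-space convention: decode with cv2.imread (which returns BGR), convert to RGB once per frame or face so the downstream steps (calculate_posture_score, get_face_embedding) see RGB data, and convert back to BGR only at the cv2.imwrite boundary, since imwrite expects BGR channel order. The following is a minimal sketch of that convention; the read_rgb/write_rgb helpers and file paths are illustrative only and are not part of video_processing.py, and the assumption that the posture and embedding models expect RGB input is inferred from the direction of the conversions in the diff.

    import cv2

    # Sketch of the read-as-RGB / write-as-BGR convention this commit adopts.
    # Helper names and paths are illustrative, not part of video_processing.py.

    def read_rgb(path):
        """Read an image from disk and return it in RGB order, or None on failure."""
        img = cv2.imread(path)          # OpenCV decodes to BGR; returns None if unreadable
        if img is None:
            return None                 # guard first: cv2.cvtColor raises if given None
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    def write_rgb(path, img_rgb):
        """Save an RGB array with cv2.imwrite, which expects BGR channel order."""
        return cv2.imwrite(path, cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR))

    # Usage mirroring the patched loop: keep arrays in RGB for model input,
    # convert back to BGR only when writing JPEGs.
    frame = read_rgb("frames/frame_0001.jpg")
    if frame is not None:
        face_resized = cv2.resize(frame, (160, 160))   # stand-in for the cropped face
        write_rgb("faces/frame_1_face.jpg", face_resized)

One caveat worth noting: the patched line 56 wraps cv2.imread directly in cv2.cvtColor, so the later "if frame is not None:" check can no longer catch a failed read (cv2.cvtColor raises an error when the source is None). The sketch checks the read result before converting; the same consideration applies to the cv2.imread calls patched at lines 312 and 325.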