File size: 3,059 Bytes
2b50de4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e257fc3
2b50de4
 
 
 
 
 
 
 
 
 
 
e257fc3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76

def frame_to_timecode(frame_num, total_frames, duration):
    """Map a frame index to an "HH:MM:SS.mmm" timecode string.

    The frame position is projected proportionally onto the clip length:
    elapsed = (frame_num / total_frames) * duration.

    Args:
        frame_num: Index of the frame within the clip.
        total_frames: Total frame count; must be non-zero.
        duration: Clip duration in seconds.

    Returns:
        Timecode formatted as "HH:MM:SS.mmm" (fraction truncated, not rounded).
    """
    elapsed = (frame_num / total_frames) * duration
    whole, frac = int(elapsed), elapsed - int(elapsed)
    mins, secs = divmod(whole, 60)
    hrs, mins = divmod(mins, 60)
    millis = int(frac * 1000)
    return f"{hrs:02d}:{mins:02d}:{secs:02d}.{millis:03d}"

def seconds_to_timecode(seconds):
    """Render a duration in seconds as an "HH:MM:SS" string (no milliseconds)."""
    hh = int(seconds // 3600)
    mm = int(seconds % 3600 // 60)
    ss = int(seconds % 60)
    return ":".join(f"{part:02d}" for part in (hh, mm, ss))

def timecode_to_seconds(timecode):
    """Parse an "HH:MM:SS" or "HH:MM:SS.mmm" timecode into total seconds.

    Fix: the original `map(int, ...)` raised ValueError on the fractional
    "HH:MM:SS.mmm" strings produced by `frame_to_timecode`. Plain
    "HH:MM:SS" input still returns an int, so existing callers are
    unaffected; a fractional seconds field yields a float.

    Args:
        timecode: String with exactly three ":"-separated fields; the last
            field may carry a decimal fraction.

    Returns:
        Total seconds as int (no fraction) or float (fraction present).

    Raises:
        ValueError: If the string does not split into exactly three fields
            or a field is not numeric.
    """
    h, m, s = timecode.split(':')
    if '.' in s:
        return int(h) * 3600 + int(m) * 60 + float(s)
    return int(h) * 3600 + int(m) * 60 + int(s)

def add_timecode_to_image(image, timecode):
    """Burn a small (15 px) red timecode label into the top-left of a frame.

    Fix: the original hard-coded `ImageFont.truetype("arial.ttf", 15)`,
    which raises OSError on systems without that font file (most
    Linux/macOS hosts); now falls back to PIL's bundled default font so
    the overlay never crashes the pipeline.

    Args:
        image: Frame as a numpy array (any layout PIL.Image.fromarray accepts).
        timecode: Text to draw, e.g. "00:01:02.345".

    Returns:
        A new numpy array with the text rendered at (10, 10) in red.
    """
    from PIL import Image, ImageDraw, ImageFont
    import numpy as np

    img_pil = Image.fromarray(image)
    draw = ImageDraw.Draw(img_pil)
    try:
        font = ImageFont.truetype("arial.ttf", 15)
    except OSError:
        # Arial is only reliably present on Windows; use PIL's built-in
        # bitmap font rather than failing the whole annotation pass.
        font = ImageFont.load_default()
    draw.text((10, 10), timecode, (255, 0, 0), font=font)
    return np.array(img_pil)

def add_timecode_to_image_body(image, timecode):
    """Burn a large (100 px) red timecode label into the top-left of a frame.

    Near-duplicate of `add_timecode_to_image` differing only in font size;
    kept separate to preserve both public names.
    Fix: the original hard-coded `ImageFont.truetype("arial.ttf", 100)`,
    which raises OSError on systems without that font file; now falls back
    to PIL's bundled default font.

    Args:
        image: Frame as a numpy array (any layout PIL.Image.fromarray accepts).
        timecode: Text to draw, e.g. "00:01:02.345".

    Returns:
        A new numpy array with the text rendered at (10, 10) in red.
    """
    from PIL import Image, ImageDraw, ImageFont
    import numpy as np

    img_pil = Image.fromarray(image)
    draw = ImageDraw.Draw(img_pil)
    try:
        font = ImageFont.truetype("arial.ttf", 100)
    except OSError:
        # Fallback keeps annotation working on hosts without Arial; note
        # the default bitmap font ignores the 100 px size request.
        font = ImageFont.load_default()
    draw.text((10, 10), timecode, (255, 0, 0), font=font)
    return np.array(img_pil)

def create_annotated_video(video_path, df, mse_embeddings, largest_cluster, output_path):
    """Re-encode a video with face boxes, facial landmarks, and per-frame MSE text.

    Relies on module-level globals not visible in this chunk: `cv2`,
    `mtcnn` (a face detector exposing `.detect`), and the MediaPipe objects
    `face_mesh`, `mp_face_mesh`, `mp_drawing`, `mp_drawing_styles` —
    presumably initialized earlier in this file; verify before reuse.

    Args:
        video_path: Path to the input video.
        df: Table with a 'Frame' column listing annotated frame numbers —
            presumably a pandas DataFrame; verify against caller.
        mse_embeddings: Per-frame MSE values, indexed below with the boolean
            mask `df['Frame'] == frame_number` and `.iloc[0]` — assumes a
            pandas Series aligned with df's index; TODO confirm (a plain
            numpy array would behave differently here).
        largest_cluster: Accepted but never read in this body —
            NOTE(review): confirm whether it was meant to filter frames.
        output_path: Destination for the mp4v-encoded output video.
    """
    video = cv2.VideoCapture(video_path)
    fps = video.get(cv2.CAP_PROP_FPS)
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    # Output keeps the source geometry and frame rate.
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    frame_number = 0
    while True:
        ret, frame = video.read()
        if not ret:
            # End of stream (or read error) — cv2 does not distinguish.
            break
        # Detect face and draw bounding box (only the first detection is drawn)
        boxes, _ = mtcnn.detect(frame)
        if boxes is not None and len(boxes) > 0:
            box = boxes[0]
            cv2.rectangle(frame, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0), 2)
        # Draw facial landmarks; MediaPipe expects RGB while cv2 reads BGR,
        # hence the color-space conversion for the detector input only.
        face_mesh_results = face_mesh.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        if face_mesh_results.multi_face_landmarks:
            for face_landmarks in face_mesh_results.multi_face_landmarks:
                mp_drawing.draw_landmarks(
                    image=frame,
                    landmark_list=face_landmarks,
                    connections=mp_face_mesh.FACEMESH_TESSELATION,
                    landmark_drawing_spec=None,
                    connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_tesselation_style()
                )
        # Add MSE annotation for frames listed in df
        if frame_number in df['Frame'].values:
            mse = mse_embeddings[df['Frame'] == frame_number].iloc[0]
            cv2.putText(frame, f"MSE: {mse:.4f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        out.write(frame)
        frame_number += 1
    video.release()
    out.release()