Update utils.py
utils.py CHANGED
@@ -27,7 +27,6 @@ def add_timecode_to_image(image, timecode):
     draw.text((10, 10), timecode, (255, 0, 0), font=font)
     return np.array(img_pil)
 
-
 def add_timecode_to_image_body(image, timecode):
     from PIL import Image, ImageDraw, ImageFont
     import numpy as np
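Only the tail of add_timecode_to_image is visible in this hunk. For context, the function presumably follows the standard PIL overlay pattern; a minimal sketch, assuming the frame is an RGB uint8 NumPy array and a default font (the helper name overlay_timecode is hypothetical, not from the diff):

from PIL import Image, ImageDraw, ImageFont
import numpy as np

def overlay_timecode(image, timecode):
    # Convert the NumPy frame to a PIL image so ImageDraw can render text on it.
    img_pil = Image.fromarray(image)
    draw = ImageDraw.Draw(img_pil)
    # The real code may load a specific TTF; a default font keeps the sketch self-contained.
    font = ImageFont.load_default()
    # Same call as in the diff: red timecode text near the top-left corner.
    draw.text((10, 10), timecode, (255, 0, 0), font=font)
    # Hand the annotated frame back as a NumPy array, as the diff's function does.
    return np.array(img_pil)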
@@ -40,6 +39,7 @@ def add_timecode_to_image_body(image, timecode):
 
 def create_annotated_video(video_path, df, mse_embeddings, largest_cluster, output_path):
     import cv2
+    from facenet_pytorch import MTCNN
     video = cv2.VideoCapture(video_path)
     fps = video.get(cv2.CAP_PROP_FPS)
     width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
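The hunk ends after reading the input video's properties. A hypothetical continuation, not shown in the diff, that mirrors those properties into a cv2.VideoWriter for the annotated output (the helper name open_writer_for and the "mp4v" codec choice are assumptions):

import cv2

def open_writer_for(video_path, output_path):
    # Open the source video and read the properties the diff queries.
    video = cv2.VideoCapture(video_path)
    fps = video.get(cv2.CAP_PROP_FPS)
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Create an output writer with matching geometry and frame rate.
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    return video, writer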
@@ -52,6 +52,7 @@ def create_annotated_video(video_path, df, mse_embeddings, largest_cluster, outp
         if not ret:
             break
         # Detect face and draw bounding box
+        mtcnn = MTCNN(keep_all=False, device=device, thresholds=[0.9, 0.9, 0.9], min_face_size=50)
         boxes, _ = mtcnn.detect(frame)
         if boxes is not None and len(boxes) > 0:
             box = boxes[0]
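The added lines wire facenet_pytorch's MTCNN into the per-frame loop. A minimal, self-contained sketch of that detection step; the annotate_frame helper, the device setup, and the BGR-to-RGB conversion are assumptions not taken from the diff:

import cv2
import torch
from facenet_pytorch import MTCNN

# Same constructor arguments as the added line; `device` itself is not shown in these hunks.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
mtcnn = MTCNN(keep_all=False, device=device, thresholds=[0.9, 0.9, 0.9], min_face_size=50)

def annotate_frame(frame):
    # facenet_pytorch's MTCNN expects RGB input, while cv2 frames are BGR.
    boxes, _ = mtcnn.detect(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    if boxes is not None and len(boxes) > 0:
        # Take the first detected box, as the diff does, and draw it on the frame.
        x1, y1, x2, y2 = [int(v) for v in boxes[0]]
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
    return frame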