Update face_detection.py
face_detection.py  CHANGED  (+85 -113)
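Summary: this change swaps the dlib-based pipeline for InsightFace. RetinaFace (`retinaface_r50_v1`) now handles face detection, a 106-point model (`2d106det`) predicts landmarks, the `composite_images` helper is removed, and `image_align` is replaced with a version modified from the NVlabs ffhq-dataset code.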
@@ -1,17 +1,14 @@
-
-
-import dlib
+import insightface
 import numpy as np
-import
-from PIL import Image
-from PIL import ImageOps
+from PIL import Image, ImageOps
-from scipy.ndimage import gaussian_filter
+from scipy.ndimage import gaussian_filter  # re-added: image_align below still calls gaussian_filter
 import cv2
 
+# Load InsightFace models
+detector = insightface.model_zoo.get_model('retinaface_r50_v1')
+detector.prepare(ctx_id=-1)  # Use CPU, set ctx_id=0 for GPU
+
+landmark_model = insightface.model_zoo.get_model('2d106det')
+landmark_model.prepare(ctx_id=-1)  # Use CPU, set ctx_id=0 for GPU
-
-
-
 
 def align(image_in, face_index=0, output_size=256):
     try:
@@ -30,111 +27,86 @@ def align(image_in, face_index=0, output_size=256):
 
         return aligned_image, n_faces, quad
 
-
-def composite_images(quad, img, output):
-    """Composite an image into and output canvas according to transformed co-ords"""
-    output = output.convert("RGBA")
-    img = img.convert("RGBA")
-    input_size = img.size
-    src = np.array(((0, 0), (0, input_size[1]), input_size, (input_size[0], 0)), dtype=np.float32)
-    dst = np.float32(quad)
-    mtx = cv2.getPerspectiveTransform(dst, src)
-    img = img.transform(output.size, Image.PERSPECTIVE, mtx.flatten(), Image.BILINEAR)
-    output.alpha_composite(img)
-
-    return output.convert("RGB")
-
-
 def get_landmarks(image):
     """Get landmarks from PIL image"""
-
-
-    max_size = max(image.size)
-    reduction_scale = int(max_size/512)
-    if reduction_scale == 0:
-        reduction_scale = 1
-    downscaled = image.reduce(reduction_scale)
-    img = np.array(downscaled)
-    detections = detector(img, 0)
-
-    for detection in detections:
-        try:
-            face_landmarks = [(reduction_scale*item.x, reduction_scale*item.y) for item in shape_predictor(img, detection).parts()]
-            yield face_landmarks
-        except Exception as e:
-            print(e)
+    img = np.array(image)
+    bboxes, _ = detector.detect(img, threshold=0.5, scale=1.0)
+
+    for bbox in bboxes:
+        # Use the landmark model to predict landmarks
+        landmarks = landmark_model.get(img, bbox)
+        yield landmarks
 
 def image_align(src_img, face_landmarks, output_size=512, transform_size=2048, enable_padding=True, x_scale=1, y_scale=1, em_scale=0.1, alpha=False):
-    # [old image_align body: 72 removed lines not captured in this view]
+    # Align function modified from ffhq-dataset
+    # See https://github.com/NVlabs/ffhq-dataset for license
+
+    lm = np.array(face_landmarks)
+    lm_eye_left = lm[2:3]  # left-clockwise
+    lm_eye_right = lm[0:1]  # left-clockwise
+
+    # Calculate auxiliary vectors.
+    eye_left = np.mean(lm_eye_left, axis=0)
+    eye_right = np.mean(lm_eye_right, axis=0)
+    eye_avg = (eye_left + eye_right) * 0.5
+    eye_to_eye = 0.71 * (eye_right - eye_left)
+    mouth_avg = lm[4]
+    eye_to_mouth = 1.35 * (mouth_avg - eye_avg)
+
+    # Choose oriented crop rectangle.
+    x = eye_to_eye.copy()
+    x /= np.hypot(*x)
+    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
+    x *= x_scale
+    y = np.flipud(x) * [-y_scale, y_scale]
+    c = eye_avg + eye_to_mouth * em_scale
+    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
+    quad_orig = quad.copy()
+    qsize = np.hypot(*x) * 2
+
+    img = src_img.convert('RGBA').convert('RGB')
+
+    # Shrink.
+    shrink = int(np.floor(qsize / output_size * 0.5))
+    if shrink > 1:
+        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
+        img = img.resize(rsize, Image.Resampling.LANCZOS)
+        quad /= shrink
+        qsize /= shrink
+
+    # Crop.
+    border = max(int(np.rint(qsize * 0.1)), 3)
+    crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
+    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
+    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
+        img = img.crop(crop)
+        quad -= crop[0:2]
+
+    # Pad.
+    pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
+    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
+    if enable_padding and max(pad) > border - 4:
+        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
+        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
+        h, w, _ = img.shape
+        y, x, _ = np.ogrid[:h, :w, :1]
+        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
+        blur = qsize * 0.02
+        img += (gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
+        img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
+        img = np.uint8(np.clip(np.rint(img), 0, 255))
+        if alpha:
+            mask = 1 - np.clip(3.0 * mask, 0.0, 1.0)
+            mask = np.uint8(np.clip(np.rint(mask * 255), 0, 255))
+            img = np.concatenate((img, mask), axis=2)
+            img = Image.fromarray(img, 'RGBA')
+        else:
+            img = Image.fromarray(img, 'RGB')
+        quad += pad[:2]
+
+    # Transform.
+    img = img.transform((transform_size, transform_size), Image.QUAD, (quad + 0.5).flatten(), Image.BILINEAR)
+    if output_size < transform_size:
+        img = img.resize((output_size, output_size), Image.Resampling.LANCZOS)
+
+    return img, quad_orig
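For context, a minimal usage sketch of the updated pipeline, not part of the commit: it feeds each face yielded by get_landmarks into image_align. The input file photo.jpg and output file aligned.png are hypothetical, and it assumes the module is importable as face_detection.

# Hypothetical usage sketch (assumes the changed module is importable as
# face_detection and that an example image photo.jpg exists on disk).
from PIL import Image

import face_detection

img = Image.open("photo.jpg")
for landmarks in face_detection.get_landmarks(img):
    # image_align returns the aligned crop and the original crop quad
    aligned, quad = face_detection.image_align(img, landmarks, output_size=512)
    aligned.save("aligned.png")
    break  # first detected face only

Note that ctx_id=-1 in the prepare() calls keeps inference on the CPU, as the in-code comments say; a Space with a GPU would pass ctx_id=0 instead.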