paresh95 committed
Commit: 0347340
Parent(s): c3143ea

PS | Improve UI

Files changed:
- app.py +58 -15
- parameters.yml +26 -0
- src/cv_utils.py +8 -0
- src/face_proportions.py +3 -2
- src/face_symmetry.py +4 -3
- src/face_texture.py +5 -2
app.py
CHANGED

@@ -1,36 +1,79 @@
 import gradio as gr
 import os
+import yaml
 from src.face_texture import GetFaceTexture
 from src.face_symmetry import GetFaceSymmetry
 from src.face_demographics import GetFaceDemographics
 from src.face_proportions import GetFaceProportions
 
 
-def combined_fn(input_image):
-    ...
+def combined_fn(input_image, input_image_2):
+    demographics_dict = GetFaceDemographics().main(input_image)
+    golden_ratios_dict, equal_ratios_dict, face_landmarks_image = GetFaceProportions().main(input_image)
+    face_symmetry_image, symmetry_dict = GetFaceSymmetry().main(input_image)
+    face_image, face_texture_image, texture_dict = GetFaceTexture().main(input_image)
+
+    results = {
+        "Demographic predictions": demographics_dict,
+        "Face proportions (golden ratio)": golden_ratios_dict,
+        "Face proportions (equal ratio)": equal_ratios_dict,
+        "Face symmetry metrics": symmetry_dict,
+        "Face texture metrics": texture_dict
+    }
+    with open("parameters.yml", 'r') as file:
+        data = yaml.safe_load(file)
+    results_interpretation = data["results_interpretation"]
+
+    return (results, results_interpretation, face_image, face_landmarks_image, face_symmetry_image, face_texture_image)
 
 gigi_hadid = os.path.join(os.path.dirname(__file__), "data/gigi_hadid.webp")
+jay_z = os.path.join(os.path.dirname(__file__), "data/jay_z.jpg")
 
 iface = gr.Interface(
     fn=combined_fn,
-    inputs=...
+    inputs=[
+        gr.Image(type="pil", label="Upload Face 1", value=jay_z),
+        gr.Image(type="pil", label="Upload Face 2", value=gigi_hadid)
+    ],
     outputs=[
+        gr.JSON(label="Results"),
+        gr.JSON(label="Results explainer"),
         gr.Image(type="pil", label="Extracted face"),
-        gr.Image(type="pil", label="Extracted face texture"),
-        "json",
-        gr.Image(type="pil", label="Face symmetry"),
-        "json",
-        "json",
-        "json",
-        "json",
         gr.Image(type="pil", label="Face landmarks"),
+        gr.Image(type="pil", label="Face symmetry"),
+        gr.Image(type="pil", label="Extracted face texture"),
     ],
     title="Advanced Facial Feature Detector",
-    description=...
+    description=
+    """
+    <!DOCTYPE html>
+    <html lang="en">
+    <head>
+        <meta charset="UTF-8">
+        <meta name="viewport" content="width=device-width, initial-scale=1.0">
+        <title>JSON Output in HTML</title>
+        <style>
+            .section {
+                margin-bottom: 20px;
+            }
+        </style>
+    </head>
+    <body>
+
+    <div class="section">
+        <p><strong>Description:</strong> This tool analyses a facial image to predict age and gender, assess symmetry, evaluate proportions, and examine texture.</p>
+        <p><strong>Instructions:</strong> For optimal results, upload a clear, front-facing image (see the example image). Either drag and drop your photo or click "Upload Face Image", then press 'Submit'.</p>
+        <p><strong>Interpreting the results:</strong></p>
+        <p><strong>Other information:</strong></p>
+        <ul>
+            <li>No uploaded photo is stored.</li>
+            <li>The output will take several seconds to compute.</li>
+            <li>If an error occurs, try again or try a different photo or angle.</li>
+        </ul>
+    </div>
+    </body>
+    </html>
+    """,
     theme=gr.themes.Soft(),
     live=False,
 )
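Note: the reworked combined_fn returns a six-item tuple whose order lines up with the new outputs list (Results, Results explainer, extracted face, landmarks, symmetry, texture). The hunk does not show how the interface is started; a minimal sketch of a conventional Gradio entry point (an assumption, not part of this commit):

if __name__ == "__main__":
    # launch the Interface defined above (typical Spaces entry point)
    iface.launch()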
parameters.yml
CHANGED

@@ -9,3 +9,29 @@ face_gender:
   model: "models/face_gender/gender_net.caffemodel"
 face_landmarks:
   model: 'models/face_alignment/shape_predictor_68_face_landmarks.dat'
+results_interpretation:
+  Demographic predictions:
+    age_range: "Predicted age"
+    age_confidence: "Confidence of age prediction (0-1)"
+    gender: "Predicted gender"
+    gender_confidence: "Confidence of gender prediction (0-1)"
+  Face proportions (golden ratio):
+    Ideal ratio (golden ratio): "The ideal facial proportion ratio according to classical Greek work on maths and geometry. Ideal facial feature ratios in this section should be 1:1.62 (the golden ratio)."
+    Top of nose to middle of mouth vs middle mouth to bottom of chin: "See description"
+    Middle of mouth to bottom of mouth vs top of mouth to middle of mouth: "See description"
+  Face proportions (equal ratio):
+    Ideal ratio: "Typical facial feature ratios in this section should be 1:1 (equal ratio)."
+    Eye width vs distance between eyes: "See description"
+    Eye to eyebrows vs eye height: "See description"
+    Center of left to right eye vs mouth width: "See description"
+  Face symmetry metrics:
+    structural_similarity: "Range: -1 (opposite) to 1 (similar). Considers differences in structural information, luminance, and texture."
+    cosine_distance: "Range: -1 to 1. 0 = similar, -1 or 1 = not similar. Considers differences in pixels."
+    mse: "Lower means more similar. Minimum value is 0; the maximum is unbounded. Measures pixel differences."
+    mae: "Lower means more similar. Minimum value is 0; the maximum is unbounded. Measures pixel differences."
+    histogram_correlation: "Range: -1 (opposite) to 1 (similar). Considers similarity based on color or intensity distributions."
+    histogram_intersection: "Range: 0 (similar) to total pixels in image (not similar). Considers similarity based on color or intensity distributions."
+    orb_detector_matches: "Higher is better. Counts the number of matches between keypoints in the images."
+    pixel_difference: "Lower is better. Minimum value is 0; the maximum is unbounded."
+  Face texture metrics:
+    Texture std: "Lower means less varied facial texture. Minimum is 0; the maximum is unbounded."
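A quick way to sanity-check the new results_interpretation block is to load it the same way combined_fn does. This is only a sketch and assumes it runs from the repository root so parameters.yml resolves:

import yaml

with open("parameters.yml", "r") as file:
    data = yaml.safe_load(file)

# The top-level keys mirror the section names of the results dict built in app.py
print(list(data["results_interpretation"].keys()))
# ['Demographic predictions', 'Face proportions (golden ratio)', ...]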
src/cv_utils.py
CHANGED

@@ -1,5 +1,6 @@
 import cv2
 import numpy as np
+from PIL import Image as PILImage
 
 
 def get_image(image_input) -> np.array:
@@ -9,3 +10,10 @@ def get_image(image_input) -> np.array:
     else:
        image = cv2.cvtColor(np.array(image_input), cv2.COLOR_RGB2BGR)  # PIL uses RGB
     return image
+
+
+def resize_image_height(image: PILImage.Image, new_height=300) -> PILImage.Image:
+    aspect_ratio = image.width / image.height
+    new_width = int(aspect_ratio * new_height)
+    image = image.resize((new_width, new_height))
+    return image
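The new resize_image_height helper scales an image to a fixed height and derives the new width from the original width/height ratio, so the aspect ratio is preserved. A small usage sketch, assuming the example image bundled with the Space:

from PIL import Image as PILImage
from src.cv_utils import resize_image_height

image = PILImage.open("data/gigi_hadid.webp")
resized = resize_image_height(image, new_height=300)
assert resized.height == 300  # width is scaled proportionally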
src/face_proportions.py
CHANGED

@@ -4,7 +4,7 @@ import cv2
 import os
 import numpy as np
 import imutils
-from src.cv_utils import get_image
+from src.cv_utils import get_image, resize_image_height
 from typing import List, Union
 from PIL import Image as PILImage
 
@@ -110,7 +110,8 @@ class GetFaceProportions:
         golden_ratios = {k: round(v, 2) for k, v in golden_ratios.items()}
         equal_ratios = self.compute_equal_ratios(shape)
         equal_ratios = {k: round(v, 2) for k, v in equal_ratios.items()}
-        image = PILImage.fromarray(image)
+        image = PILImage.fromarray(image)
+        image = resize_image_height(image, new_height=300)
         return golden_ratios, equal_ratios, image
 
 
src/face_symmetry.py
CHANGED

@@ -1,6 +1,6 @@
 import cv2
 import numpy as np
-from src.cv_utils import get_image
+from src.cv_utils import get_image, resize_image_height
 from typing import Tuple, List, Union
 from skimage.metrics import structural_similarity as ssim
 from scipy.spatial import distance
@@ -132,9 +132,10 @@ class GetFaceSymmetry:
                 lowest_mse = d["mse"]
 
         full_face = np.hstack((best_left_half, best_right_half))
-
+        full_face_image = PILImage.fromarray(full_face)
+        full_face_image = resize_image_height(full_face_image, new_height=300)
         best_face_data = {k: float(round(v, 2)) for k, v in best_face_data.items()}
-        return
+        return full_face_image, best_face_data
 
 
 if __name__ == "__main__":
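GetFaceSymmetry.main previously ended with a bare return; it now hands back the stitched, resized symmetry image together with the metrics dict, which is what combined_fn unpacks. A hedged usage sketch (it assumes PILImage is already imported elsewhere in face_symmetry.py, since this hunk does not add that import):

from PIL import Image as PILImage
from src.face_symmetry import GetFaceSymmetry

image = PILImage.open("data/jay_z.jpg")
face_symmetry_image, symmetry_dict = GetFaceSymmetry().main(image)
print(symmetry_dict)  # keys such as "structural_similarity", "mse", "orb_detector_matches"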
src/face_texture.py
CHANGED

@@ -4,7 +4,7 @@ from skimage.feature import local_binary_pattern
 import dlib
 import imutils
 from PIL import Image as PILImage
-from src.cv_utils import get_image
+from src.cv_utils import get_image, resize_image_height
 from typing import Tuple, List, Union
 
 
@@ -51,12 +51,15 @@ class GetFaceTexture:
         lbp = (lbp * 255).astype(np.uint8)
         return PILImage.fromarray(lbp)
 
-    def main(self, image_input) -> List[Union[PILImage.Image,
+    def main(self, image_input) -> List[Union[PILImage.Image, PILImage.Image, dict]]:
         image = get_image(image_input)
         gray_image = self.preprocess_image(image)
         face_image = self.get_face(gray_image)
         lbp, std = self.get_face_texture(face_image)
         face_texture_image = self.postprocess_image(lbp)
+        face_image = PILImage.fromarray(face_image)
+        face_image = resize_image_height(face_image, new_height=300)
+        face_texture_image = resize_image_height(face_texture_image, new_height=300)
         return face_image, face_texture_image, {"Texture std": round(std, 2)}
 
 
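GetFaceTexture.main now converts the cropped face back to a PIL image and resizes both returned images to a 300-pixel height, matching the other modules. A short sketch of the call as combined_fn makes it:

from PIL import Image as PILImage
from src.face_texture import GetFaceTexture

image = PILImage.open("data/jay_z.jpg")
face_image, face_texture_image, texture_dict = GetFaceTexture().main(image)
print(texture_dict)  # {"Texture std": <rounded float>}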