N3tron committed
Commit cf2adbe · verified · 1 Parent(s): bc00f93

Upload app.py

Files changed (1)
  1. app.py +115 -0
app.py ADDED
@@ -0,0 +1,115 @@
+ import cv2
+ import matplotlib.pyplot as plt
+ import streamlit as st
+ from deepface import DeepFace
+ import mediapipe
+ import os
+ import tempfile
+
+ # Face-detector backends supported by DeepFace
+ backends = [
+     'opencv',
+     'ssd',
+     'dlib',
+     'mtcnn',
+     'fastmtcnn',
+     'retinaface',
+     'mediapipe',
+     'yolov8',
+     'yunet',
+     'centerface',
+ ]
+ # Distance metrics and recognition models supported by DeepFace
+ metrics = ["cosine", "euclidean", "euclidean_l2"]
+ models = [
+     "VGG-Face",
+     "Facenet",
+     "Facenet512",
+     "OpenFace",
+     "DeepFace",
+     "DeepID",
+     "ArcFace",
+     "Dlib",
+     "SFace",
+     "GhostFaceNet",
+ ]
+
+ def verify(img1, img2, model_name, backend, metric):
+     # Save the uploaded images to temporary files so OpenCV can read them
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_img1:
+         temp_img1.write(img1.read())
+         temp_img1_path = temp_img1.name
+
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_img2:
+         temp_img2.write(img2.read())
+         temp_img2_path = temp_img2.name
+
+     img1p = cv2.imread(temp_img1_path)
+     img2p = cv2.imread(temp_img2_path)
+
+     # MediaPipe face detection is used only to draw bounding boxes for display
+     face_detect = mediapipe.solutions.face_detection
+     face_detector = face_detect.FaceDetection(min_detection_confidence=0.6)
+
+     width1, height1 = img1p.shape[1], img1p.shape[0]
+     width2, height2 = img2p.shape[1], img2p.shape[0]
+
+     # cv2.imread returns BGR, while MediaPipe expects RGB input
+     result1 = face_detector.process(cv2.cvtColor(img1p, cv2.COLOR_BGR2RGB))
+     result2 = face_detector.process(cv2.cvtColor(img2p, cv2.COLOR_BGR2RGB))
+     if result1.detections is not None:
+         for face in result1.detections:
+             if face.score[0] > 0.80:
+                 bounding_box = face.location_data.relative_bounding_box
+                 x = int(bounding_box.xmin * width1)
+                 w = int(bounding_box.width * width1)
+                 y = int(bounding_box.ymin * height1)
+                 h = int(bounding_box.height * height1)
+                 cv2.rectangle(img1p, (x, y), (x+w, y+h), color=(126, 133, 128), thickness=10)
+     if result2.detections is not None:
+         for face in result2.detections:
+             if face.score[0] > 0.80:
+                 bounding_box = face.location_data.relative_bounding_box
+                 x = int(bounding_box.xmin * width2)
+                 w = int(bounding_box.width * width2)
+                 y = int(bounding_box.ymin * height2)
+                 h = int(bounding_box.height * height2)
+                 cv2.rectangle(img2p, (x, y), (x+w, y+h), color=(126, 133, 128), thickness=10)
+
+     # The arrays are still BGR, so tell Streamlit the channel order
+     st.image([img1p, img2p], caption=["Image 1", "Image 2"], width=200, channels="BGR")
+
+     # DeepFace runs its own detection and alignment with the selected backend
+     face = DeepFace.verify(img1p, img2p, model_name=model_name, detector_backend=backend, distance_metric=metric)
+     verification = face["verified"]
+
+     # Remove the temporary files once verification is done
+     os.remove(temp_img1_path)
+     os.remove(temp_img2_path)
+
+     if verification:
+         st.write("Matched")
+     else:
+         st.write("Not Matched")
+
+ # Streamlit app
+ def main():
+     st.title("Face Verification App")
+     tab_selection = st.sidebar.selectbox("Select Functionality", ["Face Verification", "Face Recognition", "Celebrity Lookalike", "Age and Emotions Detection"])
+
+     if tab_selection == "Face Verification":
+         st.header("Face Verification")
+         model_name = st.selectbox("Select Model", models)
+         backend = st.selectbox("Select Backend", backends)
+         metric = st.selectbox("Select Metric", metrics)
+
+         uploaded_img1 = st.file_uploader("Upload Image 1", type=["jpg", "png"])
+         uploaded_img2 = st.file_uploader("Upload Image 2", type=["jpg", "png"])
+
+         if uploaded_img1 and uploaded_img2:
+             if st.button("Verify Faces"):
+                 verify(uploaded_img1, uploaded_img2, model_name, backend, metric)
+
+ # Run the app
+ if __name__ == "__main__":
+     main()