17skb committed
Commit 0fef0a0 · 1 Parent(s): c1dff45
.gitignore ADDED
@@ -0,0 +1,2 @@
+ # Will later remove that commit
+ known_faces/
Dockerfile ADDED
@@ -0,0 +1,16 @@
+ FROM python:3.10-slim
+
+ # Install g++
+ RUN apt-get update && apt-get install -y g++
+
+ WORKDIR /app
+
+ COPY ./ /app
+
+ # For GPU support:
+ RUN pip3 install --extra-index-url https://download.pytorch.org/whl/cu118 torch torchvision
+
+ RUN pip install -r requirements.txt
+
+
+ CMD fastapi run --reload --host=0.0.0.0 --port=7860
Dockerfile.update ADDED
@@ -0,0 +1,3 @@
+ FROM attend_b-hf
+
+ RUN pip install python-multipart opencv-python-headless pillow
__pycache__/app.cpython-310.pyc ADDED
Binary file (3.39 kB)
 
__pycache__/face_recognition_system.cpython-310.pyc ADDED
Binary file (4.29 kB)
 
activate_env.bat ADDED
@@ -0,0 +1,2 @@
+ @echo off
+ conda activate E:\INTERNSHIP\ongc\USING_DEEPLEARNING\table-transformer\env
app.py ADDED
@@ -0,0 +1,330 @@
+ #4:
+ from fastapi import FastAPI, File, UploadFile, HTTPException
+ from fastapi.responses import JSONResponse
+ from fastapi.middleware.cors import CORSMiddleware
+ import numpy as np
+ import cv2
+ import base64
+ import logging
+ import os
+ from pathlib import Path
+ from face_recognition_system import FaceRecognitionSystem
+
+ # Set up logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+ # Initialize FastAPI app
+ app = FastAPI(
+     title="Face Recognition API",
+     description="API for face detection and recognition using InsightFace",
+     version="1.0.0"
+ )
+
+ # Add CORS middleware for Hugging Face Spaces
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ # Create necessary directories
+ MODELS_DIR = Path("models")
+ KNOWN_FACES_DIR = Path("known_faces")
+ for directory in [MODELS_DIR, KNOWN_FACES_DIR]:
+     directory.mkdir(parents=True, exist_ok=True)
+
+ # Initialize face recognition system
+ try:
+     face_recog_system = FaceRecognitionSystem(
+         model_name="buffalo_l",
+         model_root=str(MODELS_DIR)
+     )
+     face_recog_system.process_known_faces(str(KNOWN_FACES_DIR))
+     logger.info("Face recognition system initialized successfully")
+ except Exception as e:
+     logger.error(f"Failed to initialize face recognition system: {e}")
+     raise
+
+ @app.get("/")
+ async def root():
+     """Health check endpoint"""
+     model_files = list(MODELS_DIR.glob("*"))
+     known_faces = list(KNOWN_FACES_DIR.glob("*"))
+     return {
+         "status": "ok",
+         "message": "Face Recognition API is running",
+         "model_directory": str(MODELS_DIR),
+         "known_faces_directory": str(KNOWN_FACES_DIR),
+         "model_files": [str(f.name) for f in model_files],
+         "known_faces": [str(f.name) for f in known_faces]
+     }
+
+ @app.post("/detect_faces")
+ async def detect_faces(file: UploadFile = File(...)):
+     """
+     Endpoint to detect and identify faces in an uploaded image
+     """
+     try:
+         # Validate file type
+         if not file.content_type.startswith('image/'):
+             raise HTTPException(status_code=400, detail="File must be an image")
+
+         # Read and decode image
+         image_data = await file.read()
+         nparr = np.frombuffer(image_data, np.uint8)
+         img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+
+         if img is None:
+             raise HTTPException(status_code=400, detail="Failed to decode image")
+
+         # Process image
+         detected_img = face_recog_system.detect_and_identify(img)
+
+         # Encode processed image to base64
+         success, buffer = cv2.imencode('.jpg', detected_img)
+         if not success:
+             raise HTTPException(status_code=500, detail="Failed to encode processed image")
+
+         processed_image_base64 = base64.b64encode(buffer).decode("utf-8")
+
+         # Prepare response
+         serializable_embeddings = {
+             name: embedding.tolist() if isinstance(embedding, np.ndarray) else embedding
+             for name, embedding in face_recog_system.known_face_embeddings.items()
+         }
+
+         return JSONResponse(content={
+             "status": "success",
+             "processed_image": processed_image_base64,
+             "faces": serializable_embeddings
+         })
+
+     except HTTPException as he:
+         raise he
+     except Exception as e:
+         logger.error(f"Error processing image: {e}")
+         raise HTTPException(status_code=500, detail=str(e))
+
+ # Configuration for Hugging Face Spaces
+ if __name__ == "__main__":
+     import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=7860)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ #3:
+
+ # from fastapi import FastAPI, File, UploadFile, HTTPException
+ # from fastapi.responses import JSONResponse
+ # from fastapi.middleware.cors import CORSMiddleware
+ # import numpy as np
+ # import cv2
+ # import base64
+ # import logging
+ # from face_recognition_system import FaceRecognitionSystem
+
+ # # Set up logging
+ # logging.basicConfig(
+ #     level=logging.INFO,
+ #     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ # )
+ # logger = logging.getLogger(__name__)
+
+ # # Initialize FastAPI app
+ # app = FastAPI(
+ #     title="Face Recognition API",
+ #     description="API for face detection and recognition using InsightFace",
+ #     version="1.0.0"
+ # )
+
+ # # Add CORS middleware for Hugging Face Spaces
+ # app.add_middleware(
+ #     CORSMiddleware,
+ #     allow_origins=["*"],
+ #     allow_credentials=True,
+ #     allow_methods=["*"],
+ #     allow_headers=["*"],
+ # )
+
+ # # Initialize face recognition system
+ # try:
+ #     face_recog_system = FaceRecognitionSystem()
+ #     # Update the path to match your Hugging Face Spaces directory structure
+ #     face_recog_system.process_known_faces("known_faces")
+ #     logger.info("Face recognition system initialized successfully")
+ # except Exception as e:
+ #     logger.error(f"Failed to initialize face recognition system: {e}")
+ #     raise
+
+ # @app.get("/")
+ # async def root():
+ #     """Health check endpoint"""
+ #     return {"status": "ok", "message": "Face Recognition API is running"}
+
+ # @app.post("/detect_faces")
+ # async def detect_faces(file: UploadFile = File(...)):
+ #     """
+ #     Endpoint to detect and identify faces in an uploaded image
+ #     """
+ #     try:
+ #         # Validate file type
+ #         if not file.content_type.startswith('image/'):
+ #             raise HTTPException(status_code=400, detail="File must be an image")
+
+ #         # Read and decode image
+ #         image_data = await file.read()
+ #         nparr = np.frombuffer(image_data, np.uint8)
+ #         img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+
+ #         if img is None:
+ #             raise HTTPException(status_code=400, detail="Failed to decode image")
+
+ #         # Process image
+ #         detected_img = face_recog_system.detect_and_identify(img)
+
+ #         # Encode processed image to base64
+ #         success, buffer = cv2.imencode('.jpg', detected_img)
+ #         if not success:
+ #             raise HTTPException(status_code=500, detail="Failed to encode processed image")
+
+ #         processed_image_base64 = base64.b64encode(buffer).decode("utf-8")
+
+ #         # Prepare response
+ #         serializable_embeddings = {
+ #             name: embedding.tolist() if isinstance(embedding, np.ndarray) else embedding
+ #             for name, embedding in face_recog_system.known_face_embeddings.items()
+ #         }
+
+ #         return JSONResponse(content={
+ #             "status": "success",
+ #             "processed_image": processed_image_base64,
+ #             "faces": serializable_embeddings
+ #         })
+
+ #     except HTTPException as he:
+ #         raise he
+ #     except Exception as e:
+ #         logger.error(f"Error processing image: {e}")
+ #         raise HTTPException(status_code=500, detail=str(e))
+
+ # # Configuration for Hugging Face Spaces
+ # if __name__ == "__main__":
+ #     import uvicorn
+ #     uvicorn.run(app, host="0.0.0.0", port=7860)
+
+
+
+
+
+ # initial:
+ # from fastapi import FastAPI
+
+ # app = FastAPI()
+
+ # @app.get("/")
+ # def home():
+ #     '''Health check'''
+ #     return {"Everything's": "OK ✅"}
+
+
+ # final:
+ # #2
+ # from fastapi import FastAPI, File, UploadFile
+ # from fastapi.responses import JSONResponse
+ # import numpy as np
+ # import cv2
+ # import base64
+ # import logging
+ # from face_recognition_system import FaceRecognitionSystem  # import your class
+
+ # # Set up logging
+ # logging.basicConfig(level=logging.INFO)
+
+ # app = FastAPI()
+ # face_recog_system = FaceRecognitionSystem()
+
+ # # Load known faces
+ # try:
+ #     face_recog_system.process_known_faces("./data/known/custom/")
+ #     logging.info("Loaded known faces successfully.")
+ # except Exception as e:
+ #     logging.error(f"Error loading known faces: {e}")
+
+ # @app.post("/detect_faces")
+ # async def detect_faces(file: UploadFile = File(...)):
+ #     try:
+ #         # Read and decode image from the uploaded file
+ #         image_data = await file.read()
+ #         nparr = np.frombuffer(image_data, np.uint8)
+ #         img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+
+ #         # Check if image is loaded
+ #         if img is None:
+ #             logging.error("Failed to decode image. Ensure the uploaded file is a valid image.")
+ #             return JSONResponse(content={"error": "Invalid image file"}, status_code=400)
+
+ #         # Run detection and identification
+ #         detected_img = face_recog_system.detect_and_identify(img)
+
+ #         # Encode image to base64
+ #         success, buffer = cv2.imencode('.jpg', detected_img)
+ #         if not success:
+ #             logging.error("Image encoding failed.")
+ #             return JSONResponse(content={"error": "Image encoding failed"}, status_code=500)
+
+ #         processed_image_base64 = base64.b64encode(buffer).decode("utf-8")
+
+ #         # Optional: Check if face embeddings were created
+ #         if not face_recog_system.known_face_embeddings:
+ #             logging.warning("No faces detected.")
+
+ #         # NOTE:
+ #         # Convert numpy arrays to lists for JSON serialization
+ #         serializable_embeddings = {
+ #             name: embedding.tolist() if isinstance(embedding, np.ndarray) else embedding
+ #             for name, embedding in face_recog_system.known_face_embeddings.items()
+ #         }
+ #         return JSONResponse(content={
+ #             "processed_image": processed_image_base64,
+ #             "faces": serializable_embeddings
+ #         })
+
+ #         # return JSONResponse(content={"processed_image": processed_image_base64, "faces": face_recog_system.known_face_embeddings})
+
+ #     except Exception as e:
+ #         logging.error(f"Error processing image: {e}")
+ #         return JSONResponse(content={"error": "An error occurred while processing the image"}, status_code=500)
+
+
+ # # main:
+ # # NOTE: ALWAYS FIRST CHECK IPv4-Address via: <ipconfig>
+ # # import uvicorn
+ # # if __name__ == "__main__":
+ # #     uvicorn.run(app='app:app',
+ # #                 host='192.168.1.17', port=7860, reload=True)
+
+
+
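For quick manual testing of the `/detect_faces` route added above, a client along these lines should work. This is a sketch, not part of the commit; it assumes the app is reachable on `localhost:7860` (the port used in the Dockerfile) and that `group.jpg` is any local test image.

```python
# Hypothetical client for the /detect_faces endpoint (illustrative only).
# Assumes app.py is serving on localhost:7860 and that group.jpg exists locally.
import base64

import requests

API_URL = "http://localhost:7860/detect_faces"  # assumed host/port

with open("group.jpg", "rb") as f:
    # The endpoint reads a multipart field named "file" (UploadFile = File(...))
    resp = requests.post(API_URL, files={"file": ("group.jpg", f, "image/jpeg")})
resp.raise_for_status()

payload = resp.json()
# "processed_image" holds the annotated frame as base64-encoded JPEG bytes
with open("annotated.jpg", "wb") as out:
    out.write(base64.b64decode(payload["processed_image"]))
print(payload["status"], "-", list(payload["faces"].keys()))
```

Note that the response's `faces` field currently returns all known-face embeddings, not only the identities found in the uploaded image.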
face_recognition_system.py ADDED
@@ -0,0 +1,424 @@
+ # 2:
+ import cv2
+ import numpy as np
+ import insightface
+ from insightface.app import FaceAnalysis
+ from insightface.utils import download_onnx
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+ import pickle
+ import logging
+ import os
+
+ class FaceRecognitionSystem:
+     def __init__(self, model_name: str = "buffalo_l", model_root: str = "./models"):
+         # Set up logging
+         logging.basicConfig(level=logging.INFO)
+         self.logger = logging.getLogger(__name__)
+
+         # Create model directory if it doesn't exist
+         self.model_root = Path(model_root)
+         self.model_root.mkdir(parents=True, exist_ok=True)
+
+         # Set InsightFace model root
+         # insightface.utils.set_download_root(str(self.model_root))
+         # insightface.utils.download(root='./models_x', sub_dir='downloads', name='file')
+
+         # Initialize the face analysis model
+         try:
+             self.face_analyzer = FaceAnalysis(
+                 name=model_name,
+                 root=str(self.model_root),
+                 download=True  # Allow downloading if model doesn't exist
+             )
+             self.face_analyzer.prepare(ctx_id=-1, det_size=(640, 640))  # Using CPU
+             self.logger.info(f"Face analyzer initialized successfully in {self.model_root}")
+         except Exception as e:
+             self.logger.error(f"Error initializing face analyzer: {e}")
+             raise
+
+         # Dictionary to store known face embeddings
+         self.known_face_embeddings: Dict[str, np.ndarray] = {}
+
+     def process_known_faces(self, people_folder_path: str) -> None:
+         """Process and store embeddings of known faces from a folder."""
+         embeddings_file = self.model_root / "known_faces_embeddings.pkl"
+
+         try:
+             # Load existing embeddings if available
+             if embeddings_file.exists():
+                 with open(embeddings_file, 'rb') as f:
+                     self.known_face_embeddings = pickle.load(f)
+                 self.logger.info("Loaded existing face embeddings")
+                 return
+
+             self.logger.info("Processing known faces...")
+             people_path = Path(people_folder_path)
+             if not people_path.exists():
+                 self.logger.warning(f"Directory not found: {people_folder_path}")
+                 return
+
+             for person_path in people_path.glob("*"):
+                 if person_path.is_dir():
+                     person_name = person_path.name
+                     embeddings_list = []
+
+                     for img_path in person_path.glob("*"):
+                         if img_path.suffix.lower() in ['.jpg', '.jpeg', '.png']:
+                             img = cv2.imread(str(img_path))
+                             if img is None:
+                                 self.logger.warning(f"Could not read image: {img_path}")
+                                 continue
+
+                             faces = self.face_analyzer.get(img)
+                             if faces:
+                                 embeddings_list.append(faces[0].embedding)
+                             else:
+                                 self.logger.warning(f"No face detected in {img_path}")
+
+                     if embeddings_list:
+                         self.known_face_embeddings[person_name] = np.mean(embeddings_list, axis=0)
+                         self.logger.info(f"Processed {person_name}'s faces")
+                     else:
+                         self.logger.warning(f"No valid faces found for {person_name}")
+
+             # Save embeddings in model directory
+             with open(embeddings_file, 'wb') as f:
+                 pickle.dump(self.known_face_embeddings, f)
+             self.logger.info(f"Face embeddings saved to {embeddings_file}")
+
+         except Exception as e:
+             self.logger.error(f"Error processing known faces: {e}")
+             raise
+
+     def identify_face(self, face_embedding: np.ndarray, threshold: float = 0.6) -> Tuple[str, float]:
+         """Identify a face by comparing its embedding with known faces."""
+         try:
+             best_match = "Unknown"
+             best_score = float('inf')
+
+             for person_name, known_embedding in self.known_face_embeddings.items():
+                 similarity = np.dot(face_embedding, known_embedding) / (
+                     np.linalg.norm(face_embedding) * np.linalg.norm(known_embedding)
+                 )
+                 distance = 1 - similarity
+
+                 if distance < best_score:
+                     best_score = distance
+                     best_match = person_name
+
+             return (best_match, best_score) if best_score < threshold else ("Unknown", best_score)
+
+         except Exception as e:
+             self.logger.error(f"Error in face identification: {e}")
+             return ("Error", 1.0)
+
+     def detect_and_identify(self, image_input) -> np.ndarray:
+         """Detect and identify faces in an input image."""
+         try:
+             # Handle both string paths and numpy arrays
+             if isinstance(image_input, str):
+                 img = cv2.imread(image_input)
+             else:
+                 img = image_input
+
+             if img is None:
+                 raise ValueError("Could not read input image")
+
+             faces = self.face_analyzer.get(img)
+
+             for face in faces:
+                 bbox = face.bbox.astype(int)
+                 embedding = face.embedding
+                 name, score = self.identify_face(embedding)
+
+                 cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)
+                 label = f"{name} ({1-score:.2f})"
+
+                 cv2.putText(img, label.upper(), (bbox[0], bbox[1]-10),
+                             cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 255, 0), 2)
+
+             return img
+
+         except Exception as e:
+             self.logger.error(f"Error in detection and identification: {e}")
+             raise
+
+
+ # 1:
+ # import cv2
+ # import numpy as np
+ # import insightface
+ # from insightface.app import FaceAnalysis
+ # from pathlib import Path
+ # from typing import Dict, List, Tuple
+ # import pickle
+ # import logging
+
+ # class FaceRecognitionSystem:
+ #     def __init__(self, model_name: str = "buffalo_l"):
+ #         # Set up logging
+ #         logging.basicConfig(level=logging.INFO)
+ #         self.logger = logging.getLogger(__name__)
+
+ #         # Initialize the face analysis model
+ #         try:
+ #             self.face_analyzer = FaceAnalysis(name=model_name)
+ #             self.face_analyzer.prepare(ctx_id=-1, det_size=(640, 640))  # Using CPU
+ #             self.logger.info("Face analyzer initialized successfully")
+ #         except Exception as e:
+ #             self.logger.error(f"Error initializing face analyzer: {e}")
+ #             raise
+
+ #         # Dictionary to store known face embeddings
+ #         self.known_face_embeddings: Dict[str, np.ndarray] = {}
+
+ #     def process_known_faces(self, people_folder_path: str) -> None:
+ #         """Process and store embeddings of known faces from a folder."""
+ #         embeddings_file = Path("known_faces_embeddings.pkl")
+
+ #         try:
+ #             # Load existing embeddings if available
+ #             if embeddings_file.exists():
+ #                 with open(embeddings_file, 'rb') as f:
+ #                     self.known_face_embeddings = pickle.load(f)
+ #                 self.logger.info("Loaded existing face embeddings")
+ #                 return
+
+ #             self.logger.info("Processing known faces...")
+ #             people_path = Path(people_folder_path)
+ #             if not people_path.exists():
+ #                 self.logger.warning(f"Directory not found: {people_folder_path}")
+ #                 return
+
+ #             for person_path in people_path.glob("*"):
+ #                 if person_path.is_dir():
+ #                     person_name = person_path.name
+ #                     embeddings_list = []
+
+ #                     for img_path in person_path.glob("*"):
+ #                         if img_path.suffix.lower() in ['.jpg', '.jpeg', '.png']:
+ #                             img = cv2.imread(str(img_path))
+ #                             if img is None:
+ #                                 self.logger.warning(f"Could not read image: {img_path}")
+ #                                 continue
+
+ #                             faces = self.face_analyzer.get(img)
+ #                             if faces:
+ #                                 embeddings_list.append(faces[0].embedding)
+ #                             else:
+ #                                 self.logger.warning(f"No face detected in {img_path}")
+
+ #                     if embeddings_list:
+ #                         self.known_face_embeddings[person_name] = np.mean(embeddings_list, axis=0)
+ #                         self.logger.info(f"Processed {person_name}'s faces")
+ #                     else:
+ #                         self.logger.warning(f"No valid faces found for {person_name}")
+
+ #             # Save embeddings
+ #             with open(embeddings_file, 'wb') as f:
+ #                 pickle.dump(self.known_face_embeddings, f)
+ #             self.logger.info("Face processing complete")
+
+ #         except Exception as e:
+ #             self.logger.error(f"Error processing known faces: {e}")
+ #             raise
+
+ #     def identify_face(self, face_embedding: np.ndarray, threshold: float = 0.6) -> Tuple[str, float]:
+ #         """Identify a face by comparing its embedding with known faces."""
+ #         try:
+ #             best_match = "Unknown"
+ #             best_score = float('inf')
+
+ #             for person_name, known_embedding in self.known_face_embeddings.items():
+ #                 similarity = np.dot(face_embedding, known_embedding) / (
+ #                     np.linalg.norm(face_embedding) * np.linalg.norm(known_embedding)
+ #                 )
+ #                 distance = 1 - similarity
+
+ #                 if distance < best_score:
+ #                     best_score = distance
+ #                     best_match = person_name
+
+ #             return (best_match, best_score) if best_score < threshold else ("Unknown", best_score)
+
+ #         except Exception as e:
+ #             self.logger.error(f"Error in face identification: {e}")
+ #             return ("Error", 1.0)
+
+ #     def detect_and_identify(self, image_input) -> np.ndarray:
+ #         """Detect and identify faces in an input image."""
+ #         try:
+ #             # Handle both string paths and numpy arrays
+ #             if isinstance(image_input, str):
+ #                 img = cv2.imread(image_input)
+ #             else:
+ #                 img = image_input
+
+ #             if img is None:
+ #                 raise ValueError("Could not read input image")
+
+ #             faces = self.face_analyzer.get(img)
+
+ #             for face in faces:
+ #                 bbox = face.bbox.astype(int)
+ #                 embedding = face.embedding
+ #                 name, score = self.identify_face(embedding)
+
+ #                 cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)
+ #                 label = f"{name} ({1-score:.2f})"
+
+ #                 cv2.putText(img, label.upper(), (bbox[0], bbox[1]-10),
+ #                             cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 255, 0), 2)
+
+ #             return img
+
+ #         except Exception as e:
+ #             self.logger.error(f"Error in detection and identification: {e}")
+ #             raise
+
+
+
+ # OLD:
+ # import cv2
+ # import numpy as np
+ # import insightface
+ # from insightface.app import FaceAnalysis
+ # from insightface.data import get_image as ins_get_image
+ # import os
+ # from pathlib import Path
+ # from typing import Dict, List, Tuple
+ # import pickle
+
+ # class FaceRecognitionSystem:
+ #     def __init__(self, model_name: str = "buffalo_l"):
+ #         # Initialize the face analysis model
+ #         self.face_analyzer = FaceAnalysis(name=model_name)
+ #         self.face_analyzer.prepare(ctx_id=0, det_size=(640, 640))
+
+ #         # Dictionary to store known face embeddings
+ #         self.known_face_embeddings: Dict[str, np.ndarray] = {}
+
+ #     def process_known_faces(self, people_folder_path: str) -> None:
+ #         """Process and store embeddings of known faces from a folder."""
+ #         # Create embeddings file path
+ #         # embeddings_file = Path("known_face_embeddings copy2.pkl")
+ #         embeddings_file = Path("data/model/known_faces_embeddings.pkl")
+
+ #         # Load existing embeddings if available
+ #         if embeddings_file.exists():
+ #             with open(embeddings_file, 'rb') as f:
+ #                 self.known_face_embeddings = pickle.load(f)
+ #             print("Loaded existing face embeddings.")
+ #             return
+
+ #         print("Processing known faces...")
+ #         for person_path in Path(people_folder_path).glob("*"):
+ #             if person_path.is_dir():
+ #                 person_name = person_path.name
+ #                 embeddings_list = []
+
+ #                 # Process each image in person's folder
+ #                 for img_path in person_path.glob("*"):
+ #                     if img_path.suffix.lower() in ['.jpg', '.jpeg', '.png']:
+ #                         img = cv2.imread(str(img_path))
+ #                         if img is None:
+ #                             continue
+
+ #                         # Get face embedding
+ #                         faces = self.face_analyzer.get(img)
+ #                         if faces:
+ #                             embeddings_list.append(faces[0].embedding)
+
+ #                 if embeddings_list:
+ #                     # Average all embeddings for this person
+ #                     self.known_face_embeddings[person_name] = np.mean(embeddings_list, axis=0)
+ #                     print(f"Processed {person_name}'s faces")
+
+ #         # Save embeddings for future use
+ #         with open(embeddings_file, 'wb') as f:
+ #             pickle.dump(self.known_face_embeddings, f)
+ #         print("Face processing complete.")
+
+ #     # OLD:
+ #     def identify_face(self, face_embedding: np.ndarray, threshold: float = 0.6) -> Tuple[str, float]:
+ #         """Identify a face by comparing its embedding with known faces."""
+ #         best_match = "Unknown"
+ #         best_score = float('inf')
+
+ #         for person_name, known_embedding in self.known_face_embeddings.items():
+ #             # Calculate cosine similarity
+ #             similarity = np.dot(face_embedding, known_embedding) / (
+ #                 np.linalg.norm(face_embedding) * np.linalg.norm(known_embedding)
+ #             )
+ #             distance = 1 - similarity
+
+ #             if distance < best_score:
+ #                 best_score = distance
+ #                 best_match = person_name
+
+ #         return (best_match, best_score) if best_score < threshold else ("Unknown", best_score)
+
+
+
+ #     def detect_and_identify(self, image_input, output_path: str = None) -> np.ndarray:
+ #         """Detect and identify faces in an input image."""
+ #         # Handle both string paths and numpy arrays
+ #         if isinstance(image_input, str):
+ #             img = cv2.imread(image_input)
+ #         else:
+ #             img = image_input
+
+ #         if img is None:
+ #             raise ValueError("Could not read input image")
+
+ #         # Rest of the code remains the same
+ #         faces = self.face_analyzer.get(img)
+
+ #         for face in faces:
+ #             bbox = face.bbox.astype(int)
+ #             embedding = face.embedding
+ #             name, score = self.identify_face(embedding)
+
+ #             cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)
+ #             label = f"{name} ({1-score:.2f})"
+
+ #             cv2.putText(img, label.upper(), (bbox[0], bbox[1]-10),
+ #                         cv2.FONT_HERSHEY_PLAIN, 4.2, (0, 255, 0), 2)
+ #             # cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
+
+ #         if output_path:
+ #             cv2.imwrite(output_path, img)
+
+ #         return img
+
+
+ # #     def detect_and_identify(self, image_path: str, output_path: str = None) -> np.ndarray:
+ # #         """Detect and identify faces in an input image."""
+ # #         # Read input image
+ # #         img = cv2.imread(image_path)
+ # #         if img is None:
+ # #             raise ValueError("Could not read input image")
+
+ # #         # Detect faces
+ # #         faces = self.face_analyzer.get(img)
+
+ # #         # Draw results on image
+ # #         for face in faces:
+ # #             bbox = face.bbox.astype(int)
+ # #             embedding = face.embedding
+ # #             name, score = self.identify_face(embedding)
+
+ # #             # Draw rectangle around face
+ # #             cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)
+
+ # #             # Add name and confidence score
+ # #             label = f"{name} ({1-score:.2f})"
+ # #             cv2.putText(img, label, (bbox[0], bbox[1]-10),
+ # #                         cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
+
+ # #         # Save output image if path provided
+ # #         if output_path:
+ # #             cv2.imwrite(output_path, img)
+
+ # #         return img
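As a rough orientation (not part of this commit), the class above can also be driven standalone as sketched below. The `known_faces/<person>/*.jpg` layout follows how `process_known_faces` walks sub-directories; the specific file names are illustrative assumptions.

```python
# Hypothetical standalone usage of FaceRecognitionSystem (paths are illustrative).
import cv2

from face_recognition_system import FaceRecognitionSystem

# Downloads/caches the buffalo_l models under ./models and pickles embeddings there.
frs = FaceRecognitionSystem(model_name="buffalo_l", model_root="./models")

# One sub-folder per person, e.g. known_faces/alice/01.jpg, known_faces/bob/01.jpg
frs.process_known_faces("known_faces")

# Annotate a test image with names and (1 - distance) confidence scores.
img = cv2.imread("test.jpg")
annotated = frs.detect_and_identify(img)
cv2.imwrite("test_annotated.jpg", annotated)
```

Matching in `identify_face` uses cosine distance (1 - cosine similarity) against each person's mean embedding, and anything at or above the 0.6 threshold is reported as "Unknown".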
known_faces_embeddings.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0839cd316f561634361c53ce48b8240d822cec7ec7ba3f567c5e10471fdcf342
+ size 10573
models/known_faces_embeddings.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0839cd316f561634361c53ce48b8240d822cec7ec7ba3f567c5e10471fdcf342
+ size 10573
models/models/buffalo_l.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80ffe37d8a5940d59a7384c201a2a38d4741f2f3c51eef46ebb28218a7b0ca2f
+ size 288621354
models/models/buffalo_l/1k3d68.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df5c06b8a0c12e422b2ed8947b8869faa4105387f199c477af038aa01f9a45cc
+ size 143607619
models/models/buffalo_l/2d106det.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f001b856447c413801ef5c42091ed0cd516fcd21f2d6b79635b1e733a7109dbf
+ size 5030888
models/models/buffalo_l/det_10g.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5838f7fe053675b1c7a08b633df49e7af5495cee0493c7dcf6697200b85b5b91
+ size 16923827
models/models/buffalo_l/genderage.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fde69b1c810857b88c64a335084f1c3fe8f01246c9a191b48c7bb756d6652fb
+ size 1322532
models/models/buffalo_l/w600k_r50.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c06341c33c2ca1f86781dab0e829f88ad5b64be9fba56e56bc9ebdefc619e43
+ size 174383860
notes.ipynb ADDED
@@ -0,0 +1,48 @@
+ {
+  "cells": [
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "# **Docker NOTES 📝🖌️**\n",
+     "\n",
+     "1. **Create `Dockerfile`**\n",
+     "2. **`docker buildx build -t attendify_fastapi_backend-hf .`**\n",
+     "3. **`docker run -v E:\MULTIFACE_RECOGNITION_CLASSROOM\Backend\ATTENDIFY_BACKEND:/app --name attendify_fastapi_backend_container -p 7860:7860 attendify_fastapi_backend-hf`**"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "* **Updating Docker Image without rebuilding again and only including the necessary packages:**\n",
+     " \n",
+     " **`Dockerfile.update`**: \n",
+     "\n",
+     " `FROM attend_b-hf`\n",
+     " \n",
+     " `RUN pip install <new_package_1> <new_package_2>`\n",
+     "\n",
+     " Then run:\n",
+     "\n",
+     " `docker build -t attend_b-hf-update -f Dockerfile.update .`"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "# **FastAPI NOTES**\n",
+     "\n",
+     "1. **Learning about: Middlewares & CORS (`Cross Origin Resource Sharing`)**"
+    ]
+   }
+  ],
+  "metadata": {
+   "language_info": {
+    "name": "python"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ fastapi[all]
+ # Installed Torch & Torchvision: Via Dockerfile
+ # torch
+ # torchvision
+ onnxruntime
+ insightface
+
+ python-multipart
+ opencv-python-headless
+ pillow