wuhp committed
Commit af52f8e · verified · 1 Parent(s): 5e61afe

Update app.py

Files changed (1)
  1. app.py +20 -90
app.py CHANGED
@@ -12,21 +12,15 @@ from typing import Optional, Dict, List, Tuple
 from dataclasses import dataclass, field
 from collections import Counter
 
-# Gradio
 import gradio as gr
 
-# PyTorch, YOLO, FaceNet, and deep_sort
 from ultralytics import YOLO
 from facenet_pytorch import InceptionResnetV1
 from torchvision import transforms
 from deep_sort_realtime.deepsort_tracker import DeepSort
 
-# Mediapipe for face mesh, iris detection, blink detection, and hand tracking
 import mediapipe as mp
 
-# --------------------------------------------------------------------
-# LOGGING
-# --------------------------------------------------------------------
 logging.basicConfig(
     level=logging.INFO,
     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
@@ -34,31 +28,22 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)
 
-# Mute debug logs from third-party libraries
 logging.getLogger('torch').setLevel(logging.ERROR)
 logging.getLogger('mediapipe').setLevel(logging.ERROR)
 logging.getLogger('deep_sort_realtime').setLevel(logging.ERROR)
 
-# --------------------------------------------------------------------
-# CONSTANTS
-# --------------------------------------------------------------------
 DEFAULT_MODEL_URL = "https://github.com/wuhplaptop/face-11-n/blob/main/face2.pt?raw=true"
 DEFAULT_DB_PATH = os.path.expanduser("~/.face_pipeline/known_faces.pkl")
 MODEL_DIR = os.path.expanduser("~/.face_pipeline/models")
 CONFIG_PATH = os.path.expanduser("~/.face_pipeline/config.pkl")
 
-# Landmark indices for blink detection
 LEFT_EYE_IDX = [33, 160, 158, 133, 153, 144]
 RIGHT_EYE_IDX = [263, 387, 385, 362, 380, 373]
 
-# Mediapipe references
 mp_drawing = mp.solutions.drawing_utils
 mp_face_mesh = mp.solutions.face_mesh
 mp_hands = mp.solutions.hands
 
-# --------------------------------------------------------------------
-# DATACLASS: PipelineConfig
-# --------------------------------------------------------------------
 @dataclass
 class PipelineConfig:
     detector: Dict = field(default_factory=dict)
@@ -141,9 +126,6 @@ class PipelineConfig:
             logger.error(f"Config load failed: {str(e)}")
             return cls()
 
-# --------------------------------------------------------------------
-# FACE DATABASE
-# --------------------------------------------------------------------
 class FaceDatabase:
     def __init__(self, db_path: str = DEFAULT_DB_PATH):
         self.db_path = db_path
@@ -208,9 +190,6 @@ class FaceDatabase:
             results.append((lbl, similarity))
         return sorted(results, key=lambda x: x[1], reverse=True)
 
-# --------------------------------------------------------------------
-# YOLO FACE DETECTOR
-# --------------------------------------------------------------------
 class YOLOFaceDetector:
     def __init__(self, model_path: str, device: str = 'cpu'):
         self.model = None
@@ -250,9 +229,6 @@ class YOLOFaceDetector:
             logger.error(f"Detection error: {str(e)}")
             return []
 
-# --------------------------------------------------------------------
-# FACE TRACKER (DeepSort)
-# --------------------------------------------------------------------
 class FaceTracker:
     def __init__(self, max_age: int = 30):
         self.tracker = DeepSort(max_age=max_age, embedder='mobilenet')
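
For reference, deep_sort_realtime's DeepSort (as constructed above) is driven by passing per-frame detections as ([left, top, width, height], confidence, class) tuples to update_tracks(); a minimal sketch with one made-up detection and a placeholder frame:

    import numpy as np
    from deep_sort_realtime.deepsort_tracker import DeepSort

    tracker = DeepSort(max_age=30, embedder='mobilenet')

    frame = np.zeros((480, 640, 3), dtype=np.uint8)   # placeholder frame
    detections = [([100, 120, 80, 80], 0.92, 0)]      # LTWH box, confidence, class

    tracks = tracker.update_tracks(detections, frame=frame)
    for trk in tracks:
        if not trk.is_confirmed():
            continue                                  # skip tentative tracks
        print(trk.track_id, trk.to_ltrb())            # stable ID + [left, top, right, bottom]
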
@@ -270,9 +246,6 @@ class FaceTracker:
             logger.error(f"Tracking error: {str(e)}")
             return []
 
-# --------------------------------------------------------------------
-# FACENET EMBEDDER
-# --------------------------------------------------------------------
 class FaceNetEmbedder:
     def __init__(self, device: str = 'cpu'):
         self.device = device
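
For context, facenet_pytorch's InceptionResnetV1 (used by this embedder) maps a 160x160 RGB crop, normalized to roughly [-1, 1], to a 512-dimensional embedding; a minimal sketch with a stand-in crop:

    import numpy as np
    import torch
    from facenet_pytorch import InceptionResnetV1
    from torchvision import transforms

    model = InceptionResnetV1(pretrained='vggface2').eval()

    preprocess = transforms.Compose([
        transforms.ToTensor(),                                   # HWC uint8 -> CHW float in [0, 1]
        transforms.Resize((160, 160), antialias=True),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),  # -> roughly [-1, 1]
    ])

    face_rgb = np.random.randint(0, 255, (200, 200, 3), dtype=np.uint8)  # stand-in face crop
    with torch.no_grad():
        emb = model(preprocess(face_rgb).unsqueeze(0))
    print(emb.shape)                                             # torch.Size([1, 512])
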
@@ -296,9 +269,6 @@ class FaceNetEmbedder:
             logger.error(f"Embedding failed: {str(e)}")
             return None
 
-# --------------------------------------------------------------------
-# BLINK DETECTION
-# --------------------------------------------------------------------
 def detect_blink(face_roi: np.ndarray, threshold: float = 0.25) -> Tuple[bool, float, float, np.ndarray, np.ndarray]:
     """
     Returns:
@@ -341,9 +311,6 @@ def detect_blink(face_roi: np.ndarray, threshold: float = 0.25) -> Tuple[bool, float, float, np.ndarray, np.ndarray]:
         logger.error(f"Blink detection error: {str(e)}")
         return False, 0.0, 0.0, None, None
 
-# --------------------------------------------------------------------
-# FACE MESH + IRIS DETECTION / DRAWING
-# --------------------------------------------------------------------
 def process_face_mesh(face_roi: np.ndarray):
     try:
         fm_proc = mp_face_mesh.FaceMesh(
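
detect_blink() is an eye-aspect-ratio (EAR) test, as the config key 'ear_thresh' indicates: for six eye landmarks p1..p6 (the ordering behind LEFT_EYE_IDX / RIGHT_EYE_IDX above), EAR = (|p2 - p6| + |p3 - p5|) / (2 * |p1 - p4|), and a value below the threshold (default 0.25) indicates a closed eye. A minimal sketch of the ratio itself, with made-up landmark coordinates:

    import numpy as np

    def eye_aspect_ratio(pts: np.ndarray) -> float:
        # pts: six (x, y) landmarks ordered [p1, p2, p3, p4, p5, p6]
        v1 = np.linalg.norm(pts[1] - pts[5])   # |p2 - p6|
        v2 = np.linalg.norm(pts[2] - pts[4])   # |p3 - p5|
        h = np.linalg.norm(pts[0] - pts[3])    # |p1 - p4|
        return (v1 + v2) / (2.0 * h + 1e-6)

    open_eye = np.array([[0, 5], [3, 8], [7, 8], [10, 5], [7, 2], [3, 2]], dtype=float)
    print(eye_aspect_ratio(open_eye))          # ~0.6, comfortably above the 0.25 threshold
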
@@ -391,9 +358,6 @@ def draw_face_mesh(image: np.ndarray, face_landmarks, config: Dict, pipeline_con
         connection_drawing_spec=mp_drawing.DrawingSpec(color=iris_color_bgr, thickness=2)
     )
 
-# --------------------------------------------------------------------
-# EYE COLOR DETECTION
-# --------------------------------------------------------------------
 EYE_COLOR_RANGES = {
     "amber": (255, 191, 0),
     "blue": (0, 0, 255),
@@ -452,7 +416,7 @@ def detect_eye_color(face_roi: np.ndarray, face_landmarks) -> Optional[str]:
     y2 = min(h, max_y + pad)
 
     eye_roi = face_roi[y1:y2, x1:x2]
-    # Resize for more stable KMeans
+
     eye_roi_resize = cv2.resize(eye_roi, (40, 40), interpolation=cv2.INTER_AREA)
 
     if eye_roi_resize.size == 0:
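
The comment removed above referred to KMeans: detect_eye_color() evidently clusters iris-region pixels and classifies the dominant cluster against EYE_COLOR_RANGES. A sketch of dominant-color extraction with OpenCV's kmeans (the ROI and k here are illustrative; the exact clustering code in app.py is not shown in this diff):

    import cv2
    import numpy as np

    def dominant_rgb(roi_bgr: np.ndarray, k: int = 2):
        # Cluster pixels, return the center of the largest cluster as (R, G, B)
        pixels = roi_bgr.reshape(-1, 3).astype(np.float32)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        _, labels, centers = cv2.kmeans(pixels, k, None, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)
        b, g, r = centers[np.bincount(labels.flatten()).argmax()]
        return int(r), int(g), int(b)

    roi = np.zeros((40, 40, 3), dtype=np.uint8)   # stand-in 40x40 eye crop, BGR
    roi[:, :30] = (200, 120, 60)                  # 75% of pixels
    roi[:, 30:] = (30, 30, 30)                    # 25% of pixels
    print(dominant_rgb(roi))                      # -> (60, 120, 200): the majority color, in RGB
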
@@ -463,9 +427,6 @@ def detect_eye_color(face_roi: np.ndarray, face_landmarks) -> Optional[str]:
         return classify_eye_color(dom_rgb)
     return None
 
-# --------------------------------------------------------------------
-# HAND TRACKER
-# --------------------------------------------------------------------
 class HandTracker:
     def __init__(self, min_detection_confidence=0.5, min_tracking_confidence=0.5):
         self.hands = mp_hands.Hands(
@@ -491,7 +452,7 @@ class HandTracker:
 
         mpdraw = mp_drawing
         for i, hlms in enumerate(hand_landmarks):
-            # Convert user config colors from (R,G,B) to (B,G,R)
+
            hl_color = config.hand_landmark_color[::-1]
            hc_color = config.hand_connection_color[::-1]
            mpdraw.draw_landmarks(
@@ -505,7 +466,7 @@ class HandTracker:
             label = handedness[i].classification[0].label
             score = handedness[i].classification[0].score
             text = f"{label}: {score:.2f}"
-            # We'll place text near the wrist
+
             wrist_lm = hlms.landmark[mp_hands.HandLandmark.WRIST]
             h, w, _ = image.shape
             cx, cy = int(wrist_lm.x * w), int(wrist_lm.y * h)
@@ -513,9 +474,6 @@
             cv2.putText(image, text, (cx, cy - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, ht_color, 2)
         return image
 
-# --------------------------------------------------------------------
-# FACE PIPELINE
-# --------------------------------------------------------------------
 class FacePipeline:
     def __init__(self, config: PipelineConfig):
         self.config = config
@@ -528,19 +486,18 @@
 
     def initialize(self):
         try:
-            # YOLO for face detection
+
             self.detector = YOLOFaceDetector(
                 model_path=self.config.detector['model_path'],
                 device=self.config.detector['device']
             )
-            # DeepSort tracking
+
             self.tracker = FaceTracker(max_age=self.config.tracker['max_age'])
-            # FaceNet embedder
+
             self.facenet = FaceNetEmbedder(device=self.config.detector['device'])
-            # Database
+
             self.db = FaceDatabase()
 
-            # Hand tracker if enabled
             if self.config.hand['enable']:
                 self.hand_tracker = HandTracker(
                     min_detection_confidence=self.config.hand['min_detection_confidence'],
@@ -564,13 +521,12 @@
             return frame, []
 
         try:
-            # YOLO detection + DeepSort tracking
+
             detections = self.detector.detect(frame, self.config.detection_conf_thres)
             tracked_objs = self.tracker.update(detections, frame)
             annotated = frame.copy()
             results = []
 
-            # Hand detection if enabled
             hand_landmarks_list = None
             handedness_list = None
             if self.config.hand['enable'] and self.hand_tracker:
@@ -594,19 +550,18 @@
                     logger.warning(f"Empty face ROI for track={track_id}")
                     continue
 
-                # Anti-spoofing
                 is_spoofed = False
                 if self.config.anti_spoof.get('enable', True):
                     is_spoofed = not self.is_real_face(face_roi)
                     if is_spoofed:
-                        cls = 1 # Mark as spoofed
+                        cls = 1
 
                 if is_spoofed:
                     box_color_bgr = self.config.spoofed_bbox_color[::-1]
                     name = "Spoofed"
                     similarity = 0.0
                 else:
-                    # Face embedding + recognition
+
                     emb = self.facenet.get_embedding(face_roi)
                     if emb is not None and self.config.recognition.get('enable', True):
                         name, similarity = self.recognize_face(emb, self.config.recognition_conf_thres)
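
is_real_face() itself is outside this diff, but the anti_spoof['lap_thresh'] key elsewhere in the file suggests a Laplacian-variance texture test, a common single-image anti-spoofing heuristic. A sketch under that assumption (the threshold value here is illustrative, not taken from app.py):

    import cv2
    import numpy as np

    def looks_real(face_bgr: np.ndarray, lap_thresh: float = 80.0) -> bool:
        # Printed photos and replayed screens tend to have little high-frequency
        # texture, so a low variance of the Laplacian flags a likely spoof.
        gray = cv2.cvtColor(face_bgr, cv2.COLOR_BGR2GRAY)
        return cv2.Laplacian(gray, cv2.CV_64F).var() >= lap_thresh
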
@@ -623,17 +578,16 @@
                 cv2.putText(annotated, label_text, (x1, y1 - 10),
                             cv2.FONT_HERSHEY_SIMPLEX, 0.5, box_color_bgr, 2)
 
-                # Blink detection
                 blink = False
                 if self.config.blink.get('enable', False):
                     blink, left_ear, right_ear, left_eye_pts, right_eye_pts = detect_blink(
                         face_roi, threshold=self.config.blink.get('ear_thresh', 0.25)
                     )
                     if left_eye_pts is not None and right_eye_pts is not None:
-                        # Shift points to global coords
+
                         le_g = left_eye_pts + np.array([x1, y1])
                         re_g = right_eye_pts + np.array([x1, y1])
-                        # Outline eyes
+
                         eye_outline_bgr = self.config.eye_outline_color[::-1]
                         cv2.polylines(annotated, [le_g], True, eye_outline_bgr, 1)
                         cv2.polylines(annotated, [re_g], True, eye_outline_bgr, 1)
@@ -644,14 +598,13 @@
                                 cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                 blink_msg_color, 2)
 
-                # Face mesh + eye color
                 face_mesh_landmarks = None
                 eye_color_name = None
                 if (self.config.face_mesh_options.get('enable') or
                         self.config.eye_color.get('enable')):
                     face_mesh_landmarks = process_face_mesh(face_roi)
                     if face_mesh_landmarks:
-                        # If user wants to draw face mesh
+
                         if self.config.face_mesh_options.get('enable', False):
                             draw_face_mesh(
                                 annotated[y1:y2, x1:x2],
@@ -659,7 +612,7 @@
                                 self.config.face_mesh_options,
                                 self.config
                             )
-                        # Eye color
+
                         if self.config.eye_color.get('enable', False):
                             color_found = detect_eye_color(face_roi, face_mesh_landmarks)
                             if color_found:
@@ -672,7 +625,6 @@
                                     text_col_bgr, 2
                                 )
 
-                # Record result
                 detection_info = {
                     "track_id": track_id,
                     "bbox": (x1, y1, x2, y2),
@@ -724,9 +676,6 @@
 def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
     return float(np.dot(a, b) / ((np.linalg.norm(a)*np.linalg.norm(b)) + 1e-6))
 
-# --------------------------------------------------------------------
-# GLOBAL LOADER
-# --------------------------------------------------------------------
 pipeline = None
 def load_pipeline() -> FacePipeline:
     global pipeline
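
A quick worked example of the cosine similarity defined above: vectors pointing the same way score ~1.0 regardless of scale, orthogonal vectors score 0.0, and the 1e-6 term only guards against division by zero for degenerate embeddings:

    import numpy as np

    def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
        return float(np.dot(a, b) / ((np.linalg.norm(a) * np.linalg.norm(b)) + 1e-6))

    a = np.array([1.0, 0.0, 2.0])
    print(cosine_similarity(a, 2 * a))                       # ~1.0: same direction, different scale
    print(cosine_similarity(a, np.array([0.0, 3.0, 0.0])))   # 0.0: orthogonal
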
@@ -736,9 +685,6 @@
         pipeline.initialize()
     return pipeline
 
-# --------------------------------------------------------------------
-# UTILITY: HEX <-> BGR
-# --------------------------------------------------------------------
 def hex_to_bgr(hexstr: str) -> Tuple[int,int,int]:
     if not hexstr.startswith('#'):
         hexstr = '#' + hexstr
@@ -754,16 +700,13 @@
     b,g,r = bgr
     return f"#{r:02x}{g:02x}{b:02x}"
 
-# --------------------------------------------------------------------
-# GRADIO CALLBACKS
-# --------------------------------------------------------------------
 def update_config(
-    # toggles
+
     enable_recognition, enable_antispoof, enable_blink, enable_hand, enable_eyecolor, enable_facemesh,
     show_tesselation, show_contours, show_irises,
-    # thresholds
+
     detection_conf, recognition_thresh, antispoof_thresh, blink_thresh, hand_det_conf, hand_track_conf,
-    # colors
+
     bbox_hex, spoofed_hex, unknown_hex, eye_hex, blink_hex,
     hand_landmark_hex, hand_connect_hex, hand_text_hex,
     mesh_hex, contour_hex, iris_hex, eye_color_text_hex
@@ -771,7 +714,6 @@ def update_config(
     pl = load_pipeline()
     cfg = pl.config
 
-    # Toggles
     cfg.recognition['enable'] = enable_recognition
     cfg.anti_spoof['enable'] = enable_antispoof
     cfg.blink['enable'] = enable_blink
@@ -783,7 +725,6 @@
     cfg.face_mesh_options['contours'] = show_contours
     cfg.face_mesh_options['irises'] = show_irises
 
-    # Thresholds
     cfg.detection_conf_thres = detection_conf
     cfg.recognition_conf_thres = recognition_thresh
     cfg.anti_spoof['lap_thresh'] = antispoof_thresh
@@ -791,7 +732,6 @@
     cfg.hand['min_detection_confidence'] = hand_det_conf
     cfg.hand['min_tracking_confidence'] = hand_track_conf
 
-    # Colors
     cfg.bbox_color = hex_to_bgr(bbox_hex)[::-1]
     cfg.spoofed_bbox_color = hex_to_bgr(spoofed_hex)[::-1]
     cfg.unknown_bbox_color = hex_to_bgr(unknown_hex)[::-1]
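
hex_to_bgr() returns OpenCV's BGR order, which is why the assignments above reverse it with [::-1] to store RGB in the config. A round-trip sketch (the hex-parsing body shown here is a plausible reconstruction; only the signature and first two lines of hex_to_bgr appear in this diff, while bgr_to_hex is shown in full):

    from typing import Tuple

    def hex_to_bgr(hexstr: str) -> Tuple[int, int, int]:
        if not hexstr.startswith('#'):
            hexstr = '#' + hexstr
        r, g, b = (int(hexstr[i:i + 2], 16) for i in (1, 3, 5))
        return (b, g, r)

    def bgr_to_hex(bgr: Tuple[int, int, int]) -> str:
        b, g, r = bgr
        return f"#{r:02x}{g:02x}{b:02x}"

    assert hex_to_bgr("#ff8800") == (0, 136, 255)           # BGR
    assert hex_to_bgr("#ff8800")[::-1] == (255, 136, 0)     # reversed -> RGB, as stored above
    assert bgr_to_hex(hex_to_bgr("#ff8800")) == "#ff8800"   # lossless round trip
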
@@ -823,7 +763,7 @@ def enroll_user(label_name: str, filepaths: List[str]) -> str:
         img_bgr = cv2.imread(path)
         if img_bgr is None:
             continue
-        # Detect face(s)
+
         dets = pl.detector.detect(img_bgr, pl.config.detection_conf_thres)
         for x1, y1, x2, y2, conf, cls in dets:
             roi = img_bgr[y1:y2, x1:x2]
@@ -898,14 +838,10 @@ def process_test_image(img: np.ndarray) -> Tuple[np.ndarray, str]:
     result_rgb = cv2.cvtColor(processed, cv2.COLOR_BGR2RGB)
     return result_rgb, str(detections)
 
-# --------------------------------------------------------------------
-# BUILD GRADIO APP
-# --------------------------------------------------------------------
 def build_app():
     with gr.Blocks() as demo:
         gr.Markdown("# Complete Face Recognition System (Single-Image) with Mediapipe")
 
-        # Tab: Image Test
         with gr.Tab("Image Test"):
             gr.Markdown("Upload a single image to detect faces, run blink detection, face mesh, hand tracking, etc.")
             test_in = gr.Image(type="numpy", label="Upload Image")
@@ -919,7 +855,6 @@ def build_app():
             outputs=[test_out, test_info],
         )
 
-        # Tab: Configuration
         with gr.Tab("Configuration"):
             gr.Markdown("Adjust toggles, thresholds, and colors. Click Save to persist changes.")
 
@@ -931,7 +866,6 @@ def build_app():
                 enable_eyecolor = gr.Checkbox(label="Enable Eye Color Detection", value=False)
                 enable_facemesh = gr.Checkbox(label="Enable Face Mesh", value=False)
 
-            # Face Mesh sub-options
             gr.Markdown("**Face Mesh Options**")
             with gr.Row():
                 show_tesselation = gr.Checkbox(label="Tesselation", value=False)
@@ -978,7 +912,6 @@ def build_app():
             outputs=[save_msg]
         )
 
-        # Tab: Database Management
         with gr.Tab("Database Management"):
             gr.Markdown("Enroll multiple images per user, search by name or image, remove users, list all users.")
 
@@ -1046,10 +979,7 @@ def build_app():
 
     return demo
 
-# --------------------------------------------------------------------
-# MAIN
-# --------------------------------------------------------------------
 if __name__ == "__main__":
     app = build_app()
-    # queue() is optional if concurrency is expected
-    app.queue().launch(server_name="0.0.0.0", server_port=7860)
+
+    app.queue().launch(server_name="0.0.0.0", server_port=7860)
 