"""Real-time face-landmark and eye-blink detection streamed through a Gradio UI.

Captures webcam frames with OpenCV, runs MediaPipe's FaceLandmarker task to
get landmarks and blendshape scores, classifies each eye as open/closed from
the eyeBlinkLeft/eyeBlinkRight blendshapes, and streams the annotated frames
plus the two eye states to a live Gradio interface.
"""

import cv2
import gradio as gr
import mediapipe as mp
import numpy as np
from mediapipe.framework.formats import landmark_pb2
from mediapipe.tasks import python
from mediapipe.tasks.python import vision

# Path to the FaceLandmarker model bundle.
# NOTE(review): hard-coded Windows path — consider making this configurable.
MODEL_PATH = 'c:\\face_landmarker.task'

# A blendshape score above this threshold counts as "eye closed".
BLINK_THRESHOLD = 0.5

# Configure the MediaPipe FaceLandmarker: blendshapes are required for blink
# detection; limiting to one face keeps per-frame cost low.
base_options = python.BaseOptions(model_asset_path=MODEL_PATH)
options = vision.FaceLandmarkerOptions(
    base_options=base_options,
    output_face_blendshapes=True,
    output_facial_transformation_matrixes=True,
    num_faces=1,
)
detector = vision.FaceLandmarker.create_from_options(options)


def draw_landmarks_on_image(rgb_image, detection_result):
    """Return a copy of *rgb_image* with the face mesh tesselation, contours
    and iris connections drawn for every detected face.

    Args:
        rgb_image: H x W x 3 RGB numpy array.
        detection_result: FaceLandmarkerResult from ``detector.detect``.
    """
    annotated_image = np.copy(rgb_image)

    for face_landmarks in detection_result.face_landmarks:
        # Convert the task-API landmarks into the proto list format that the
        # classic drawing utilities expect.
        face_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
        face_landmarks_proto.landmark.extend([
            landmark_pb2.NormalizedLandmark(x=lm.x, y=lm.y, z=lm.z)
            for lm in face_landmarks
        ])

        # Draw each landmark set with its default style; per-point drawing
        # specs are disabled (connections only), as in the reference example.
        for connections, style in (
            (mp.solutions.face_mesh.FACEMESH_TESSELATION,
             mp.solutions.drawing_styles.get_default_face_mesh_tesselation_style()),
            (mp.solutions.face_mesh.FACEMESH_CONTOURS,
             mp.solutions.drawing_styles.get_default_face_mesh_contours_style()),
            (mp.solutions.face_mesh.FACEMESH_IRISES,
             mp.solutions.drawing_styles.get_default_face_mesh_iris_connections_style()),
        ):
            mp.solutions.drawing_utils.draw_landmarks(
                image=annotated_image,
                landmark_list=face_landmarks_proto,
                connections=connections,
                landmark_drawing_spec=None,
                connection_drawing_spec=style,
            )

    return annotated_image


def _blendshape_score(blendshapes, name):
    """Return the score of the blendshape category *name*, or 0 if absent."""
    return next((bs.score for bs in blendshapes if bs.category_name == name), 0)


def process_frame(frame):
    """Detect face landmarks in one BGR webcam *frame*.

    Returns:
        (image, left_eye_status, right_eye_status) — the image is RGB, ready
        for Gradio's Image component; the statuses are Turkish UI strings.
    """
    # MediaPipe expects RGB input; OpenCV captures BGR.
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb_frame)

    detection_result = detector.detect(mp_image)

    if not detection_result.face_blendshapes:
        # No face detected: return the unannotated frame in RGB so the Gradio
        # Image component (which assumes RGB numpy input) shows true colors.
        return rgb_frame, "Göz Tespiti Yok", "Göz Tespiti Yok"

    # Blendshape scores of the first (and, with num_faces=1, only) face.
    face_blendshapes = detection_result.face_blendshapes[0]
    blink_left = _blendshape_score(face_blendshapes, "eyeBlinkLeft")
    blink_right = _blendshape_score(face_blendshapes, "eyeBlinkRight")

    left_eye_status = "Kapalı" if blink_left > BLINK_THRESHOLD else "Açık"
    right_eye_status = "Kapalı" if blink_right > BLINK_THRESHOLD else "Açık"

    annotated_image = draw_landmarks_on_image(rgb_frame, detection_result)

    # BUGFIX: the original converted back to BGR before handing the frame to
    # Gradio, which renders numpy arrays as RGB — faces appeared blue-tinted.
    # The annotated RGB image is returned as-is.
    return annotated_image, left_eye_status, right_eye_status


def video_feed():
    """Generator that streams (frame, left_eye, right_eye) tuples to Gradio."""
    cap = cv2.VideoCapture(0)
    try:
        while True:
            success, frame = cap.read()
            if not success:
                break
            yield process_frame(frame)
    finally:
        # BUGFIX: the original never released the capture device, leaking the
        # camera handle when the stream ended or the generator was closed.
        cap.release()


iface = gr.Interface(
    fn=video_feed,
    inputs=None,  # no user inputs — the webcam generator drives the stream
    outputs=[
        gr.Image(type="numpy", label="Yüz Tespiti Sonucu"),
        gr.Textbox(label="Sol Göz Durumu"),
        gr.Textbox(label="Sağ Göz Durumu"),
    ],
    live=True,
)

if __name__ == "__main__":
    # Guarded so importing this module does not immediately launch a server.
    iface.launch(share=True)