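"""Gradio demo: run the MediaPipe FaceLandmarker task on an uploaded image,
draw the face-mesh overlay, and report whether each eye is open or closed
based on the eyeBlinkLeft / eyeBlinkRight blendshape scores.

Dependencies (e.g. requirements.txt on a Hugging Face Space): gradio,
mediapipe, numpy, opencv-python-headless (or opencv-python).
"""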
import cv2
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
from mediapipe.framework.formats import landmark_pb2
import numpy as np
import gradio as gr
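# The FaceLandmarker model asset ('face_landmarker.task') is expected to sit
# next to app.py. If it is missing, a sketch like the commented-out lines below
# could fetch it at startup; the URL is taken from Google's MediaPipe examples
# and is an assumption here, so verify it is still current before relying on it.
#
# import os, urllib.request
# MODEL_URL = ("https://storage.googleapis.com/mediapipe-models/face_landmarker/"
#              "face_landmarker/float16/1/face_landmarker.task")
# if not os.path.exists('face_landmarker.task'):
#     urllib.request.urlretrieve(MODEL_URL, 'face_landmarker.task')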
# Configure the MediaPipe FaceLandmarker
base_options = python.BaseOptions(model_asset_path='face_landmarker.task')
options = vision.FaceLandmarkerOptions(
    base_options=base_options,
    output_face_blendshapes=True,
    output_facial_transformation_matrixes=True,
    num_faces=1
)
detector = vision.FaceLandmarker.create_from_options(options)
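# To see every blendshape category the model reports (handy when extending the
# eye-state logic in process_image below), a one-off check like this works;
# `some_rgb_array` is a placeholder for any RGB NumPy image:
#
# result = detector.detect(mp.Image(image_format=mp.ImageFormat.SRGB, data=some_rgb_array))
# for bs in result.face_blendshapes[0]:
#     print(bs.category_name, round(bs.score, 3))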
# Draw the detected face landmarks onto a copy of the input image
def draw_landmarks_on_image(rgb_image, detection_result):
    face_landmarks_list = detection_result.face_landmarks
    annotated_image = np.copy(rgb_image)

    for face_landmarks in face_landmarks_list:
        # Convert the task-API landmarks into the protobuf list expected by
        # the legacy mp.solutions drawing utilities
        face_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
        face_landmarks_proto.landmark.extend([
            landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z)
            for landmark in face_landmarks
        ])

        # Full face-mesh tesselation
        mp.solutions.drawing_utils.draw_landmarks(
            image=annotated_image,
            landmark_list=face_landmarks_proto,
            connections=mp.solutions.face_mesh.FACEMESH_TESSELATION,
            landmark_drawing_spec=None,
            connection_drawing_spec=mp.solutions.drawing_styles.get_default_face_mesh_tesselation_style())
        # Face contours (eyes, eyebrows, lips, face oval)
        mp.solutions.drawing_utils.draw_landmarks(
            image=annotated_image,
            landmark_list=face_landmarks_proto,
            connections=mp.solutions.face_mesh.FACEMESH_CONTOURS,
            landmark_drawing_spec=None,
            connection_drawing_spec=mp.solutions.drawing_styles.get_default_face_mesh_contours_style())
        # Iris outlines
        mp.solutions.drawing_utils.draw_landmarks(
            image=annotated_image,
            landmark_list=face_landmarks_proto,
            connections=mp.solutions.face_mesh.FACEMESH_IRISES,
            landmark_drawing_spec=None,
            connection_drawing_spec=mp.solutions.drawing_styles.get_default_face_mesh_iris_connections_style())

    return annotated_image
# Process a single uploaded image: detect landmarks and report eye state
def process_image(image):
    # Gradio (type="numpy") delivers the image as an RGB array, so it can be
    # wrapped for MediaPipe directly; no BGR<->RGB conversion is needed
    rgb_image = image
    mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb_image)

    # Detect face landmarks and blendshapes
    detection_result = detector.detect(mp_image)

    if detection_result.face_blendshapes:
        # Blendshape scores for the first detected face
        face_blendshapes = detection_result.face_blendshapes[0]

        # Look up the eyeBlinkLeft and eyeBlinkRight blendshape scores
        blink_left = next((bs.score for bs in face_blendshapes if bs.category_name == "eyeBlinkLeft"), 0)
        blink_right = next((bs.score for bs in face_blendshapes if bs.category_name == "eyeBlinkRight"), 0)

        # Classify each eye with a simple 0.5 threshold on the blink score
        left_eye_status = "Closed" if blink_left > 0.5 else "Open"
        right_eye_status = "Closed" if blink_right > 0.5 else "Open"

        # Draw the landmarks and return the annotated RGB image
        annotated_image = draw_landmarks_on_image(rgb_image, detection_result)
        return annotated_image, left_eye_status, right_eye_status
    else:
        return image, "No eyes detected", "No eyes detected"
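# Optional local sanity check that bypasses the Gradio UI. "sample.jpg" is just
# a placeholder path; cv2.imread returns BGR, so convert to RGB before calling
# process_image:
#
# bgr = cv2.imread("sample.jpg")
# annotated, left, right = process_image(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
# print(left, right)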
# Gradio interface
iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="numpy", label="Upload an Image"),
    outputs=[gr.Image(label="Face Detection Result"),
             gr.Textbox(label="Left Eye Status"),
             gr.Textbox(label="Right Eye Status")])

# Launch the Gradio interface
iface.launch(share=True)