# Image-Verifier / app.py
import gradio as gr
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.keras.models import load_model, Model
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications.xception import preprocess_input as xcp_pre
from tensorflow.keras.applications.efficientnet import preprocess_input as eff_pre
from huggingface_hub import hf_hub_download
from PIL import Image
# Load models from Hugging Face Hub
xcp_path = hf_hub_download(repo_id="Zeyadd-Mostaffa/deepfake-image-detector", filename="xception_model.h5")
eff_path = hf_hub_download(repo_id="Zeyadd-Mostaffa/deepfake-image-detector", filename="efficientnet_model.h5")
xcp_model = load_model(xcp_path)
eff_model = load_model(eff_path)
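
# Note: the code below treats each model's single sigmoid output as the
# probability that the image is REAL (higher score => "REAL"). This is
# inferred from the thresholding in predict(), not from the model repo itself.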

# Face detection using OpenCV
def detect_face_opencv(pil_image):
    """Return the largest detected face crop, or the original image if no face is found."""
    rgb = np.array(pil_image.convert("RGB"))
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4)
    if len(faces) == 0:
        return pil_image  # no face found: fall back to the full image
    # Keep the largest detection (by bounding-box area).
    (x, y, w, h) = max(faces, key=lambda b: b[2] * b[3])
    return pil_image.crop((x, y, x + w, y + h))

def grad_cam(model, img, size, preprocess_func):
    """Overlay a Grad-CAM-style heatmap on the (resized) input image."""
    img_resized = img.resize(size)
    x = img_to_array(img_resized)
    x = np.expand_dims(x, axis=0)
    x = preprocess_func(x)
    x_tensor = tf.convert_to_tensor(x)
    # Assumes the third-to-last layer is the last convolutional feature map of
    # the loaded model; adjust the index if the architecture differs.
    grad_model = Model([model.inputs], [model.layers[-3].output, model.output])
    with tf.GradientTape() as tape:
        conv_outputs, predictions = grad_model(x_tensor)
        loss = predictions[:, 0]
    grads = tape.gradient(loss, conv_outputs)
    # Simplified heatmap: average the raw gradients over the channel axis
    # (canonical Grad-CAM would weight the feature maps by pooled gradients).
    cam = tf.reduce_mean(grads, axis=-1).numpy()[0]
    cam = np.maximum(cam, 0)
    cam /= cam.max() if cam.max() != 0 else 1
    heatmap = cv2.resize(cam, size)
    heatmap = np.uint8(255 * heatmap)
    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)  # BGR colormap
    img_np = np.array(img_resized)
    if img_np.shape[-1] == 4:
        img_np = img_np[:, :, :3]  # drop alpha channel if present
    # Blend in BGR so the final BGR->RGB conversion yields correct colors for
    # both the face crop and the heatmap.
    img_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
    superimposed = cv2.addWeighted(img_bgr, 0.6, heatmap, 0.4, 0)
    return Image.fromarray(cv2.cvtColor(superimposed, cv2.COLOR_BGR2RGB))
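
# Optional: a minimal sketch of the canonical Grad-CAM weighting (pooled
# gradients applied to the conv feature maps), shown for comparison with the
# gradient-averaging heuristic in grad_cam() above. It is not wired into the
# app, and the layer index -3 is the same assumption as above.
def grad_cam_weighted(model, img, size, preprocess_func):
    img_resized = img.resize(size)
    x = preprocess_func(np.expand_dims(img_to_array(img_resized), axis=0))
    grad_model = Model(model.inputs, [model.layers[-3].output, model.output])
    with tf.GradientTape() as tape:
        conv_outputs, predictions = grad_model(tf.convert_to_tensor(x))
        loss = predictions[:, 0]
    grads = tape.gradient(loss, conv_outputs)
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))          # one weight per channel
    cam = tf.reduce_sum(conv_outputs[0] * pooled_grads, axis=-1)  # weighted sum of feature maps
    cam = tf.nn.relu(cam).numpy()
    cam /= cam.max() if cam.max() != 0 else 1
    return cam  # (h, w) map in [0, 1]; overlay as in grad_cam() if desired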

# Preprocessing helper
def preprocess(img, size, func):
    img = img.resize(size)
    arr = img_to_array(img)
    arr = np.expand_dims(arr, axis=0)
    return func(arr)

# Prediction function
def predict(image):
    face = detect_face_opencv(image)
    xcp_input = preprocess(face, (299, 299), xcp_pre)
    eff_input = preprocess(face, (224, 224), eff_pre)
    xcp_pred = xcp_model.predict(xcp_input)[0][0]
    eff_pred = eff_model.predict(eff_input)[0][0]
    # Simple ensemble: average the two sigmoid scores.
    ensemble_prob = (xcp_pred + eff_pred) / 2
    label = "REAL" if ensemble_prob > 0.5 else "FAKE"
    cam_img = grad_cam(xcp_model, face, (299, 299), xcp_pre)
    # Report confidence in the chosen label (ensemble_prob is treated as P(REAL)).
    confidence = ensemble_prob if ensemble_prob > 0.5 else 1 - ensemble_prob
    return f"{label} ({confidence:.2%} confidence)", cam_img

# Gradio UI
gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=["text", "image"],
    title="Deepfake Image Detector (with Grad-CAM)",
    description="Upload an image. We detect the face, classify it with an ensemble (Xception + EfficientNetB4), and explain the prediction using Grad-CAM on Xception.",
).launch()