import gradio as gr
from transformers import ViTForImageClassification, ViTImageProcessor
from PIL import Image
import torch

# Load the model and processor from Hugging Face
model = ViTForImageClassification.from_pretrained("dima806/deepfake_vs_real_image_detection")
processor = ViTImageProcessor.from_pretrained("dima806/deepfake_vs_real_image_detection")
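# Optional sketch (an assumption, not part of the original Space): put the model in eval
# mode and move it to a GPU when one is available. If you enable this, also move `inputs`
# to the same device inside detect() before calling the model.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device).eval()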
def detect(image):
    """Detect deepfake content in an image using dima806/deepfake_vs_real_image_detection."""
    if image is None:
        raise gr.Error("Please upload an image to analyze")
    try:
        # Convert the Gradio input (a filepath) to a PIL image
        pil_image = Image.open(image).convert("RGB")
        # Preprocess the image
        inputs = processor(images=pil_image, return_tensors="pt")
        # Perform inference
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        predicted_class = torch.argmax(logits, dim=1).item()
        # Confidence scores; the indices assume the model's id2label maps 0 -> real, 1 -> fake
        probabilities = torch.softmax(logits, dim=1)[0]
        confidence_real = probabilities[0].item() * 100
        confidence_fake = probabilities[1].item() * 100
        # Map the predicted class index to its human-readable label
        label = model.config.id2label[predicted_class]
        # Prepare the output strings
        overall = f"{label}: {max(confidence_real, confidence_fake):.1f}% Confidence"
        aigen = f"{confidence_fake:.1f}% (AI-Generated Content Likelihood)"
        deepfake = f"{confidence_fake:.1f}% (Face Manipulation Likelihood)"
        return overall, aigen, deepfake
    except Exception as e:
        raise gr.Error(f"Analysis error: {str(e)}")
# Custom CSS for the layout, header, and animated analyze button
custom_css = """ | |
.container { | |
max-width: 1200px; | |
margin: 0 auto; | |
padding: 20px; | |
font-family: 'Arial', sans-serif; | |
} | |
.header { | |
color: #2c3e50; | |
border-bottom: 2px solid #3498db; | |
padding-bottom: 10px; | |
} | |
.button-gradient { | |
background: linear-gradient(45deg, #3498db, #2ecc71, #9b59b6); | |
background-size: 400% 400%; | |
border: none; | |
padding: 12px 24px; | |
font-size: 16px; | |
font-weight: 600; | |
color: white; | |
border-radius: 8px; | |
cursor: pointer; | |
transition: all 0.3s ease; | |
animation: gradientAnimation 3s ease infinite; | |
box-shadow: 0 2px 8px rgba(52, 152, 219, 0.3); | |
} | |
.button-gradient:hover { | |
transform: translateY(-2px); | |
box-shadow: 0 4px 12px rgba(52, 152, 219, 0.5); | |
} | |
@keyframes gradientAnimation { | |
0% { background-position: 0% 50%; } | |
50% { background-position: 100% 50%; } | |
100% { background-position: 0% 50%; } | |
} | |
""" | |
MARKDOWN0 = """ | |
<div class="header"> | |
<h1>DeepFake Detection System</h1> | |
<p>Advanced AI-powered analysis for identifying manipulated media<br> | |
Powered by dima806/deepfake_vs_real_image_detection model</p> | |
</div> | |
""" | |
# Create the Gradio interface
with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as demo:
    gr.Markdown(MARKDOWN0)
    with gr.Row(elem_classes="container"):
        with gr.Column(scale=1):
            image = gr.Image(type='filepath', height=400, label="Upload Image")
            detect_button = gr.Button("Analyze Image", elem_classes="button-gradient")
        with gr.Column(scale=2):
            overall = gr.Label(label="Confidence Score")
            aigen = gr.Label(label="AI-Generated Content")
            deepfake = gr.Label(label="Face Manipulation")

    detect_button.click(
        fn=detect,
        inputs=[image],
        outputs=[overall, aigen, deepfake]
    )
# Launch the application
demo.launch(debug=True)