Spaces: Runtime error
from transformers import ViTForImageClassification, ViTImageProcessor
import torch
from PIL import Image
import gradio as gr
import os
import warnings

# Suppress warnings (optional)
warnings.filterwarnings('ignore')

try:
    # Load the base ViT model and its image processor.
    # Note: this checkpoint is a general ImageNet classifier, so the
    # Real/Fake mapping below only makes sense with a fine-tuned checkpoint.
    model_name = "google/vit-base-patch16-224"
    processor = ViTImageProcessor.from_pretrained(model_name)
    model = ViTForImageClassification.from_pretrained(model_name)
    print("Model loaded successfully!")
except Exception as e:
    print(f"Error loading model: {e}")
    raise

def detect_deepfake(image):
    try:
        if image is None:
            return "Please upload an image."

        # Ensure the image is in RGB mode before preprocessing
        if image.mode != 'RGB':
            image = image.convert('RGB')

        # Turn the PIL image into model inputs
        inputs = processor(images=image, return_tensors="pt")

        # Run inference without tracking gradients
        with torch.no_grad():
            outputs = model(**inputs)

        # Map the top logit to a label; this assumes class 0 = real and
        # class 1 = fake, which only holds for a binary fine-tuned model.
        predicted_class = outputs.logits.argmax(-1).item()
        return "Real" if predicted_class == 0 else "Fake (Possible Deepfake)"
    except Exception as e:
        return f"Error processing image: {str(e)}"

# Create the Gradio interface
iface = gr.Interface(
    fn=detect_deepfake,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="Deepfake Detection",
    # Only pass example files that actually exist in the repo; missing
    # example images can make the Space fail at startup.
    examples=[[f] for f in ["real_example.jpg", "fake_example.jpg"]
              if os.path.exists(f)] or None,
)

iface.launch(server_port=7860, share=False)  # 7860 is the default Spaces port; share is not needed here
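
Because google/vit-base-patch16-224 is trained on the 1000 ImageNet classes rather than real-vs-fake labels, the interface above cannot genuinely detect deepfakes. Below is a minimal sketch of loading a fine-tuned binary classifier instead; the checkpoint name "your-org/deepfake-vit" is a placeholder assumption, and the returned label comes from the checkpoint's own id2label mapping rather than a hard-coded index.

import torch
from transformers import AutoImageProcessor, AutoModelForImageClassification

# Placeholder checkpoint name (assumption); swap in a real deepfake-detection model
checkpoint = "your-org/deepfake-vit"
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = AutoModelForImageClassification.from_pretrained(checkpoint)

def classify(image):
    inputs = processor(images=image.convert("RGB"), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    idx = int(logits.argmax(-1).item())
    # Use the checkpoint's own label names instead of assuming 0 == "Real"
    return model.config.id2label[idx]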