from transformers import ViTForImageClassification, ViTImageProcessor
import torch
from PIL import Image
import gradio as gr
import warnings

# Suppress warnings (optional)
warnings.filterwarnings('ignore')

try:
    # Load the ViT checkpoint. Note: google/vit-base-patch16-224 is a general
    # ImageNet-pretrained image classifier; for meaningful deepfake detection,
    # swap in a checkpoint fine-tuned on a real/fake dataset.
    model_name = "google/vit-base-patch16-224"
    processor = ViTImageProcessor.from_pretrained(model_name)
    model = ViTForImageClassification.from_pretrained(model_name)
    
    print("Model loaded successfully!")
except Exception as e:
    print(f"Error loading model: {e}")
    raise

def detect_deepfake(image):
    try:
        # Convert image to RGB
        if image.mode != 'RGB':
            image = image.convert('RGB')
            
        # Process image
        inputs = processor(images=image, return_tensors="pt")
        
        # Predict
        with torch.no_grad():
            outputs = model(**inputs)
        
        # Map the top logit to a label. This mapping assumes a binary
        # real/fake classifier with index 0 = "Real"; with the stock
        # ImageNet checkpoint above, the indices are ImageNet classes.
        predicted_class = outputs.logits.argmax(-1).item()
        return "Real" if predicted_class == 0 else "Fake (Possible Deepfake)"
    
    except Exception as e:
        return f"Error processing image: {str(e)}"

# Create interface
iface = gr.Interface(
    fn=detect_deepfake,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="Deepfake Detection",
    examples=[
        ["real_example.jpg"],  # Replace with paths to local example images,
        ["fake_example.jpg"]   # or drop the examples list if you have none.
    ]
)

iface.launch(server_port=7860, share=False)  # Disable share for local use
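
# Usage (assuming this script is saved as app.py; the filename is illustrative):
#   pip install torch transformers gradio pillow
#   python app.py
# Then open http://localhost:7860 in a browser.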