import gradio as gr
from PIL import Image
from vit_model_test import CustomModel
import time

# Initialize the model
model = CustomModel()
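# Assumption: CustomModel (defined in vit_model_test.py) exposes a
# predict(image: PIL.Image.Image) -> (label, confidence) method, where
# label == 1 means the image is classified as AI-generated and confidence
# is a percentage. This matches how the model is used in predict() below.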

def predict(image: Image.Image):
    # Simulate processing time (e.g., 5 seconds); adjust the duration as needed
    time.sleep(5)
    label, confidence = model.predict(image)
    result = "AI image" if label == 1 else "Real image"
    return result, f"Confidence: {confidence:.2f}%"

# Define the Gradio interface
with gr.Blocks() as demo:
    image_input = gr.Image(type="pil", label="Upload an image")
    animation = gr.Video("https://cdn-uploads.huggingface.co/production/uploads/66d6f1b3b50e35e1709bfdf7/x7Ud8PO9QPfmrTvBVcCKE.mp4", visible=False)  # Loading animation video, hidden by default
    output_label = gr.Textbox(label="Classification Result", interactive=False)
    output_confidence = gr.Textbox(label="Confidence", interactive=False)

    def show_animation(image):
        # Show the loading animation and clear any previous results
        return gr.update(visible=True), "", ""

    def hide_animation(image):
        # Run the prediction, then hide the animation and display the results
        result, confidence = predict(image)
        return gr.update(visible=False), result, confidence

    # Show the animation first, then run the prediction and replace it with the results
    image_input.change(
        show_animation, inputs=image_input, outputs=[animation, output_label, output_confidence]
    ).then(
        hide_animation, inputs=image_input, outputs=[animation, output_label, output_confidence]
    )

# Launch the Gradio interface
demo.launch()
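
# Note: given the simulated 5-second processing delay above, enabling Gradio's
# request queue (demo.queue().launch()) is a common option so concurrent
# uploads are processed in turn rather than blocking each other.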