VIT_Demo / app.py
litav's picture
Update app.py
f67e289 verified
raw
history blame
1.75 kB
import gradio as gr
from PIL import Image
from vit_model_test import CustomModel
import time  # used only to simulate processing time
# Initialize the model once at module load (shared by all requests)
model = CustomModel()
def predict(image: Image.Image):
    """Classify an image as AI-generated or real.

    Returns a pair of display strings: the verdict and a formatted
    confidence percentage.
    """
    # Simulated processing delay
    time.sleep(3)
    # Run the model to classify the image
    label, confidence = model.predict(image)
    verdict = "AI image" if label == 1 else "Real image"
    return verdict, f"Confidence: {confidence:.2f}%"
def loading_animation(image):
    """Show the loading video and clear any previous results.

    Returns updates for (animation, output_label, output_confidence).
    Uses gr.update(...) instead of gr.Video.update(...): the per-component
    .update() classmethods were removed in Gradio 4.x, while gr.update
    works on both 3.x and 4.x.
    """
    return gr.update(visible=True), "", ""
def show_results(image):
    """Run the classifier and hide the loading video.

    Returns updates for (animation, output_label, output_confidence).
    Uses gr.update(...) instead of gr.Video.update(...), which was
    removed in Gradio 4.x (gr.update is portable across versions).
    """
    result, confidence = predict(image)
    return gr.update(visible=False), result, confidence
# Build the Gradio interface
with gr.Blocks() as demo:
    with gr.Row():
        # Image input
        image_input = gr.Image(type="pil", label="Upload an image")
        # Loading-animation video (hidden until an image is uploaded)
        animation = gr.Video(
            "https://cdn-uploads.huggingface.co/production/uploads/66d6f1b3b50e35e1709bfdf7/x7Ud8PO9QPfmrTvBVcCKE.mp4",
            visible=False,
        )
        # Result textboxes
        output_label = gr.Textbox(label="Classification Result", interactive=False, visible=True)
        output_confidence = gr.Textbox(label="Confidence", interactive=False, visible=True)

    # Events. gr.Image has no .submit event, so the original
    # image_input.submit(show_results, ...) handler never fired and the
    # animation was shown forever. Chain the model run after the loading
    # step with .then(), which runs once the first callback completes.
    image_input.change(
        loading_animation,
        inputs=image_input,
        outputs=[animation, output_label, output_confidence],
    ).then(
        show_results,
        inputs=image_input,
        outputs=[animation, output_label, output_confidence],
    )

# Launch the interface
demo.launch()