"""Gradio demo: visual question answering with InternVL3-14B.

Upload an image and a text prompt; the model answers about the image.
"""
import gradio as gr
from transformers import AutoProcessor, AutoModelForVision2Seq
from PIL import Image
import torch

# Model and processor (InternVL3). device_map="auto" lets accelerate place
# the weights (GPU if available, possibly sharded); float16 halves memory.
model_id = "OpenGVLab/InternVL3-14B"
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForVision2Seq.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="auto",
)


def infer(image: Image.Image, prompt: str) -> str:
    """Answer *prompt* about *image* using InternVL3-14B.

    Args:
        image: PIL image uploaded by the user.
        prompt: Natural-language question about the image.

    Returns:
        The model's decoded text answer (generated tokens only,
        without the echoed prompt).
    """
    # HF processors take the text under `text=` (not `prompt=`), and the
    # inputs must go to the device the model was actually placed on —
    # hard-coding "cuda" breaks CPU-only hosts and multi-device maps.
    inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)
    # inference_mode: no autograd bookkeeping during generation.
    with torch.inference_mode():
        output = model.generate(**inputs, max_new_tokens=256)
    # generate() returns prompt + continuation; slice off the prompt so
    # only the newly generated answer is decoded.
    prompt_len = inputs["input_ids"].shape[1]
    response = processor.decode(output[0][prompt_len:], skip_special_tokens=True)
    return response


# Gradio UI — guard the launch so importing this module (e.g. for tests)
# does not start a web server; `python thisfile.py` behaves as before.
if __name__ == "__main__":
    gr.Interface(
        fn=infer,
        inputs=[
            gr.Image(type="pil", label="Upload Image"),
            gr.Textbox(label="Your Prompt", placeholder="Ask a question about the image..."),
        ],
        outputs="text",
        title="InternVL3-14B Visual Chat",
        description="Upload an image and enter a prompt. InternVL3-14B will answer accordingly.",
    ).launch()