Spaces: Runtime error
import gradio as gr
from PIL import Image
import clipGPT

# Define model loading functions (if needed)
def load_model_1():  # CLIP-GPT2
    # Load model components here if necessary
    return None

# ... load_model_2(), load_model_3() - define if and when needed

# Caption generation functions
def generate_caption_clipgpt(image):
    caption = clipGPT.generate_caption_clipgpt(image)
    return caption

# ... add more caption generation functions for future models

# Sample image paths (these files must exist alongside app.py in the Space repo)
sample_images = [
    "CXR191_IM-0591-1001.png",
    "CXR192_IM-0598-1001.png",
    "CXR193_IM-0601-1001.png",
    "CXR194_IM-0609-1001.png",
    "CXR195_IM-0618-1001.png"
]

# Gradio interface
with gr.Blocks() as demo:
    with gr.Row():
        # type="pil" hands the upload to the captioning code as a PIL image
        # (adjust if clipGPT expects a NumPy array or a file path instead)
        image = gr.Image(label="Upload Chest X-ray", type="pil")
        sample_images_gallery = gr.Gallery(sample_images, label="Sample Images", columns=5)
    with gr.Row():
        model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
    with gr.Row():
        caption = gr.Textbox(label="Generated Caption")

    def predict(img, model_name):
        if model_name == "CLIP-GPT2":
            return generate_caption_clipgpt(img)
        # Add elif blocks for "ViT-GPT2" and "ViT-CoAttention" as they are implemented
        else:
            return "Caption generation for this model is not yet implemented."

    # Caption the uploaded image whenever it changes
    image.change(predict, [image, model_choice], caption)
    # Note: a Gallery's value is a list of images, not the clicked image,
    # so predict will not receive a single image here; see the .select sketch below
    sample_images_gallery.change(predict, [sample_images_gallery, model_choice], caption)

demo.launch()
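One detail worth flagging: wiring the Gallery through `.change` passes the gallery's whole value (a list of images) to `predict`, not the image the user clicked. If the intent is to caption a clicked sample, a minimal sketch, assuming a reasonably recent Gradio release that provides `gr.SelectData`, is to use the Gallery's `.select` event instead. These lines belong inside the `with gr.Blocks() as demo:` block, in place of the `sample_images_gallery.change(...)` line; the helper name `predict_sample` is purely illustrative:

    # Caption the sample image the user clicks in the gallery
    def predict_sample(model_name, evt: gr.SelectData):
        img = Image.open(sample_images[evt.index])  # open the clicked sample as a PIL image
        return predict(img, model_name)

    sample_images_gallery.select(predict_sample, [model_choice], caption)

With `.select`, Gradio fills the `gr.SelectData` argument automatically from the click, so only `model_choice` needs to be listed as an input.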