import gradio as gr
from PIL import Image
import clipGPT


# Define model loading functions (if needed)
def load_model_1():  # CLIP-GPT2
    # Load model components here if necessary
    return None


# ... load_model_2(), load_model_3() - Define if and when needed
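

# A hedged sketch of what load_model_2 might look like for the ViT-GPT2
# variant, assuming the public "nlpconnect/vit-gpt2-image-captioning"
# checkpoint; swap in the project's own weights if they differ.
def load_model_2():  # ViT-GPT2
    from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
    name = "nlpconnect/vit-gpt2-image-captioning"
    model = VisionEncoderDecoderModel.from_pretrained(name)
    processor = ViTImageProcessor.from_pretrained(name)
    tokenizer = AutoTokenizer.from_pretrained(name)
    return model, processor, tokenizer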


# Caption generation functions
def generate_caption_clipgpt(image):
    caption = clipGPT.generate_caption_clipgpt(image)
    return caption


# ... Add more caption generation functions for future models
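
# A hedged sketch of a caption function for the ViT-GPT2 option, wired to
# the load_model_2 sketch above; the checkpoint it loads is an assumption,
# not the project's confirmed weights.
def generate_caption_vitgpt(image):
    # NOTE: reloads the model on every call for simplicity; cache the
    # result of load_model_2() in a real app
    model, processor, tokenizer = load_model_2()
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    output_ids = model.generate(pixel_values, max_length=50)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
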
# Sample image paths
sample_images = [
    "CXR191_IM-0591-1001.png",
    "CXR192_IM-0598-1001.png",
    "CXR193_IM-0601-1001.png",
    "CXR194_IM-0609-1001.png",
    "CXR195_IM-0618-1001.png",
]

# Gradio interface
with gr.Blocks() as demo:
    with gr.Row():
        # type="pil" assumes the caption functions expect a PIL image
        image = gr.Image(label="Upload Chest X-ray", type="pil")
        # A list of images needs a Gallery component, not gr.Image
        sample_images_gallery = gr.Gallery(value=sample_images, label="Sample Images")
    with gr.Row():
        model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
    with gr.Row():
        caption = gr.Textbox(label="Generated Caption")

    def predict(img, model_name):
        if model_name == "CLIP-GPT2":
            return generate_caption_clipgpt(img)
        # Add elif blocks for "ViT-GPT2", "ViT-CoAttention" as you implement them
        else:
            return "Caption generation for this model is not yet implemented."

    # Caption the uploaded image whenever it changes
    image.change(predict, [image, model_choice], caption)

    # A Gallery's value is a list, so it cannot feed predict() directly;
    # instead, clicking a sample copies that image into the upload
    # component, which in turn triggers the predict handler above
    def select_sample(evt: gr.SelectData):
        return sample_images[evt.index]

    sample_images_gallery.select(select_sample, None, image)
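
# launch() serves locally on http://127.0.0.1:7860 by default; on a hosted
# Space the platform supplies the host and port. Pass share=True for a
# temporary public link.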
demo.launch()