Jyothirmai committed
Commit b1241b7 · verified · 1 Parent(s): 22ed06b

Update app.py

Files changed (1)
  1. app.py +20 -23
app.py CHANGED
@@ -1,5 +1,3 @@
-
-
 import gradio as gr
 from PIL import Image
 import clipGPT
@@ -38,10 +36,6 @@ def generate_caption_vitgpt(image):
 
 with gr.Blocks() as demo:
 
-    generated_captions = {
-        "CLIP-GPT2": "",
-        "ViT-GPT2": "",
-    }
 
     gr.HTML("<h1 style='text-align: center;'>MedViT: A Vision Transformer-Driven Method for Generating Medical Reports 🏥🤖</h1>")
     gr.HTML("<p style='text-align: center;'>You can generate captions by uploading an X-Ray and selecting a model of your choice below</p>")
@@ -54,18 +48,23 @@ with gr.Blocks() as demo:
         "CXR194_IM-0609-1001.png",
         "CXR195_IM-0618-1001.png"
     ]
-    image = gr.Image(label="Upload Chest X-ray")
-    gr.Gallery(
-        value = sample_images,
-        label="Sample Images",
-    )
-    # sample_images_gallery = gr.Gallery(
-    #     value = sample_images,
-    #     label="Sample Images",
-    # )
+
+    image = gr.Image(label="Upload Chest X-ray")
+
+    gr.Gallery(
+        value = sample_images,
+        label="Sample Images",
+    )
+
+    # sample_images_gallery = gr.Gallery(
+    #     value = sample_images,
+    #     label="Sample Images",
+    # )
+
     with gr.Row():
         model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
         generate_button = gr.Button("Generate Caption")
+
     caption = gr.Textbox(label="Generated Caption")
 
     def predict(img, model_name):
@@ -75,19 +74,17 @@ with gr.Blocks() as demo:
             return generate_caption_vitgpt(img)
         else:
             return "Caption generation for this model is not yet implemented."
-        generated_captions[model_name] = caption
 
     with gr.Row():
-        caption1 = gr.Textbox(label="CLIP-GPT2")
-        caption2 = gr.Textbox(label="ViT-GPT2")
-        compare_button = gr.Button("Compare Captions")
+        text1 = gr.Textbox(label="Text 1")
+        text2 = gr.Textbox(label="Text 2")
+        compare_button = gr.Button("Compare Texts")
     with gr.Row():
         comparison_result = gr.Textbox(label="Comparison Result")
 
-    # Compare captions on button click
-    compare_button.click(lambda: compare_and_highlight(
-        generated_captions["CLIP-GPT2"], generated_captions["ViT-GPT2"]
-    ), [], comparison_result)
+    # Event handlers
+    generate_button.click(predict, [image, model_choice], caption)
+    compare_button.click(lambda: compare_and_highlight(text1.value, text2.value), [], comparison_result)
 
 
     generate_button.click(predict, [image, model_choice], caption)  # Trigger prediction on button click
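For context on the new compare wiring: in Gradio Blocks, `component.value` only holds the value a component was built with, so a zero-argument lambda reading `text1.value` will not see what the user typed at click time. Below is a minimal, self-contained sketch of the usual pattern, passing the textboxes as `inputs` so the callback receives their current contents; the `compare_and_highlight` body here is a placeholder for the app's own helper, which is assumed to take two strings and return a string.

```python
import gradio as gr

def compare_and_highlight(a: str, b: str) -> str:
    # Placeholder for the app's own compare_and_highlight helper
    # (assumed to take two strings and return a string).
    return "Texts match" if a == b else "Texts differ"

with gr.Blocks() as sketch:
    text1 = gr.Textbox(label="Text 1")
    text2 = gr.Textbox(label="Text 2")
    compare_button = gr.Button("Compare Texts")
    comparison_result = gr.Textbox(label="Comparison Result")

    # Passing the components as inputs lets Gradio hand the callback
    # whatever the user has typed when the button is clicked.
    compare_button.click(compare_and_highlight,
                         inputs=[text1, text2],
                         outputs=comparison_result)

if __name__ == "__main__":
    sketch.launch()
```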