Jyothirmai committed on
Commit
dd914ca
Β·
verified Β·
1 Parent(s): 2bfbbec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -24
app.py CHANGED
@@ -29,38 +29,31 @@ with gr.Blocks() as demo:
29
  gr.HTML("<h1 style='text-align: center;'>MedViT: A Vision Transformer-Driven Method for Generating Medical Reports πŸ₯πŸ€–</h1>")
30
  gr.HTML("<p style='text-align: center;'>You can generate captions by uploading an X-Ray and selecting a model of your choice below</p>")
31
 
 
 
 
 
32
 
33
  with gr.Row():
34
- sample_images = [
35
- 'https://imgur.com/W1pIr9b',
36
- 'https://imgur.com/MLJaWnf',
37
- 'https://imgur.com/6XymFW1',
38
- 'https://imgur.com/zdPjZZ1',
39
- 'https://imgur.com/DKUlZbF'
40
- ]
41
-
42
-
43
- image = gr.Image(label="Upload Chest X-ray", type="pil")
44
-
45
- sample_images_gallery = gr.Gallery(value = sample_images,label="Sample Images")
46
 
47
  gr.HTML("<p style='text-align: center;'> Please select the Number of Max Tokens and Temperature setting, if you are testing CLIP GPT2 and VIT GPT2 Models</p>")
48
-
49
-
50
  with gr.Row():
51
-
52
- with gr.Column(): # Column for dropdowns and model choice
53
  max_tokens = gr.Dropdown(list(range(50, 101)), label="Max Tokens", value=75)
54
  temperature = gr.Slider(0.5, 0.9, step=0.1, label="Temperature", value=0.7)
55
-
56
- model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
57
-
58
- generate_button = gr.Button("Generate Caption")
59
-
60
 
61
-
62
  caption = gr.Textbox(label="Generated Caption")
63
 
 
 
 
 
64
  def predict(img, model_name, max_tokens, temperature):
65
  if model_name == "CLIP-GPT2":
66
  return generate_caption_clipgpt(img, max_tokens, temperature)
@@ -69,12 +62,25 @@ with gr.Blocks() as demo:
69
  elif model_name == "ViT-CoAttention":
70
  return generate_caption_vitCoAtt(img)
71
  else:
72
- return "Caption generation for this model is not yet implemented."
73
 
 
 
 
 
 
 
 
 
 
 
 
74
 
 
75
  # Event handlers
76
  generate_button.click(predict, [image, model_choice, max_tokens, temperature], caption)
77
- sample_images_gallery.change(predict, [sample_images_gallery, model_choice, max_tokens, temperature], caption)
 
78
 
79
 
80
  demo.launch()
 
29
  gr.HTML("<h1 style='text-align: center;'>MedViT: A Vision Transformer-Driven Method for Generating Medical Reports πŸ₯πŸ€–</h1>")
30
  gr.HTML("<p style='text-align: center;'>You can generate captions by uploading an X-Ray and selecting a model of your choice below</p>")
31
 
32
+ sample_data = [
33
+ {'image': 'https://imgur.com/W1pIr9b', 'max token, temp': '75, 0.7', 'model supported': 'CLIP-GPT2, ViT-GPT2, ViT-CoAttention', 'ground truth': '...'},
34
+ {'image': 'https://imgur.com/MLJaWnf', 'max token, temp': '50, 0.8', 'model supported': 'CLIP-GPT2, ViT-CoAttention', 'ground truth': '...'},
35
+ ]
36
 
37
  with gr.Row():
38
+ image = gr.Image(label="Upload Chest X-ray", type="pil")
39
+ image_table = gr.Dataframe(sample_data, headers=['image', 'max token, temp', 'model supported', 'ground truth'], datatype=['picture', 'str', 'str', 'str'])
 
 
 
 
 
 
 
 
 
 
40
 
41
  gr.HTML("<p style='text-align: center;'> Please select the Number of Max Tokens and Temperature setting, if you are testing CLIP GPT2 and VIT GPT2 Models</p>")
42
+
 
43
  with gr.Row():
44
+ with gr.Column():
 
45
  max_tokens = gr.Dropdown(list(range(50, 101)), label="Max Tokens", value=75)
46
  temperature = gr.Slider(0.5, 0.9, step=0.1, label="Temperature", value=0.7)
47
+
48
+ model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
 
 
 
49
 
50
+ generate_button = gr.Button("Generate Caption")
51
  caption = gr.Textbox(label="Generated Caption")
52
 
53
+
54
+ gr.HTML("<p style='text-align: center;'> Please select the Number of Max Tokens and Temperature setting, if you are testing CLIP GPT2 and VIT GPT2 Models</p>")
55
+
56
+
57
  def predict(img, model_name, max_tokens, temperature):
58
  if model_name == "CLIP-GPT2":
59
  return generate_caption_clipgpt(img, max_tokens, temperature)
 
62
  elif model_name == "ViT-CoAttention":
63
  return generate_caption_vitCoAtt(img)
64
  else:
65
+ return "Caption generation for this model is not yet implemented."
66
 
67
+ def predict_from_table(row, model_name):
68
+ img_url = row['image']
69
+ img = Image.open(io.imread(img_url))
70
+ if model_name == "CLIP-GPT2":
71
+ return generate_caption_clipgpt(img, max_tokens, temperature)
72
+ elif model_name == "ViT-GPT2":
73
+ return generate_caption_vitgpt(img, max_tokens, temperature)
74
+ elif model_name == "ViT-CoAttention":
75
+ return generate_caption_vitCoAtt(img)
76
+ else:
77
+ return "Caption generation for this model is not yet implemented."
78
 
79
+
80
  # Event handlers
81
  generate_button.click(predict, [image, model_choice, max_tokens, temperature], caption)
82
+ image_table.click(predict_from_table, [image_table, model_choice], caption)
83
+
84
 
85
 
86
  demo.launch()