ChayanM committed
Commit 378771b · Parent: 97edacb

Update Chest_Xray_Report_Generator-V2.py

Browse files

Update the main file of the web app (Version 2).

Files changed (1)
  1. Chest_Xray_Report_Generator-V2.py +225 -32
Chest_Xray_Report_Generator-V2.py CHANGED
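A note on the Grad-CAM hunks below: the input resize changes from 224×224 to 384×384 and reshape_transform changes from a 14×14 to a 12×12 token grid, consistent with moving from the ViT-based target layer (224 / patch size 16 = 14) to the Swin-based target layers (384 / an overall stride of 32 = 12). A minimal sketch of that arithmetic; the helper name and the stride values are assumptions for illustration, not part of the commit:

    # Hypothetical helper, not part of the commit: relates input size, encoder stride,
    # and the (height, width) grid that reshape_transform expects.
    def token_grid_side(input_size: int, total_stride: int) -> int:
        # Tokens per side = input resolution divided by the encoder's overall downsampling factor.
        assert input_size % total_stride == 0, "input size should be divisible by the stride"
        return input_size // total_stride

    print(token_grid_side(224, 16))  # 14 -> ViT-style encoder, 16x16 patches (old defaults)
    print(token_grid_side(384, 32))  # 12 -> Swin-style encoder, 32x overall downsampling (new defaults)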
@@ -1,13 +1,20 @@
- import os
+ import os
  import transformers
  from transformers import pipeline
+ 
+ ### Gradio
  import gradio as gr
+ from gradio.themes.base import Base
+ from gradio.themes.utils import colors, fonts, sizes
+ from typing import Union, Iterable
+ import time
+ #####
+ 
+ 
  import cv2
  import numpy as np
  import pydicom
- import time
- 
- import spaces # Import the spaces module for ZeroGPU
+ import re
  
  ##### Libraries For Grad-Cam-View
  import os
@@ -21,6 +28,17 @@ from pytorch_grad_cam.utils.image import show_cam_on_image, preprocess_image
  from pytorch_grad_cam.ablation_layer import AblationLayerVit
  from transformers import VisionEncoderDecoderModel
  
+ 
+ from transformers import AutoTokenizer
+ import transformers
+ import torch
+ 
+ from openai import OpenAI
+ client = OpenAI()
+ 
+ import spaces # Import the spaces module for ZeroGPU
+ 
+ 
  @spaces.GPU
  def generate_gradcam(image_path, model_path, output_path, method='gradcam', use_cuda=True, aug_smooth=False, eigen_smooth=False):
      methods = {
@@ -48,8 +66,8 @@ def generate_gradcam(image_path, model_path, output_path, method='gradcam', use_
  
      #target_layers = [model.blocks[-1].norm1] ## For ViT model
      #target_layers = model.blocks[-1].norm1 ## For EfficientNet-B7 model
-     target_layers = [model.encoder.encoder.layer[-1].layernorm_before] ## For ViT-based VisionEncoderDecoder model
-     #target_layers = [model.encoder.encoder.layers[-1].blocks[-1].layernorm_before, model.encoder.encoder.layers[-1].blocks[0].layernorm_before] ## For Swin-based VisionEncoderDecoder mode
+     #target_layers = [model.encoder.encoder.layer[-1].layernorm_before] ## For ViT-based VisionEncoderDecoder model
+     target_layers = [model.encoder.encoder.layers[-1].blocks[-0].layernorm_after, model.encoder.encoder.layers[-1].blocks[-1].layernorm_after] ## [model.encoder.encoder.layers[-1].blocks[-1].layernorm_before, model.encoder.encoder.layers[-1].blocks[0].layernorm_before] For Swin-based VisionEncoderDecoder model
  
  
      if method == "ablationcam":
@@ -65,7 +83,7 @@ def generate_gradcam(image_path, model_path, output_path, method='gradcam', use_
                            reshape_transform=reshape_transform)
  
      rgb_img = cv2.imread(image_path, 1)[:, :, ::-1]
-     rgb_img = cv2.resize(rgb_img, (224, 224)) ## (224, 224)
+     rgb_img = cv2.resize(rgb_img, (384, 384)) ## (224, 224)
      rgb_img = np.float32(rgb_img) / 255
      input_tensor = preprocess_image(rgb_img, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
  
@@ -80,7 +98,8 @@ def generate_gradcam(image_path, model_path, output_path, method='gradcam', use_
      cv2.imwrite(output_file, cam_image)
  
  
- def reshape_transform(tensor, height=14, width=14): ### height=14, width=14 for ViT-based Model
+ 
+ def reshape_transform(tensor, height=12, width=12): ### height=14, width=14 for ViT-based Model
      batch_size, token_number, embed_dim = tensor.size()
      if token_number < height * width:
          pad = torch.zeros(batch_size, height * width - token_number, embed_dim, device=tensor.device)
@@ -93,11 +112,9 @@ def reshape_transform(tensor, height=14, width=14): ### height=14, width=14 for
      return result
  
  
- 
- 
  # Example usage:
  #image_path = "/home/chayan/CGI_Net/images/images/CXR1353_IM-0230-1001.png"
- model_path = "./Mimic_test/"
+ model_path = "./Model/"
  output_path = "./CAM-Result/"
  
  
@@ -108,6 +125,50 @@ def sentence_case(paragraph):
      formatted_paragraph = '. '.join(formatted_sentences)
      return formatted_paragraph
  
+ def num2sym_bullets(text, bullet='-'):
+     """
+     Replaces '<num>.' bullet points with a specified symbol and formats the text as a bullet list.
+ 
+     Args:
+         text (str): Input text containing '<num>.' bullet points.
+         bullet (str): The symbol to replace '<num>.' with.
+ 
+     Returns:
+         str: Modified text with '<num>.' replaced and formatted as a bullet list.
+     """
+     sentences = re.split(r'<num>\.\s', text)
+     formatted_text = '\n'.join(f'{bullet} {sentence.strip()}' for sentence in sentences if sentence.strip())
+     return formatted_text
+ 
+ def is_cxr(image_path):
+     """
+     Checks if the uploaded image is a Chest X-ray using basic image processing.
+ 
+     Args:
+         image_path (str): Path to the uploaded image.
+ 
+     Returns:
+         bool: True if the image is likely a Chest X-ray, False otherwise.
+     """
+     try:
+ 
+         image = cv2.imread(image_path)
+ 
+         if image is None:
+             raise ValueError("Invalid image path.")
+ 
+         gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+         color_std = np.std(image, axis=2).mean()
+ 
+         if color_std > 0:
+             return False
+ 
+         return True
+ 
+     except Exception as e:
+         print(f"Error processing image: {e}")
+         return False
+ 
  def dicom_to_png(dicom_file, png_file):
      # Load DICOM file
      dicom_data = pydicom.dcmread(dicom_file)
@@ -125,12 +186,12 @@ def dicom_to_png(dicom_file, png_file):
      return img
  
  
- Image_Captioner = pipeline("image-to-text", model = "./Mimic_test/", device = 0)
+ Image_Captioner = pipeline("image-to-text", model = "./Model/", device = 0)
  
  data_dir = "./CAM-Result"
  
  @spaces.GPU(duration=300)
- def xray_report_generator(Image_file):
+ def xray_report_generator(Image_file, Query):
      if Image_file[-4:] =='.dcm':
          png_file = 'DCM2PNG.png'
          dicom_to_png(Image_file, png_file)
@@ -143,16 +204,32 @@ def xray_report_generator(Image_file):
      result = output[0]['generated_text']
      output_paragraph = sentence_case(result)
  
+     final_response = num2sym_bullets(output_paragraph, bullet='-')
+ 
+     query_prompt = f""" You are analyzing the doctor's query based on the patient's history and the generated chest X-ray report. Extract only the information relevant to the query.
+     If the report mentions the queried condition, write only the exact wording without any introduction. If the condition is not mentioned, respond with: 'No relevant findings related to [query condition].'.
+     """
+ 
+     #If the condition is negated, respond with: 'There is no [query condition].'.
+ 
+     completion = client.chat.completions.create(
+         model="gpt-4-turbo", ### gpt-4-turbo ### gpt-3.5-turbo-0125
+         messages=[
+             {"role": "system", "content": query_prompt},
+             {"role": "user", "content": f"Generated Report: {final_response}\nHistory/Doctor's Query: {Query}"}
+         ],
+         temperature=0.2)
+     query_response = completion.choices[0].message.content
+ 
      generate_gradcam(Image_file, model_path, output_path, method='gradcam', use_cuda=True)
  
      grad_cam_image = output_path + 'gradcam_result.png'
  
-     return Image_file, grad_cam_image, output_paragraph
- 
+     return grad_cam_image, final_response, query_response
  
  
  # def save_feedback(feedback):
- #     feedback_dir = "./Feedback"  # Update this to your desired directory
+ #     feedback_dir = "Chayan/Feedback/"  # Update this to your desired directory
  #     if not os.path.exists(feedback_dir):
  #         os.makedirs(feedback_dir)
  #     feedback_file = os.path.join(feedback_dir, "feedback.txt")
@@ -161,7 +238,6 @@ def xray_report_generator(Image_file):
  #     return "Feedback submitted successfully!"
  
  
- 
  def save_feedback(feedback):
      feedback_dir = "Chayan/Feedback/"  # Update this to your desired directory
      if not os.path.exists(feedback_dir):
@@ -177,11 +253,62 @@ def save_feedback(feedback):
          print(f"Error saving feedback: {e}")
          return "Failed to submit feedback!"
  
- 
+ 
+ # Custom Theme Definition
+ class Seafoam(Base):
+     def __init__(
+         self,
+         *,
+         primary_hue: Union[colors.Color, str] = colors.emerald,
+         secondary_hue: Union[colors.Color, str] = colors.blue,
+         neutral_hue: Union[colors.Color, str] = colors.gray,
+         spacing_size: Union[sizes.Size, str] = sizes.spacing_md,
+         radius_size: Union[sizes.Size, str] = sizes.radius_md,
+         text_size: Union[sizes.Size, str] = sizes.text_lg,
+         font: Union[fonts.Font, str, Iterable[Union[fonts.Font, str]]] = (
+             fonts.GoogleFont("Quicksand"),
+             "ui-sans-serif",
+             "sans-serif",
+         ),
+         font_mono: Union[fonts.Font, str, Iterable[Union[fonts.Font, str]]] = (
+             fonts.GoogleFont("IBM Plex Mono"),
+             "ui-monospace",
+             "monospace",
+         ),
+     ):
+         super().__init__(
+             primary_hue=primary_hue,
+             secondary_hue=secondary_hue,
+             neutral_hue=neutral_hue,
+             spacing_size=spacing_size,
+             radius_size=radius_size,
+             text_size=text_size,
+             font=font,
+             font_mono=font_mono,
+         )
+ 
+         self.set(
+             body_background_fill="linear-gradient(114.2deg, rgba(184,215,21,1) -15.3%, rgba(21,215,98,1) 14.5%, rgba(21,215,182,1) 38.7%, rgba(129,189,240,1) 58.8%, rgba(219,108,205,1) 77.3%, rgba(240,129,129,1) 88.5%)"
+         )
+ # Initialize the theme
+ seafoam = Seafoam()
+ 
+ 
+ 
  # Custom CSS styles
  custom_css = """
  <style>
  
+ /* Set background color for the entire Gradio app */
+ body, .gradio-container {
+     background-color: #f2f7f5 !important;
+ }
+ 
+ /* Optional: Add padding or margin for aesthetics */
+ .gradio-container {
+     padding: 20px;
+ }
+ 
  #title {
      color: green;
      font-size: 36px;
@@ -192,15 +319,25 @@ custom_css = """
      font-size: 22px;
  }
  
+ #title-row {
+     display: flex;
+     align-items: center;
+     gap: 10px;
+     margin-bottom: 0px;
+ }
+ #title-header h1 {
+     margin: 0;
+ }
+ 
  
  #submit-btn {
-     background-color: #1E90FF; /* DodgerBlue */
+     background-color: #f5dec6; /* Banana leaf */
      color: green;
      padding: 15px 32px;
      text-align: center;
      text-decoration: none;
      display: inline-block;
-     font-size: 20px;
+     font-size: 30px;
      margin: 4px 2px;
      cursor: pointer;
  }
@@ -208,6 +345,7 @@ custom_css = """
      background-color: #00FFFF;
  }
  
+ 
  .intext textarea {
      color: green;
      font-size: 20px;
@@ -262,16 +400,43 @@ def show_acknowledgment():
      yield gr.update(visible=False)
  
  
- with gr.Blocks(css = custom_css) as demo:
+ with gr.Blocks(theme=seafoam, css=custom_css) as demo:
  
      #gr.HTML(custom_css) # Inject custom CSS
+ 
+ 
+     with gr.Row(elem_id="title-row"):
+         with gr.Column(scale=0):
+             gr.Image(
+                 value="./AURA-CXR-Logo.png",
+                 show_label=False,
+                 width=60,
+                 container=False
+             )
+         with gr.Column():
+             gr.Markdown(
+                 """
+                 <h1 style="color:blue; font-size: 32px; font-weight: bold; margin: 0;">
+                 AURA-CXR: Explainable Diagnosis of Chest Diseases from X-rays
+                 </h1>
+                 """,
+                 elem_id="title-header"
+             )
  
      gr.Markdown(
-         """
-         <h1 style="color:blue; font-size: 36px; font-weight: bold">Chest X-ray Report Generator</h1>
-         <p id="description">Upload an X-ray image and get its report with heat-map visualization.</p>
-         """
+         "<p id='description'>Upload an X-ray image and get its report with heat-map visualization.</p>"
      )
+ 
+ 
+ 
+     # gr.Markdown(
+     #     """
+     #     <h1 style="color:blue; font-size: 36px; font-weight: bold; margin: 0;">AURA-CXR: Explainable Diagnosis of Chest Diseases from X-rays</h1>
+     #     <p id="description">Upload an X-ray image and get its report with heat-map visualization.</p>
+     #     """
+     # )
+ 
+     #<h1 style="color:blue; font-size: 36px; font-weight: bold">AURA-CXR: Explainable Diagnosis of Chest Diseases from X-rays</h1>
  
      with gr.Row():
          inputs = gr.File(label="Upload Chest X-ray Image File", type="filepath")
@@ -279,17 +444,37 @@ with gr.Blocks(css = custom_css) as demo:
      with gr.Row():
          with gr.Column(scale=1, min_width=300):
              outputs1 = gr.Image(label="Image Viewer")
+             history_query = gr.Textbox(label="History/Doctor's Query", elem_classes="intext")
          with gr.Column(scale=1, min_width=300):
              outputs2 = gr.Image(label="Grad_CAM-Visualization")
          with gr.Column(scale=1, min_width=300):
              outputs3 = gr.Textbox(label="Generated Report", elem_classes = "intext")
+             outputs4 = gr.Textbox(label = "Query's Response", elem_classes = "intext")
  
  
-     submit_btn = gr.Button("Generate Report", elem_id="submit-btn")
+     submit_btn = gr.Button("Generate Report", elem_id="submit-btn", variant="primary")
+ 
+     def show_image(file_path):
+         if is_cxr(file_path):  # Check if it's a valid Chest X-ray
+             return file_path, "Valid Image"  # Show the image in Image Viewer
+         else:
+             return None, "Invalid image. Please upload a proper Chest X-ray."
+ 
+ 
+     # Show the uploaded image immediately in the Image Viewer
+     inputs.change(
+         fn=show_image,  # Calls the function to return the same file path
+         inputs=inputs,
+         outputs=[outputs1, outputs3]
+     )
+ 
+ 
+ 
+ 
      submit_btn.click(
          fn=xray_report_generator,
-         inputs=inputs,
-         outputs=[outputs1, outputs2, outputs3])
+         inputs=[inputs,history_query],
+         outputs=[outputs2, outputs3, outputs4])
  
  
      gr.Markdown(
@@ -310,7 +495,6 @@ with gr.Blocks(css = custom_css) as demo:
      )
  
  
- 
      # Feedback section
      gr.Markdown(
          """
@@ -320,8 +504,10 @@ with gr.Blocks(css = custom_css) as demo:
  
      with gr.Row():
          feedback_input = gr.Textbox(label="Your Feedback", lines=4, placeholder="Enter your feedback here...")
-         feedback_submit_btn = gr.Button("Submit Feedback", elem_classes="small-button")
-         feedback_output = gr.Textbox(label="Feedback Status", interactive=True)
+         feedback_submit_btn = gr.Button("Submit Feedback", elem_classes="small-button", variant="secondary")
+         feedback_output = gr.Textbox(label="Feedback Status", interactive=False)
+ 
+ 
  
      feedback_submit_btn.click(
          fn=save_feedback,
@@ -329,6 +515,7 @@ with gr.Blocks(css = custom_css) as demo:
          outputs=feedback_output
      )
  
+ 
      # Buttons and Markdown for Contact Us and Acknowledgment
      with gr.Row():
          contact_btn = gr.Button("Contact Us", elem_classes="small-button", variant="secondary")
@@ -339,6 +526,12 @@ with gr.Blocks(css = custom_css) as demo:
  
      # Update the content and make it visible when the buttons are clicked
      contact_btn.click(fn=show_contact_info, outputs=contact_info, show_progress=False)
-     ack_btn.click(fn=show_acknowledgment, outputs=acknowledgment_info, show_progress=False)
+     ack_btn.click(fn=show_acknowledgment, outputs=acknowledgment_info, show_progress=False)
+ 
+     # Update the content and make it visible when the buttons are clicked
+     # contact_btn.click(fn=show_contact_info, outputs=contact_info, show_progress=False)
+     # ack_btn.click(fn=show_acknowledgment, outputs=acknowledgment_info, show_progress=False)
+ 
  
  demo.launch(share=True)
+ 
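For reference, a minimal sketch of what the new num2sym_bullets post-processing step inside xray_report_generator does, assuming the captioning model emits '<num>.' markers between findings; the sample report string below is made up for illustration and is not model output:

    import re

    def num2sym_bullets(text, bullet='-'):
        # Same logic as the helper added in this commit: split on '<num>. ' markers
        # and re-join the non-empty pieces as a bullet list.
        sentences = re.split(r'<num>\.\s', text)
        return '\n'.join(f'{bullet} {sentence.strip()}' for sentence in sentences if sentence.strip())

    # Made-up example input:
    sample = "<num>. Heart size is normal. <num>. No focal consolidation. <num>. No pleural effusion."
    print(num2sym_bullets(sample))
    # - Heart size is normal.
    # - No focal consolidation.
    # - No pleural effusion.

The bulleted string is what gets returned to the "Generated Report" textbox and passed to the chat-completion query step as the generated report.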