rishirajbal committed · verified
Commit 270b631 · 1 Parent(s): 448426c

Update app.py

Files changed (1):
  1. app.py +17 -16
app.py CHANGED
@@ -3,6 +3,7 @@ import os
 import tensorflow as tf
 import numpy as np
 import requests
+import time
 
 from langchain_groq import ChatGroq
 from langchain.agents import initialize_agent
@@ -35,23 +36,21 @@ def classify_image_and_stats(image_input):
     img_norm = img / 255.0
     img_batch = np.expand_dims(img_norm, axis=0)
 
-    prediction = model.predict(img_batch)[0]  # (256, 256, 1)
+    prediction = model.predict(img_batch)[0]
     mask = (prediction > 0.5).astype(np.uint8)
 
     if mask.ndim == 3 and mask.shape[-1] == 1:
         mask = np.squeeze(mask, axis=-1)
 
-    # Tumor stats
     tumor_area = np.sum(mask)
     total_area = IMG_HEIGHT * IMG_WIDTH
     tumor_ratio = tumor_area / total_area
 
-    tumor_label = "Tumor Detected" if tumor_ratio > 0.005 else "No Tumor Detected"
+    tumor_label = "Tumor Detected" if tumor_ratio > 0.0025 else "No Tumor Detected"
 
-    # === Overlay mask on original ===
-    overlay = np.array(img)  # original resized input
+    overlay = np.array(img)
     red_mask = np.zeros_like(overlay)
-    red_mask[..., 0] = mask * 255  # Red channel
+    red_mask[..., 0] = mask * 255
 
     overlay_img = np.clip(0.6 * overlay + 0.4 * red_mask, 0, 255).astype(np.uint8)
 
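Note on the threshold change above: with IMG_HEIGHT = IMG_WIDTH = 256 the mask has 65,536 pixels, so the new 0.0025 cutoff flags anything above roughly 164 predicted tumor pixels, versus about 328 under the old 0.005. A minimal sketch of that arithmetic (the toy mask below is illustrative only, not part of app.py):

import numpy as np

IMG_HEIGHT, IMG_WIDTH = 256, 256          # assumed to match the resize constants in app.py
total_area = IMG_HEIGHT * IMG_WIDTH       # 65,536 pixels

# Toy binary mask with a 15x15 "tumor" blob (225 positive pixels), purely illustrative
mask = np.zeros((IMG_HEIGHT, IMG_WIDTH), dtype=np.uint8)
mask[100:115, 100:115] = 1

tumor_area = int(np.sum(mask))
tumor_ratio = tumor_area / total_area     # 225 / 65536 ≈ 0.0034

# Crosses the new 0.0025 threshold (≈ 164 px) but would miss the old 0.005 (≈ 328 px)
tumor_label = "Tumor Detected" if tumor_ratio > 0.0025 else "No Tumor Detected"
print(tumor_label, round(tumor_ratio, 4))  # Tumor Detected 0.0034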
 
@@ -65,7 +64,7 @@ def classify_image_and_stats(image_input):
     return overlay_img, stats
 
 
-# === Gradio handler ===
+# === Gradio handler with typing effect ===
 def rishigpt_handler(image_input, groq_api_key):
     os.environ["GROQ_API_KEY"] = groq_api_key
 
 
@@ -99,22 +98,24 @@ def rishigpt_handler(image_input, groq_api_key):
     user_query = "Give me the segmentation details"
     classification = agent.run(user_query)
 
-    # Better prompt + output parser
     prompt = PromptTemplate(
         input_variables=["result"],
         template=(
             "You are a compassionate AI radiologist. "
             "Read this tumor analysis result: {result}. "
-            "Summarize the situation like you're talking to the patient in calm, clear language. "
-            "Add any recommendations for next steps too, but keep it easy to understand."
+            "Summarize the situation for the patient in natural paragraphs, calm, clear tone, with next steps."
         )
     )
 
     chain = prompt | llm
-    description = chain.invoke({"result": classification}).content.strip()
-
-    return overlay_img, description
+    final_text = chain.invoke({"result": classification}).content.strip()
 
+    # === Yield mask and typing chunks ===
+    displayed_text = ""
+    for char in final_text:
+        displayed_text += char
+        time.sleep(0.015)
+        yield overlay_img, displayed_text
 
 # === Gradio UI ===
 inputs = [
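The switch from return to yield above is what drives the typing effect: when the handler is a generator, Gradio re-renders the outputs on every yield. A stripped-down sketch of the same pattern, independent of the model and LangChain pieces (the function name and message below are placeholders, not from app.py):

import time
import gradio as gr

def typing_demo(name):
    # Each yield pushes an updated value to the output Textbox, so the text appears to be typed.
    message = f"Hello {name}, this explanation is being typed out."
    shown = ""
    for char in message:
        shown += char
        time.sleep(0.015)  # same per-character delay used in app.py
        yield shown

demo = gr.Interface(fn=typing_demo, inputs=gr.Textbox(), outputs=gr.Textbox())

if __name__ == "__main__":
    demo.launch()  # older Gradio 3.x releases may need demo.queue() before launch() for generators

One side effect to keep in mind: the typing time grows with the reply length; at 0.015 s per character, a 1,000-character explanation takes roughly 15 seconds to finish rendering.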
@@ -124,7 +125,7 @@ inputs = [
 
 outputs = [
     gr.Image(type="numpy", label="Overlay: Brain MRI + Tumor Mask"),
-    gr.Textbox(label="Doctor's Explanation")
+    gr.Textbox(label="Doctor's Explanation (Typing...)")
 ]
 
 if __name__ == "__main__":
@@ -132,6 +133,6 @@ if __name__ == "__main__":
         fn=rishigpt_handler,
         inputs=inputs,
         outputs=outputs,
-        title="🧠 RishiGPT Medical Brain Segmentation",
-        description="UNet++ Brain Tumor Segmentation with mask overlay, detailed stats, and human-like explanation."
+        title="RishiGPT Medical Brain Segmentation",
+        description="UNet++ Brain Tumor Segmentation with live mask overlay, detailed stats, and human-like typing explanation."
     ).launch()
 