Ali2206 committed on
Commit
41c4b2a
·
verified ·
1 Parent(s): 3539dd4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -38
app.py CHANGED
@@ -1,5 +1,3 @@
1
- # Full updated app.py with TOOL_CALLS rendered + copy.deepcopy fix for Gradio UI updates
2
-
3
  import sys
4
  import os
5
  import pandas as pd
@@ -16,7 +14,9 @@ import subprocess
16
  import traceback
17
  import torch
18
  import copy
 
19
 
 
20
  os.environ["VLLM_LOGGING_LEVEL"] = "DEBUG"
21
  if not torch.cuda.is_available():
22
  print("No GPU detected. Forcing CPU mode by setting CUDA_VISIBLE_DEVICES to an empty string.")
@@ -65,7 +65,7 @@ def extract_priority_pages(file_path: str, max_pages: int = 20) -> str:
65
  text_chunks.append(f"=== Page {i+1} ===\n{text.strip()}")
66
  for i, page in enumerate(pdf.pages[3:max_pages], start=4):
67
  page_text = page.extract_text() or ""
68
- if any(re.search(rf'\\b{kw}\\b', page_text.lower()) for kw in MEDICAL_KEYWORDS):
69
  text_chunks.append(f"=== Page {i} ===\n{page_text.strip()}")
70
  return "\n\n".join(text_chunks)
71
  except Exception as e:
@@ -153,35 +153,39 @@ def init_agent():
153
  def create_ui(agent):
154
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
155
  gr.Markdown("<h1 style='text-align: center;'>🩺 Clinical Oversight Assistant</h1>")
 
 
156
  chatbot = gr.Chatbot(label="Analysis", height=600, type="messages")
157
  file_upload = gr.File(file_types=[".pdf", ".csv", ".xls", ".xlsx"], file_count="multiple")
158
  msg_input = gr.Textbox(placeholder="Ask about potential oversights...", show_label=False)
159
  send_btn = gr.Button("Analyze", variant="primary")
160
  download_output = gr.File(label="Download Full Report")
161
 
162
- def analyze(message: str, history: list, files: list):
163
- try:
164
- history.append({"role": "user", "content": message})
165
- history.append({"role": "assistant", "content": "⏳ Analyzing records for potential oversights..."})
166
- yield copy.deepcopy(history), None
167
-
168
- extracted = ""
169
- file_hash_value = ""
170
- if files:
171
- with ThreadPoolExecutor(max_workers=4) as executor:
172
- futures = [executor.submit(convert_file_to_json, f.name, f.name.split(".")[-1].lower()) for f in files]
173
- results = []
174
- for future in as_completed(futures):
175
- try:
176
- res = future.result()
177
- results.append(sanitize_utf8(res))
178
- except Exception as e:
179
- print("❌ Error in file processing:", str(e))
180
- traceback.print_exc()
181
- extracted = "\n".join(results)
182
- file_hash_value = file_hash(files[0].name)
183
-
184
- prompt = f"""Review these medical records and identify EXACTLY what might have been missed:
 
 
185
  1. List potential missed diagnoses
186
  2. Flag any medication conflicts
187
  3. Note incomplete assessments
@@ -192,13 +196,14 @@ Medical Records:
192
 
193
  ### Potential Oversights:
194
  """
195
- print("🔎 Generated prompt:")
196
- print(prompt)
197
 
198
- full_response = ""
199
- response_chunks = []
200
- tool_calls_rendered = []
201
 
 
202
  for chunk in agent.run_gradio_chat(
203
  message=prompt,
204
  history=[],
@@ -229,7 +234,7 @@ Medical Records:
229
  else:
230
  history.append({"role": "assistant", "content": display_response})
231
 
232
- yield copy.deepcopy(history), None
233
 
234
  full_response = re.sub(r"\[TOOL_CALLS\].*?\n*", "", full_response, flags=re.DOTALL).strip()
235
  full_response = full_response.replace('[TxAgent]', '').strip()
@@ -245,15 +250,17 @@ Medical Records:
245
  else:
246
  history.append({"role": "assistant", "content": full_response})
247
 
248
- yield copy.deepcopy(history), report_path if report_path and os.path.exists(report_path) else None
249
 
250
  except Exception as e:
251
  history.append({"role": "assistant", "content": f"❌ An error occurred in analyze: {str(e)}"})
252
  traceback.print_exc()
253
- yield copy.deepcopy(history), None
254
 
255
- send_btn.click(analyze, inputs=[msg_input, gr.State([]), file_upload], outputs=[chatbot, download_output])
256
- msg_input.submit(analyze, inputs=[msg_input, gr.State([]), file_upload], outputs=[chatbot, download_output])
 
 
257
  return demo
258
 
259
  if __name__ == "__main__":
@@ -266,8 +273,9 @@ if __name__ == "__main__":
266
  server_port=7860,
267
  show_error=True,
268
  allowed_paths=[report_dir],
269
- share=False
 
270
  )
271
  except Exception as e:
272
  print("❌ Fatal error during launch:", str(e))
273
- traceback.print_exc()
 
 
 
1
  import sys
2
  import os
3
  import pandas as pd
 
14
  import traceback
15
  import torch
16
  import copy
17
+ import time
18
 
19
+ # Configure environment variables
20
  os.environ["VLLM_LOGGING_LEVEL"] = "DEBUG"
21
  if not torch.cuda.is_available():
22
  print("No GPU detected. Forcing CPU mode by setting CUDA_VISIBLE_DEVICES to an empty string.")
 
65
  text_chunks.append(f"=== Page {i+1} ===\n{text.strip()}")
66
  for i, page in enumerate(pdf.pages[3:max_pages], start=4):
67
  page_text = page.extract_text() or ""
68
+ if any(re.search(rf'\b{kw}\b', page_text.lower()) for kw in MEDICAL_KEYWORDS):
69
  text_chunks.append(f"=== Page {i} ===\n{page_text.strip()}")
70
  return "\n\n".join(text_chunks)
71
  except Exception as e:
 
153
  def create_ui(agent):
154
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
155
  gr.Markdown("<h1 style='text-align: center;'>🩺 Clinical Oversight Assistant</h1>")
156
+ # Persistent conversation state
157
+ conversation_state = gr.State([])
158
  chatbot = gr.Chatbot(label="Analysis", height=600, type="messages")
159
  file_upload = gr.File(file_types=[".pdf", ".csv", ".xls", ".xlsx"], file_count="multiple")
160
  msg_input = gr.Textbox(placeholder="Ask about potential oversights...", show_label=False)
161
  send_btn = gr.Button("Analyze", variant="primary")
162
  download_output = gr.File(label="Download Full Report")
163
 
164
+ def analyze(message: str, state: list, files: list):
165
+ if state is None:
166
+ state = []
167
+ history = state
168
+ history.append({"role": "user", "content": message})
169
+ history.append({"role": "assistant", "content": "⏳ Analyzing records for potential oversights..."})
170
+ yield copy.deepcopy(history), None, copy.deepcopy(history)
171
+
172
+ extracted = ""
173
+ file_hash_value = ""
174
+ if files:
175
+ with ThreadPoolExecutor(max_workers=4) as executor:
176
+ futures = [executor.submit(convert_file_to_json, f.name, f.name.split(".")[-1].lower()) for f in files]
177
+ results = []
178
+ for future in as_completed(futures):
179
+ try:
180
+ res = future.result()
181
+ results.append(sanitize_utf8(res))
182
+ except Exception as e:
183
+ print("❌ Error in file processing:", str(e))
184
+ traceback.print_exc()
185
+ extracted = "\n".join(results)
186
+ file_hash_value = file_hash(files[0].name)
187
+
188
+ prompt = f"""Review these medical records and identify EXACTLY what might have been missed:
189
  1. List potential missed diagnoses
190
  2. Flag any medication conflicts
191
  3. Note incomplete assessments
 
196
 
197
  ### Potential Oversights:
198
  """
199
+ print("🔎 Generated prompt:")
200
+ print(prompt)
201
 
202
+ full_response = ""
203
+ response_chunks = []
204
+ tool_calls_rendered = []
205
 
206
+ try:
207
  for chunk in agent.run_gradio_chat(
208
  message=prompt,
209
  history=[],
 
234
  else:
235
  history.append({"role": "assistant", "content": display_response})
236
 
237
+ yield copy.deepcopy(history), None, copy.deepcopy(history)
238
 
239
  full_response = re.sub(r"\[TOOL_CALLS\].*?\n*", "", full_response, flags=re.DOTALL).strip()
240
  full_response = full_response.replace('[TxAgent]', '').strip()
 
250
  else:
251
  history.append({"role": "assistant", "content": full_response})
252
 
253
+ yield copy.deepcopy(history), report_path if report_path and os.path.exists(report_path) else None, copy.deepcopy(history)
254
 
255
  except Exception as e:
256
  history.append({"role": "assistant", "content": f"❌ An error occurred in analyze: {str(e)}"})
257
  traceback.print_exc()
258
+ yield copy.deepcopy(history), None, copy.deepcopy(history)
259
 
260
+ send_btn.click(analyze, inputs=[msg_input, conversation_state, file_upload],
261
+ outputs=[chatbot, download_output, conversation_state])
262
+ msg_input.submit(analyze, inputs=[msg_input, conversation_state, file_upload],
263
+ outputs=[chatbot, download_output, conversation_state])
264
  return demo
265
 
266
  if __name__ == "__main__":
 
273
  server_port=7860,
274
  show_error=True,
275
  allowed_paths=[report_dir],
276
+ share=False,
277
+ ssr=False # Disable SSR to improve UI updates
278
  )
279
  except Exception as e:
280
  print("❌ Fatal error during launch:", str(e))
281
+ traceback.print_exc()