Ali2206 committed
Commit 4d9eb46 · verified · 1 Parent(s): 5622869

Update app.py

Files changed (1)
  1. app.py +28 -34
app.py CHANGED
@@ -60,7 +60,7 @@ def extract_priority_pages(file_path: str, max_pages: int = 20) -> str:
                 text_chunks.append(f"=== Page {i+1} ===\n{(page.extract_text() or '').strip()}")
             for i, page in enumerate(pdf.pages[3:max_pages], start=4):
                 page_text = page.extract_text() or ""
-                if any(re.search(rf'\b{kw}\b', page_text.lower()) for kw in MEDICAL_KEYWORDS):
+                if any(re.search(rf'\\b{kw}\\b', page_text.lower()) for kw in MEDICAL_KEYWORDS):
                     text_chunks.append(f"=== Page {i} ===\n{page_text.strip()}")
         return "\n\n".join(text_chunks)
     except Exception as e:
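One subtlety in the word-boundary pattern above, in case the doubled backslashes are intentional rather than display escaping: inside a raw f-string, rf'\b{kw}\b' hands the regex engine the word-boundary token, whereas rf'\\b{kw}\\b' hands it a literal backslash followed by the letter b, which will not match ordinary page text. A minimal sketch (the MEDICAL_KEYWORDS list here is illustrative only, not the app's actual list):

import re

MEDICAL_KEYWORDS = ["hypertension", "diabetes"]  # illustrative stand-in for the app's list

page_text = "History of Hypertension; on metformin for diabetes."

# \b inside a raw f-string stays the regex word-boundary token -> matches
print(any(re.search(rf'\b{kw}\b', page_text.lower()) for kw in MEDICAL_KEYWORDS))    # True

# \\b inside a raw f-string becomes a literal backslash plus 'b' -> never matches here
print(any(re.search(rf'\\b{kw}\\b', page_text.lower()) for kw in MEDICAL_KEYWORDS))  # False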
@@ -130,7 +130,7 @@ def init_agent():
         enable_checker=True,
         step_rag_num=8,
         seed=100,
-        additional_default_tools=[]
+        additional_default_tools=[],
     )
     agent.init_model()
     return agent
@@ -141,26 +141,30 @@ def create_ui(agent: TxAgent):
         gr.Markdown("<h3 style='text-align: center;'>Identify potential oversights in patient care</h3>")
 
         chatbot = gr.Chatbot(label="Analysis", height=600, type="messages")
-        file_upload = gr.File(label="Upload Medical Records", file_types=[".pdf", ".csv", ".xls", ".xlsx"], file_count="multiple")
+        file_upload = gr.File(
+            label="Upload Medical Records",
+            file_types=[".pdf", ".csv", ".xls", ".xlsx"],
+            file_count="multiple"
+        )
         msg_input = gr.Textbox(placeholder="Ask about potential oversights...", show_label=False)
         send_btn = gr.Button("Analyze", variant="primary")
         conversation_state = gr.State([])
         download_output = gr.File(label="Download Full Report")
 
         def analyze_potential_oversights(message: str, history: list, conversation: list, files: list):
+            start_time = time.time()
             try:
                 history.append({"role": "user", "content": message})
-                history.append({"role": "assistant", "content": "Analyzing records for potential oversights..."})
+                history.append({"role": "assistant", "content": "Analyzing records for potential oversights..."})
                 yield history, None
 
                 extracted_data = ""
                 file_hash_value = ""
-                if files:
+                if files and isinstance(files, list):
                     with ThreadPoolExecutor(max_workers=4) as executor:
                         futures = [executor.submit(convert_file_to_json, f.name, f.name.split(".")[-1].lower()) for f in files if hasattr(f, 'name')]
-                        results = [sanitize_utf8(f.result()) for f in as_completed(futures)]
-                        extracted_data = "\n".join(results)
-                    file_hash_value = file_hash(files[0].name)
+                        extracted_data = "\n".join([sanitize_utf8(f.result()) for f in as_completed(futures)])
+                    file_hash_value = file_hash(files[0].name) if files else ""
 
                 analysis_prompt = f"""Review these medical records and identify EXACTLY what might have been missed:
 1. List potential missed diagnoses
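For reference, a minimal self-contained sketch of the fan-out pattern used in this handler: one conversion job per uploaded file, results joined as they complete. convert_file_to_json and sanitize_utf8 are stubs standing in for the app's own helpers, and the paths are made up:

from concurrent.futures import ThreadPoolExecutor, as_completed

def convert_file_to_json(path: str, ext: str) -> str:
    # stub for the app's converter; assumed to return one text/JSON blob per file
    return f'{{"file": "{path}", "type": "{ext}"}}'

def sanitize_utf8(text: str) -> str:
    # stub: drop anything that cannot be round-tripped through UTF-8
    return text.encode("utf-8", "ignore").decode("utf-8")

paths = ["records/labs.pdf", "records/visits.csv"]  # hypothetical uploads

with ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(convert_file_to_json, p, p.split(".")[-1].lower()) for p in paths]
    # as_completed yields futures in finish order, so the joined text does not
    # necessarily follow the upload order
    extracted_data = "\n".join(sanitize_utf8(f.result()) for f in as_completed(futures))

print(extracted_data)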
@@ -172,7 +176,7 @@ Medical Records:\n{extracted_data[:15000]}
 
 ### Potential Oversights:\n"""
 
-                response = []
+                response = ""
                 for chunk in agent.run_gradio_chat(
                     message=analysis_prompt,
                     history=[],
@@ -183,23 +187,29 @@ Medical Records:\n{extracted_data[:15000]}
                     conversation=conversation
                 ):
                     if isinstance(chunk, str):
-                        response.append(chunk)
+                        response += chunk
                     elif isinstance(chunk, list):
-                        response.extend([c.content for c in chunk if hasattr(c, 'content')])
-                    history[-1] = {"role": "assistant", "content": "".join(response).strip()}
+                        response += "".join([c.content for c in chunk if hasattr(c, 'content')])
+
+                    history[-1]["content"] = response.replace("[TOOL_CALLS]", "").strip()
                     yield history, None
 
-                final_output = "".join(response).strip()
+                final_output = response.replace("[TOOL_CALLS]", "").strip()
                 if not final_output:
                     final_output = "No clear oversights identified. Recommend comprehensive review."
-                history[-1] = {"role": "assistant", "content": final_output}
 
-                report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt")
-                return history, report_path if os.path.exists(report_path) else None
+                report_path = None
+                if file_hash_value:
+                    possible_report = os.path.join(report_dir, f"{file_hash_value}_report.txt")
+                    if os.path.exists(possible_report):
+                        report_path = possible_report
+
+                history[-1] = {"role": "assistant", "content": final_output}
+                yield history, report_path
 
             except Exception as e:
                 history.append({"role": "assistant", "content": f"❌ Analysis failed: {str(e)}"})
-                return history, None
+                yield history, None
 
         inputs = [msg_input, chatbot, conversation_state, file_upload]
         outputs = [chatbot, download_output]
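Because analyze_potential_oversights is a generator, the intermediate and final updates both have to be yielded: a value returned from a Python generator only travels on StopIteration, and Gradio streams the yielded tuples to the bound outputs. A stripped-down, pure-Python sketch of the same shape (names and the report path are illustrative):

from typing import Generator, Optional, Tuple

def analyze(message: str) -> Generator[Tuple[list, Optional[str]], None, None]:
    history = [{"role": "user", "content": message},
               {"role": "assistant", "content": "Analyzing..."}]
    yield history, None                       # placeholder shown while work runs

    history[-1] = {"role": "assistant", "content": "Done: no oversights found."}
    yield history, "/tmp/example_report.txt"  # final chat state plus a report path

# A streaming consumer (as a UI layer would iterate it) only sees yielded values;
# anything returned after the last yield is never delivered this way.
for chat, report in analyze("Check these records"):
    print(chat[-1]["content"], report)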
@@ -218,22 +228,6 @@ if __name__ == "__main__":
     print("Initializing medical analysis agent...")
     agent = init_agent()
 
-    print("Performing warm-up call...")
-    try:
-        warm_up = agent.run_gradio_chat(
-            message="Warm up",
-            history=[],
-            temperature=0.1,
-            max_new_tokens=10,
-            max_token=100,
-            call_agent=False,
-            conversation=[]
-        )
-        for _ in warm_up:
-            pass
-    except Exception as e:
-        print(f"Warm-up error: {str(e)}")
-
     print("Launching interface...")
     demo = create_ui(agent)
     demo.queue().launch(
@@ -241,4 +235,4 @@ if __name__ == "__main__":
         server_port=7860,
         show_error=True,
         share=True
-    )
+    )
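For context, a reduced sketch of how a streaming handler like this is typically wired into Blocks. Component names mirror the ones above, but the handler body is a placeholder rather than the app's real analysis, and TxAgent is left out entirely:

import gradio as gr

def analyze_potential_oversights(message, history, conversation, files):
    history = (history or []) + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": "Analyzing records for potential oversights..."},
    ]
    yield history, None   # placeholder while the analysis runs
    history[-1] = {"role": "assistant", "content": "Example result."}
    yield history, None   # second slot would carry a report file path if one exists

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Analysis", height=600, type="messages")
    file_upload = gr.File(label="Upload Medical Records",
                          file_types=[".pdf", ".csv", ".xls", ".xlsx"],
                          file_count="multiple")
    msg_input = gr.Textbox(placeholder="Ask about potential oversights...", show_label=False)
    send_btn = gr.Button("Analyze", variant="primary")
    conversation_state = gr.State([])
    download_output = gr.File(label="Download Full Report")

    send_btn.click(
        fn=analyze_potential_oversights,
        inputs=[msg_input, chatbot, conversation_state, file_upload],
        outputs=[chatbot, download_output],
    )

demo.queue().launch(server_port=7860, show_error=True, share=True)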
 