Ali2206 committed on
Commit
1ebbef1
·
verified ·
1 Parent(s): 71d01eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -35
app.py CHANGED
@@ -24,10 +24,12 @@ os.makedirs(base_dir, exist_ok=True)
24
  model_cache_dir = os.path.join(base_dir, "txagent_models")
25
  tool_cache_dir = os.path.join(base_dir, "tool_cache")
26
  file_cache_dir = os.path.join(base_dir, "cache")
 
27
 
28
  os.makedirs(model_cache_dir, exist_ok=True)
29
  os.makedirs(tool_cache_dir, exist_ok=True)
30
  os.makedirs(file_cache_dir, exist_ok=True)
 
31
 
32
  os.environ.update({
33
  "TRANSFORMERS_CACHE": model_cache_dir,
@@ -73,11 +75,7 @@ def convert_file_to_json(file_path: str, file_type: str) -> str:
73
 
74
  if file_type == "pdf":
75
  text = extract_priority_pages(file_path)
76
- result = json.dumps({
77
- "filename": os.path.basename(file_path),
78
- "content": text,
79
- "status": "initial"
80
- })
81
  Thread(target=full_pdf_processing, args=(file_path, h)).start()
82
 
83
  elif file_type == "csv":
@@ -108,25 +106,19 @@ def full_pdf_processing(file_path: str, file_hash: str):
108
  cache_path = os.path.join(file_cache_dir, f"{file_hash}_full.json")
109
  if os.path.exists(cache_path):
110
  return
111
-
112
  with pdfplumber.open(file_path) as pdf:
113
  full_text = "\n".join([f"=== Page {i+1} ===\n{(page.extract_text() or '').strip()}" for i, page in enumerate(pdf.pages)])
114
-
115
- result = json.dumps({
116
- "filename": os.path.basename(file_path),
117
- "content": full_text,
118
- "status": "complete"
119
- })
120
-
121
  with open(cache_path, "w", encoding="utf-8") as f:
122
  f.write(result)
 
 
123
  except Exception as e:
124
  print(f"Background processing failed: {str(e)}")
125
 
126
  def init_agent():
127
  default_tool_path = os.path.abspath("data/new_tool.json")
128
  target_tool_path = os.path.join(tool_cache_dir, "new_tool.json")
129
-
130
  if not os.path.exists(target_tool_path):
131
  shutil.copy(default_tool_path, target_tool_path)
132
 
@@ -149,23 +141,31 @@ def create_ui(agent: TxAgent):
149
  gr.Markdown("<h3 style='text-align: center;'>Identify potential oversights in patient care</h3>")
150
 
151
  chatbot = gr.Chatbot(label="Analysis", height=600, type="messages")
152
- file_upload = gr.File(label="Upload Medical Records", file_types=[".pdf", ".csv", ".xls", ".xlsx"], file_count="multiple")
 
 
 
 
153
  msg_input = gr.Textbox(placeholder="Ask about potential oversights...", show_label=False)
154
  send_btn = gr.Button("Analyze", variant="primary")
155
  conversation_state = gr.State([])
 
156
 
157
  def analyze_potential_oversights(message: str, history: list, conversation: list, files: list):
158
  start_time = time.time()
159
  try:
160
  history.append({"role": "user", "content": message})
161
  history.append({"role": "assistant", "content": "Analyzing records for potential oversights..."})
162
- yield history
163
 
164
  extracted_data = ""
 
165
  if files:
166
  with ThreadPoolExecutor(max_workers=4) as executor:
167
  futures = [executor.submit(convert_file_to_json, f.name, f.name.split(".")[-1].lower()) for f in files if hasattr(f, 'name')]
168
- extracted_data = "\n".join([sanitize_utf8(f.result()) for f in as_completed(futures)])
 
 
169
 
170
  analysis_prompt = f"""Review these medical records and identify EXACTLY what might have been missed:
171
  1. List potential missed diagnoses
@@ -173,15 +173,9 @@ def create_ui(agent: TxAgent):
173
  3. Note incomplete assessments
174
  4. Highlight abnormal results needing follow-up
175
 
176
- Medical Records:
177
- {extracted_data[:15000]}
178
-
179
- Provide ONLY the potential oversights in this format:
180
 
181
- ### Potential Oversights:
182
- 1. [Missed diagnosis] - [Evidence from records]
183
- 2. [Medication issue] - [Supporting data]
184
- 3. [Assessment gap] - [Relevant findings]"""
185
 
186
  response = []
187
  for chunk in agent.run_gradio_chat(
@@ -197,28 +191,29 @@ Provide ONLY the potential oversights in this format:
197
  response.append(chunk)
198
  elif isinstance(chunk, list):
199
  response.extend([c.content for c in chunk if hasattr(c, 'content')])
 
200
  if len(response) % 3 == 0:
201
  history[-1] = {"role": "assistant", "content": "".join(response).strip()}
202
- yield history
203
 
204
  final_output = "".join(response).strip()
205
  if not final_output:
206
  final_output = "No clear oversights identified. Recommend comprehensive review."
207
-
208
  if not final_output.startswith(("1.", "-", "*", "#")):
209
  final_output = "• " + final_output.replace("\n", "\n• ")
 
210
 
211
- history[-1] = {"role": "assistant", "content": f"### Potential Clinical Oversights:\n{final_output}"}
212
- print(f"Analysis completed in {time.time()-start_time:.2f}s")
213
- yield history
214
 
215
  except Exception as e:
216
  history.append({"role": "assistant", "content": f"❌ Analysis failed: {str(e)}"})
217
- yield history
218
 
219
  inputs = [msg_input, chatbot, conversation_state, file_upload]
220
- send_btn.click(analyze_potential_oversights, inputs=inputs, outputs=chatbot)
221
- msg_input.submit(analyze_potential_oversights, inputs=inputs, outputs=chatbot)
 
222
 
223
  gr.Examples([
224
  ["What might have been missed in this patient's treatment?"],
@@ -231,7 +226,6 @@ Provide ONLY the potential oversights in this format:
231
  if __name__ == "__main__":
232
  print("Initializing medical analysis agent...")
233
  agent = init_agent()
234
-
235
  print("Launching interface...")
236
  demo = create_ui(agent)
237
  demo.queue().launch(
@@ -239,4 +233,4 @@ if __name__ == "__main__":
239
  server_port=7860,
240
  show_error=True,
241
  share=True
242
- )
 
24
# Working directories under base_dir (base_dir is created earlier in the file).
model_cache_dir = os.path.join(base_dir, "txagent_models")  # downloaded model weights
tool_cache_dir = os.path.join(base_dir, "tool_cache")       # tool JSON definitions
file_cache_dir = os.path.join(base_dir, "cache")            # parsed-upload cache
report_dir = os.path.join(base_dir, "reports")              # full-text PDF reports for download

# Create every directory up front; exist_ok makes this idempotent on restart.
for _cache_dir in (model_cache_dir, tool_cache_dir, file_cache_dir, report_dir):
    os.makedirs(_cache_dir, exist_ok=True)
33
 
34
  os.environ.update({
35
  "TRANSFORMERS_CACHE": model_cache_dir,
 
75
 
76
  if file_type == "pdf":
77
  text = extract_priority_pages(file_path)
78
+ result = json.dumps({"filename": os.path.basename(file_path), "content": text, "status": "initial"})
 
 
 
 
79
  Thread(target=full_pdf_processing, args=(file_path, h)).start()
80
 
81
  elif file_type == "csv":
 
106
def full_pdf_processing(file_path: str, file_hash: str):
    """Extract the full text of every page of *file_path* in the background.

    Writes two artifacts keyed by *file_hash*:
      - ``<file_cache_dir>/<hash>_full.json``  : JSON payload with status "complete"
      - ``<report_dir>/<hash>_report.txt``     : plain-text report for the UI download widget

    Best-effort: this runs in a Thread started by ``convert_file_to_json``,
    so any failure is logged to stdout and swallowed rather than propagated.

    NOTE(review): SOURCE is a diff scrape; the exact ``try`` placement is
    reconstructed from the visible ``except`` — confirm against the full file.
    """
    try:
        cache_path = os.path.join(file_cache_dir, f"{file_hash}_full.json")
        if os.path.exists(cache_path):
            return  # already processed (or another worker finished first)

        with pdfplumber.open(file_path) as pdf:
            # extract_text() may return None for image-only pages; coerce to "".
            full_text = "\n".join(
                f"=== Page {i+1} ===\n{(page.extract_text() or '').strip()}"
                for i, page in enumerate(pdf.pages)
            )

        result = json.dumps({
            "filename": os.path.basename(file_path),
            "content": full_text,
            "status": "complete",
        })

        # Write-then-rename so a crash mid-write can never leave a truncated
        # JSON behind: the exists() guard above would otherwise treat that
        # partial file as a finished cache entry forever.
        tmp_path = cache_path + ".tmp"
        with open(tmp_path, "w", encoding="utf-8") as f:
            f.write(result)
        os.replace(tmp_path, cache_path)

        # Plain-text copy exposed through the UI's download widget.
        with open(os.path.join(report_dir, f"{file_hash}_report.txt"), "w", encoding="utf-8") as out:
            out.write(full_text)
    except Exception as e:
        # Best-effort background job; log and swallow so the thread dies quietly.
        print(f"Background processing failed: {str(e)}")
118
 
119
  def init_agent():
120
  default_tool_path = os.path.abspath("data/new_tool.json")
121
  target_tool_path = os.path.join(tool_cache_dir, "new_tool.json")
 
122
  if not os.path.exists(target_tool_path):
123
  shutil.copy(default_tool_path, target_tool_path)
124
 
 
141
  gr.Markdown("<h3 style='text-align: center;'>Identify potential oversights in patient care</h3>")
142
 
143
  chatbot = gr.Chatbot(label="Analysis", height=600, type="messages")
144
+ file_upload = gr.File(
145
+ label="Upload Medical Records",
146
+ file_types=[".pdf", ".csv", ".xls", ".xlsx"],
147
+ file_count="multiple"
148
+ )
149
  msg_input = gr.Textbox(placeholder="Ask about potential oversights...", show_label=False)
150
  send_btn = gr.Button("Analyze", variant="primary")
151
  conversation_state = gr.State([])
152
+ download_output = gr.File(label="Download Full Report (after tools finish)")
153
 
154
  def analyze_potential_oversights(message: str, history: list, conversation: list, files: list):
155
  start_time = time.time()
156
  try:
157
  history.append({"role": "user", "content": message})
158
  history.append({"role": "assistant", "content": "Analyzing records for potential oversights..."})
159
+ yield history, None
160
 
161
  extracted_data = ""
162
+ file_hash_value = ""
163
  if files:
164
  with ThreadPoolExecutor(max_workers=4) as executor:
165
  futures = [executor.submit(convert_file_to_json, f.name, f.name.split(".")[-1].lower()) for f in files if hasattr(f, 'name')]
166
+ results = [sanitize_utf8(f.result()) for f in as_completed(futures)]
167
+ extracted_data = "\n".join(results)
168
+ file_hash_value = file_hash(files[0].name)
169
 
170
  analysis_prompt = f"""Review these medical records and identify EXACTLY what might have been missed:
171
  1. List potential missed diagnoses
 
173
  3. Note incomplete assessments
174
  4. Highlight abnormal results needing follow-up
175
 
176
+ Medical Records:\n{extracted_data[:15000]}
 
 
 
177
 
178
+ ### Potential Oversights:\n"""
 
 
 
179
 
180
  response = []
181
  for chunk in agent.run_gradio_chat(
 
191
  response.append(chunk)
192
  elif isinstance(chunk, list):
193
  response.extend([c.content for c in chunk if hasattr(c, 'content')])
194
+
195
  if len(response) % 3 == 0:
196
  history[-1] = {"role": "assistant", "content": "".join(response).strip()}
197
+ yield history, None
198
 
199
  final_output = "".join(response).strip()
200
  if not final_output:
201
  final_output = "No clear oversights identified. Recommend comprehensive review."
 
202
  if not final_output.startswith(("1.", "-", "*", "#")):
203
  final_output = "• " + final_output.replace("\n", "\n• ")
204
+ history[-1] = {"role": "assistant", "content": final_output}
205
 
206
+ report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt")
207
+ return history, report_path if os.path.exists(report_path) else None
 
208
 
209
  except Exception as e:
210
  history.append({"role": "assistant", "content": f"❌ Analysis failed: {str(e)}"})
211
+ return history, None
212
 
213
  inputs = [msg_input, chatbot, conversation_state, file_upload]
214
+ outputs = [chatbot, download_output]
215
+ send_btn.click(analyze_potential_oversights, inputs=inputs, outputs=outputs)
216
+ msg_input.submit(analyze_potential_oversights, inputs=inputs, outputs=outputs)
217
 
218
  gr.Examples([
219
  ["What might have been missed in this patient's treatment?"],
 
226
  if __name__ == "__main__":
227
  print("Initializing medical analysis agent...")
228
  agent = init_agent()
 
229
  print("Launching interface...")
230
  demo = create_ui(agent)
231
  demo.queue().launch(
 
233
  server_port=7860,
234
  show_error=True,
235
  share=True
236
+ )