Ali2206 committed
Commit d14e134 · verified · 1 Parent(s): f264437

Update app.py

Files changed (1):
  1. app.py +73 -149

app.py CHANGED
@@ -139,48 +139,35 @@ def init_agent():
     agent.init_model()
     return agent
 
-def format_analysis_output(text: str) -> str:
-    """Clean and format the analysis output for better display"""
-    # Remove tool call artifacts
-    text = text.replace("[TOOL_CALLS]", "").strip()
-    if "[TOOL_CALLS]" in text:
-        text = text.split("[TOOL_CALLS]")[0].strip()
-
-    # Remove duplicate sections
-    if "Based on the medical records provided" in text:
-        parts = text.split("Based on the medical records provided")
-        if len(parts) > 1:
-            text = "Based on the medical records provided" + parts[-1]
-
-    # Format sections with Markdown and emojis
-    replacements = {
-        "1. **Missed Diagnoses**:": "### 🔍 Missed Diagnoses",
-        "2. **Medication Conflicts**:": "\n### 💊 Medication Conflicts",
-        "3. **Incomplete Assessments**:": "\n### 📋 Incomplete Assessments",
-        "4. **Abnormal Results Needing Follow-up**:": "\n### ⚠️ Abnormal Results Needing Follow-up",
-        "Overall, the patient's medical records": "\n### 📝 Overall Assessment"
-    }
-
-    for old, new in replacements.items():
-        text = text.replace(old, new)
-
-    return text
-
-def analyze_potential_oversights(message: str, history: list, conversation: list, files: list):
-    start_time = time.time()
-    try:
-        history = history + [{"role": "user", "content": message}, {"role": "assistant", "content": "⏳ Analyzing records for potential oversights..."}]
-        yield history, None
-
-        extracted_data = ""
-        file_hash_value = ""
-        if files and isinstance(files, list):
-            with ThreadPoolExecutor(max_workers=4) as executor:
-                futures = [executor.submit(convert_file_to_json, f.name, f.name.split(".")[-1].lower()) for f in files if hasattr(f, 'name')]
-                extracted_data = "\n".join([sanitize_utf8(f.result()) for f in as_completed(futures)])
-            file_hash_value = file_hash(files[0].name) if files else ""
-
-        analysis_prompt = f"""Review these medical records and identify EXACTLY what might have been missed:
 1. List potential missed diagnoses
 2. Flag any medication conflicts
 3. Note incomplete assessments
@@ -190,111 +177,51 @@ Medical Records:\n{extracted_data[:15000]}
 
 ### Potential Oversights:\n"""
 
-        response = ""
-        for chunk in agent.run_gradio_chat(
-            message=analysis_prompt,
-            history=[],
-            temperature=0.2,
-            max_new_tokens=1024,
-            max_token=4096,
-            call_agent=False,
-            conversation=conversation
-        ):
-            if isinstance(chunk, str):
-                response += chunk
-            elif isinstance(chunk, list):
-                response += "".join([c.content for c in chunk if hasattr(c, 'content')])
-
-            # Format the partial response for display
-            formatted = format_analysis_output(response)
-            yield history[:-1] + [{"role": "assistant", "content": formatted}], None
-
-        # Final formatting
-        final_output = format_analysis_output(response)
-        if not final_output:
-            final_output = "No clear oversights identified. Recommend comprehensive review."
-
-        report_path = None
-        if file_hash_value:
-            possible_report = os.path.join(report_dir, f"{file_hash_value}_report.txt")
-            if os.path.exists(possible_report):
-                report_path = possible_report
-
-        history = history[:-1] + [{"role": "assistant", "content": final_output}]
-        yield history, report_path
-
-    except Exception as e:
-        history.append({"role": "assistant", "content": f"❌ Analysis failed: {str(e)}"})
-        yield history, None
-
-def create_ui(agent: TxAgent):
-    with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 900px !important}") as demo:
-        gr.Markdown("""
-        <div style='text-align: center;'>
-            <h1>🩺 Clinical Oversight Assistant</h1>
-            <h3>Identify potential oversights in patient care</h3>
-            <p>Upload medical records to analyze for missed diagnoses, medication conflicts, incomplete assessments, and abnormal results needing follow-up.</p>
-        </div>
-        """)
-
-        with gr.Row():
-            with gr.Column(scale=2):
-                file_upload = gr.File(
-                    label="Upload Medical Records",
-                    file_types=[".pdf", ".csv", ".xls", ".xlsx"],
-                    file_count="multiple",
-                    height=100
-                )
-                msg_input = gr.Textbox(
-                    placeholder="Ask about potential oversights...",
-                    show_label=False,
-                    lines=3,
-                    max_lines=6
-                )
-                send_btn = gr.Button("Analyze", variant="primary", size="lg")
-
-                gr.Examples(
-                    examples=[
-                        ["What might have been missed in this patient's treatment?"],
-                        ["Are there any medication conflicts in these records?"],
-                        ["What abnormal results require follow-up?"],
-                        ["Identify any incomplete assessments in these records"]
-                    ],
-                    inputs=msg_input,
-                    label="Example Queries"
-                )
-
-            with gr.Column(scale=3):
-                chatbot = gr.Chatbot(
-                    label="Analysis Results",
-                    height=600,
-                    bubble_full_width=False,
-                    show_copy_button=True,
-                    avatar_images=(
-                        "assets/user.png",
-                        "assets/doctor.png"
-                    )
-                )
-                download_output = gr.File(
-                    label="Download Full Report",
-                    visible=False
-                )
-
-        conversation_state = gr.State([])
 
         inputs = [msg_input, chatbot, conversation_state, file_upload]
         outputs = [chatbot, download_output]
-
-        send_btn.click(
-            analyze_potential_oversights,
-            inputs=inputs,
-            outputs=outputs
-        )
-        msg_input.submit(
-            analyze_potential_oversights,
-            inputs=inputs,
-            outputs=outputs
-        )
 
     return demo
 
@@ -304,13 +231,10 @@ if __name__ == "__main__":
 
     print("Launching interface...")
     demo = create_ui(agent)
-    demo.queue(
-        concurrency_count=3,
-        api_open=False
-    ).launch(
         server_name="0.0.0.0",
         server_port=7860,
         show_error=True,
         allowed_paths=["/data/reports"],
         share=False
-    )
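A note on the removed `demo.queue(...)` call above: `concurrency_count` is a Gradio 3.x parameter that `Blocks.queue()` no longer accepts in Gradio 4.x, which is why the replacement below passes only `api_open=False`. If the app targets Gradio 4.x and the old limit of three concurrent workers is still wanted, a rough equivalent is sketched here; `default_concurrency_limit` and the per-event `concurrency_limit` argument are taken from the Gradio 4.x API and are assumptions about the target version, not code from this repository.

# Sketch only, assuming Gradio 4.x: queue-wide concurrency replaces concurrency_count.
demo.queue(api_open=False, default_concurrency_limit=3).launch(
    server_name="0.0.0.0",
    server_port=7860,
    show_error=True,
    allowed_paths=["/data/reports"],
    share=False
)

# Or cap a single event handler instead of the whole queue:
# send_btn.click(analyze_potential_oversights, inputs=inputs, outputs=outputs, concurrency_limit=3)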
 
     agent.init_model()
     return agent
 
+def create_ui(agent: TxAgent):
+    with gr.Blocks(theme=gr.themes.Soft()) as demo:
+        gr.Markdown("""
+        <h1 style='text-align: center;'>🩺 Clinical Oversight Assistant</h1>
+        <h3 style='text-align: center;'>Identify potential oversights in patient care</h3>
+        """)
+
+        chatbot = gr.Chatbot(label="Analysis", height=600, type="messages")
+        file_upload = gr.File(label="Upload Medical Records", file_types=[".pdf", ".csv", ".xls", ".xlsx"], file_count="multiple")
+        msg_input = gr.Textbox(placeholder="Ask about potential oversights...", show_label=False)
+        send_btn = gr.Button("Analyze", variant="primary")
+        conversation_state = gr.State([])
+        download_output = gr.File(label="Download Full Report")
+
+        def analyze_potential_oversights(message: str, history: list, conversation: list, files: list):
+            start_time = time.time()
+            try:
+                history = history + [{"role": "user", "content": message}, {"role": "assistant", "content": "⏳ Analyzing records for potential oversights..."}]
+                yield history, None
+
+                extracted_data = ""
+                file_hash_value = ""
+                if files and isinstance(files, list):
+                    with ThreadPoolExecutor(max_workers=4) as executor:
+                        futures = [executor.submit(convert_file_to_json, f.name, f.name.split(".")[-1].lower()) for f in files if hasattr(f, 'name')]
+                        extracted_data = "\n".join([sanitize_utf8(f.result()) for f in as_completed(futures)])
+                    file_hash_value = file_hash(files[0].name) if files else ""
+
+                analysis_prompt = f"""Review these medical records and identify EXACTLY what might have been missed:
 1. List potential missed diagnoses
 2. Flag any medication conflicts
 3. Note incomplete assessments
 
 ### Potential Oversights:\n"""
 
+                response = ""
+                for chunk in agent.run_gradio_chat(
+                    message=analysis_prompt,
+                    history=[],
+                    temperature=0.2,
+                    max_new_tokens=1024,
+                    max_token=4096,
+                    call_agent=False,
+                    conversation=conversation
+                ):
+                    if isinstance(chunk, str):
+                        response += chunk
+                    elif isinstance(chunk, list):
+                        response += "".join([c.content for c in chunk if hasattr(c, 'content')])
+
+                    cleaned = response.replace("[TOOL_CALLS]", "").strip()
+                    yield history[:-1] + [{"role": "assistant", "content": cleaned}], None
+
+                final_output = response.replace("[TOOL_CALLS]", "").strip()
+                if not final_output:
+                    final_output = "No clear oversights identified. Recommend comprehensive review."
+
+                report_path = None
+                if file_hash_value:
+                    possible_report = os.path.join(report_dir, f"{file_hash_value}_report.txt")
+                    if os.path.exists(possible_report):
+                        report_path = possible_report
+
+                history = history[:-1] + [{"role": "assistant", "content": final_output}]
+                yield history, report_path
+
+            except Exception as e:
+                history.append({"role": "assistant", "content": f"❌ Analysis failed: {str(e)}"})
+                yield history, None
 
         inputs = [msg_input, chatbot, conversation_state, file_upload]
         outputs = [chatbot, download_output]
+        send_btn.click(analyze_potential_oversights, inputs=inputs, outputs=outputs)
+        msg_input.submit(analyze_potential_oversights, inputs=inputs, outputs=outputs)
+
+        gr.Examples([
+            ["What might have been missed in this patient's treatment?"],
+            ["Are there any medication conflicts in these records?"],
+            ["What abnormal results require follow-up?"]
+        ], inputs=msg_input)
 
     return demo
 
     print("Launching interface...")
     demo = create_ui(agent)
+    demo.queue(api_open=False).launch(
         server_name="0.0.0.0",
         server_port=7860,
         show_error=True,
         allowed_paths=["/data/reports"],
         share=False
+    )
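For reference, `gr.Chatbot(..., type="messages")` used above expects the chat history to be a list of OpenAI-style role/content dicts, and a generator handler streams partial results by yielding the growing list, which is the contract `analyze_potential_oversights` follows. A minimal self-contained sketch of that pattern, with illustrative names (`echo_stream`, `chat`, `box`) that are not taken from app.py:

import gradio as gr

def echo_stream(message, history):
    # history arrives as a list of {"role": ..., "content": ...} dicts
    history = history + [{"role": "user", "content": message},
                         {"role": "assistant", "content": ""}]
    partial = ""
    for token in message.split():
        partial += token + " "
        # overwrite the last assistant message and yield to stream the partial reply
        history[-1] = {"role": "assistant", "content": partial.strip()}
        yield history

with gr.Blocks() as sketch:
    chat = gr.Chatbot(type="messages")
    box = gr.Textbox()
    box.submit(echo_stream, inputs=[box, chat], outputs=chat)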