Ali2206 committed on
Commit
e0fba37
·
verified ·
1 Parent(s): d3766da

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -61
app.py CHANGED
@@ -130,7 +130,7 @@ def init_agent():
130
  enable_checker=True,
131
  step_rag_num=8,
132
  seed=100,
133
- additional_default_tools=[],
134
  )
135
  agent.init_model()
136
  return agent
@@ -140,35 +140,28 @@ def create_ui(agent: TxAgent):
140
  gr.Markdown("<h1 style='text-align: center;'>🩺 Clinical Oversight Assistant</h1>")
141
  gr.Markdown("<h3 style='text-align: center;'>Identify potential oversights in patient care</h3>")
142
 
143
- chatbot = gr.Chatbot(label="Analysis", height=600)
144
- file_upload = gr.File(
145
- label="Upload Medical Records",
146
- file_types=[".pdf", ".csv", ".xls", ".xlsx"],
147
- file_count="multiple"
148
- )
149
  msg_input = gr.Textbox(placeholder="Ask about potential oversights...", show_label=False)
150
  send_btn = gr.Button("Analyze", variant="primary")
151
  conversation_state = gr.State([])
152
  download_output = gr.File(label="Download Full Report")
153
 
154
  def analyze_potential_oversights(message: str, history: list, conversation: list, files: list):
155
- start_time = time.time()
156
  try:
157
- # Initialize conversation
158
- history.append((message, "Analyzing records for potential oversights..."))
159
  yield history, None
160
 
161
- # Process files
162
  extracted_data = ""
163
  file_hash_value = ""
164
- if files and isinstance(files, list):
165
  with ThreadPoolExecutor(max_workers=4) as executor:
166
- futures = [executor.submit(convert_file_to_json, f.name, f.name.split(".")[-1].lower())
167
- for f in files if hasattr(f, 'name')]
168
- extracted_data = "\n".join([sanitize_utf8(f.result()) for f in as_completed(futures)])
169
- file_hash_value = file_hash(files[0].name) if files else ""
170
 
171
- # Medical oversight analysis prompt
172
  analysis_prompt = f"""Review these medical records and identify EXACTLY what might have been missed:
173
  1. List potential missed diagnoses
174
  2. Flag any medication conflicts
@@ -177,16 +170,10 @@ def create_ui(agent: TxAgent):
177
 
178
  Medical Records:\n{extracted_data[:15000]}
179
 
180
- Provide ONLY the potential oversights in this format:
181
-
182
- ### Potential Oversights:
183
- 1. [Missed diagnosis] - [Evidence from records]
184
- 2. [Medication issue] - [Supporting data]
185
- 3. [Assessment gap] - [Relevant findings]"""
186
 
187
- # Generate and stream response
188
- full_response = ""
189
- generator = agent.run_gradio_chat(
190
  message=analysis_prompt,
191
  history=[],
192
  temperature=0.2,
@@ -194,45 +181,26 @@ Provide ONLY the potential oversights in this format:
194
  max_token=4096,
195
  call_agent=False,
196
  conversation=conversation
197
- )
198
-
199
- for update in generator:
200
- if not update:
201
- continue
202
-
203
- if isinstance(update, str):
204
- full_response += update
205
- elif isinstance(update, list):
206
- full_response += "".join([msg.content for msg in update if hasattr(msg, 'content')])
207
-
208
- # Clean and update the response
209
- cleaned = full_response.replace("[TOOL_CALLS]", "").strip()
210
- if cleaned:
211
- history[-1] = (message, cleaned)
212
- yield history, None
213
-
214
- # Final cleaned response
215
- final_output = full_response.replace("[TOOL_CALLS]", "").strip()
216
  if not final_output:
217
  final_output = "No clear oversights identified. Recommend comprehensive review."
 
218
 
219
- # Prepare report path if available
220
- report_path = None
221
- if file_hash_value:
222
- possible_report = os.path.join(report_dir, f"{file_hash_value}_report.txt")
223
- if os.path.exists(possible_report):
224
- report_path = possible_report
225
-
226
- history[-1] = (message, final_output)
227
- print(f"Final analysis:\n{final_output}")
228
- yield history, report_path
229
 
230
  except Exception as e:
231
- print(f"Analysis error: {str(e)}")
232
- history[-1] = (message, f"❌ Analysis failed: {str(e)}")
233
- yield history, None
234
 
235
- # UI event handlers
236
  inputs = [msg_input, chatbot, conversation_state, file_upload]
237
  outputs = [chatbot, download_output]
238
  send_btn.click(analyze_potential_oversights, inputs=inputs, outputs=outputs)
@@ -249,7 +217,7 @@ Provide ONLY the potential oversights in this format:
249
  if __name__ == "__main__":
250
  print("Initializing medical analysis agent...")
251
  agent = init_agent()
252
-
253
  print("Performing warm-up call...")
254
  try:
255
  warm_up = agent.run_gradio_chat(
@@ -268,7 +236,7 @@ if __name__ == "__main__":
268
 
269
  print("Launching interface...")
270
  demo = create_ui(agent)
271
- demo.queue(concurrency_count=2).launch(
272
  server_name="0.0.0.0",
273
  server_port=7860,
274
  show_error=True,
 
130
  enable_checker=True,
131
  step_rag_num=8,
132
  seed=100,
133
+ additional_default_tools=[]
134
  )
135
  agent.init_model()
136
  return agent
 
140
  gr.Markdown("<h1 style='text-align: center;'>🩺 Clinical Oversight Assistant</h1>")
141
  gr.Markdown("<h3 style='text-align: center;'>Identify potential oversights in patient care</h3>")
142
 
143
+ chatbot = gr.Chatbot(label="Analysis", height=600, type="messages")
144
+ file_upload = gr.File(label="Upload Medical Records", file_types=[".pdf", ".csv", ".xls", ".xlsx"], file_count="multiple")
 
 
 
 
145
  msg_input = gr.Textbox(placeholder="Ask about potential oversights...", show_label=False)
146
  send_btn = gr.Button("Analyze", variant="primary")
147
  conversation_state = gr.State([])
148
  download_output = gr.File(label="Download Full Report")
149
 
150
  def analyze_potential_oversights(message: str, history: list, conversation: list, files: list):
 
151
  try:
152
+ history.append({"role": "user", "content": message})
153
+ history.append({"role": "assistant", "content": "Analyzing records for potential oversights..."})
154
  yield history, None
155
 
 
156
  extracted_data = ""
157
  file_hash_value = ""
158
+ if files:
159
  with ThreadPoolExecutor(max_workers=4) as executor:
160
+ futures = [executor.submit(convert_file_to_json, f.name, f.name.split(".")[-1].lower()) for f in files if hasattr(f, 'name')]
161
+ results = [sanitize_utf8(f.result()) for f in as_completed(futures)]
162
+ extracted_data = "\n".join(results)
163
+ file_hash_value = file_hash(files[0].name)
164
 
 
165
  analysis_prompt = f"""Review these medical records and identify EXACTLY what might have been missed:
166
  1. List potential missed diagnoses
167
  2. Flag any medication conflicts
 
170
 
171
  Medical Records:\n{extracted_data[:15000]}
172
 
173
+ ### Potential Oversights:\n"""
 
 
 
 
 
174
 
175
+ response = []
176
+ for chunk in agent.run_gradio_chat(
 
177
  message=analysis_prompt,
178
  history=[],
179
  temperature=0.2,
 
181
  max_token=4096,
182
  call_agent=False,
183
  conversation=conversation
184
+ ):
185
+ if isinstance(chunk, str):
186
+ response.append(chunk)
187
+ elif isinstance(chunk, list):
188
+ response.extend([c.content for c in chunk if hasattr(c, 'content')])
189
+ history[-1] = {"role": "assistant", "content": "".join(response).strip()}
190
+ yield history, None
191
+
192
+ final_output = "".join(response).strip()
 
 
 
 
 
 
 
 
 
 
193
  if not final_output:
194
  final_output = "No clear oversights identified. Recommend comprehensive review."
195
+ history[-1] = {"role": "assistant", "content": final_output}
196
 
197
+ report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt")
198
+ return history, report_path if os.path.exists(report_path) else None
 
 
 
 
 
 
 
 
199
 
200
  except Exception as e:
201
+ history.append({"role": "assistant", "content": f"Analysis failed: {str(e)}"})
202
+ return history, None
 
203
 
 
204
  inputs = [msg_input, chatbot, conversation_state, file_upload]
205
  outputs = [chatbot, download_output]
206
  send_btn.click(analyze_potential_oversights, inputs=inputs, outputs=outputs)
 
217
  if __name__ == "__main__":
218
  print("Initializing medical analysis agent...")
219
  agent = init_agent()
220
+
221
  print("Performing warm-up call...")
222
  try:
223
  warm_up = agent.run_gradio_chat(
 
236
 
237
  print("Launching interface...")
238
  demo = create_ui(agent)
239
+ demo.queue().launch(
240
  server_name="0.0.0.0",
241
  server_port=7860,
242
  show_error=True,