Update app.py
app.py CHANGED
@@ -1,4 +1,4 @@
-# Full updated app.py with TOOL_CALLS
+# Full updated app.py with TOOL_CALLS rendered + copy.deepcopy fix for Gradio UI updates
 
 import sys
 import os
@@ -15,6 +15,7 @@ import psutil
 import subprocess
 import traceback
 import torch
+import copy
 
 os.environ["VLLM_LOGGING_LEVEL"] = "DEBUG"
 if not torch.cuda.is_available():
@@ -162,7 +163,7 @@ def create_ui(agent):
         try:
             history.append({"role": "user", "content": message})
             history.append({"role": "assistant", "content": "⏳ Analyzing records for potential oversights..."})
-            yield history, None
+            yield copy.deepcopy(history), None
 
             extracted = ""
             file_hash_value = ""
@@ -228,7 +229,7 @@ Medical Records:
             else:
                 history.append({"role": "assistant", "content": display_response})
 
-            yield history, None
+            yield copy.deepcopy(history), None
 
             full_response = re.sub(r"\[TOOL_CALLS\].*?\n*", "", full_response, flags=re.DOTALL).strip()
             full_response = full_response.replace('[TxAgent]', '').strip()
@@ -244,12 +245,12 @@ Medical Records:
             else:
                 history.append({"role": "assistant", "content": full_response})
 
-            yield history, report_path if report_path and os.path.exists(report_path) else None
+            yield copy.deepcopy(history), report_path if report_path and os.path.exists(report_path) else None
 
         except Exception as e:
             history.append({"role": "assistant", "content": f"❌ An error occurred in analyze: {str(e)}"})
             traceback.print_exc()
-            yield history, None
+            yield copy.deepcopy(history), None
 
         send_btn.click(analyze, inputs=[msg_input, gr.State([]), file_upload], outputs=[chatbot, download_output])
         msg_input.submit(analyze, inputs=[msg_input, gr.State([]), file_upload], outputs=[chatbot, download_output])
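
The change is mechanical: every "yield history, ..." inside analyze becomes "yield copy.deepcopy(history), ...". Per the new header comment, the intent is to make Gradio UI updates reliable: each yield hands the frontend a fresh snapshot of the chat history rather than the same, repeatedly mutated list object. The sketch below is a minimal, self-contained illustration of that pattern, not the real app: the component names (chatbot, msg_input, file_upload, download_output, send_btn) and the analyze signature are taken from the diff, while the file extraction, model call, and report generation are replaced by placeholders.

import copy
import gradio as gr

# Hypothetical stand-in for the app's analyze() generator: same signature and
# yield pattern as in the diff, with the actual analysis steps stubbed out.
def analyze(message, history, file):
    history = history or []
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": "⏳ Analyzing records for potential oversights..."})
    # Yield a snapshot, not the live list, so later appends to `history`
    # don't alter the value already handed to the frontend.
    yield copy.deepcopy(history), None

    # ... file extraction and model inference would happen here ...
    history.append({"role": "assistant", "content": "Analysis complete."})  # placeholder response
    yield copy.deepcopy(history), None  # second value would be a report path when one exists

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    msg_input = gr.Textbox(label="Message")
    file_upload = gr.File(label="Medical records")
    download_output = gr.File(label="Report")
    send_btn = gr.Button("Send")
    send_btn.click(analyze, inputs=[msg_input, gr.State([]), file_upload],
                   outputs=[chatbot, download_output])
    msg_input.submit(analyze, inputs=[msg_input, gr.State([]), file_upload],
                     outputs=[chatbot, download_output])

demo.launch()

Note that a shallow copy (list(history) or copy.copy) would create a new outer list but still share the message dicts; copy.deepcopy also snapshots the dicts themselves, which matters if the code later edits a message in place (for example, replacing the "⏳ Analyzing..." placeholder entry).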