Update app.py
app.py CHANGED
@@ -191,6 +191,8 @@ def create_ui(agent):
         extracted = "\n".join(results)
         file_hash_value = file_hash(files[0].name)
 
+        # Truncate extracted content to avoid token limit issues
+        max_content_length = 8000  # Reduced from 12000 to prevent token overflow
         prompt = f"""Review these medical records and identify EXACTLY what might have been missed:
 1. List potential missed diagnoses
 2. Flag any medication conflicts
@@ -198,7 +200,7 @@ def create_ui(agent):
 4. Highlight abnormal results needing follow-up
 
 Medical Records:
-{extracted[:12000]}
+{extracted[:max_content_length]}
 
 ### Potential Oversights:
 """
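
Note that the new `max_content_length` is a character budget, not a token budget: 8,000 characters is roughly 2,000 tokens under the common four-characters-per-token heuristic, which leaves headroom inside `max_token=4096`. A minimal sketch of the same idea as a reusable helper, assuming that heuristic; the function name is illustrative and not part of app.py:

```python
def truncate_for_model(text: str, max_chars: int = 8000) -> str:
    """Trim text to a character budget as a rough proxy for a token budget."""
    if len(text) <= max_chars:
        return text
    # Prefer cutting at a line break so a record is not split mid-line.
    cut = text.rfind("\n", 0, max_chars)
    return text[:cut] if cut > 0 else text[:max_chars]
```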
@@ -210,52 +212,61 @@ Medical Records:
         full_response = ""
         response_chunks = []
 
-        # Process streaming response
-        … (content of removed lines 214–235 not shown)
+        # Process streaming response with error handling
+        try:
+            for chunk in agent.run_gradio_chat(
+                message=prompt,
+                history=[],
+                temperature=0.2,
+                max_new_tokens=2048,
+                max_token=4096,
+                call_agent=False,
+                conversation=[]
+            ):
+                try:
+                    if chunk is None:
+                        continue
+
+                    # Handle different chunk types
+                    if isinstance(chunk, str):
+                        chunk_content = chunk
+                    elif hasattr(chunk, 'content'):
+                        chunk_content = chunk.content
+                    elif isinstance(chunk, list):
+                        chunk_content = "".join([c.content for c in chunk if hasattr(c, "content") and c.content])
+                    else:
+                        print("DEBUG: Received unknown type chunk", type(chunk))
+                        continue
+
+                    if not chunk_content:
+                        continue
+
+                    response_chunks.append(chunk_content)
+                    full_response = "".join(response_chunks)
+
+                    # Clean the response for display
+                    display_response = full_response.split('[TOOL_CALLS]')[0].strip()
+                    display_response = display_response.replace('[TxAgent]', '').strip()
+
+                    # Update the chat history with the latest response
+                    if len(history) > 0 and history[-1]["role"] == "assistant":
+                        history[-1]["content"] = display_response
+                    else:
+                        history.append({"role": "assistant", "content": display_response})
+
+                    yield history, None
+
+                except Exception as e:
+                    print("❌ Error processing chunk:", str(e))
+                    traceback.print_exc()
                     continue
 
-        … (content of removed lines 238–243 not shown)
-                # Remove any tool call markers from the displayed response
-                display_response = full_response.split('[TOOL_CALLS]')[0].strip()
-
-                # Update the chat history with the latest response
-                if len(history) > 0 and history[-1]["role"] == "assistant":
-                    history[-1]["content"] = display_response
-                else:
-                    history.append({"role": "assistant", "content": display_response})
-
-                yield history, None
-
-            except Exception as e:
-                print("❌ Error processing chunk:", str(e))
-                traceback.print_exc()
-                continue
+        except Exception as e:
+            print("❌ Error in model streaming:", str(e))
+            traceback.print_exc()
+            history.append({"role": "assistant", "content": f"Error in model response: {str(e)}"})
+            yield history, None
+            return
 
         # Final response handling
         if not full_response:
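
The rewritten loop accepts three chunk shapes from `agent.run_gradio_chat`: plain strings, objects exposing `.content`, and lists of such objects. A hedged sketch of that normalization pulled out as a standalone, testable helper (hypothetical name, not in app.py):

```python
from typing import Any, Optional

def chunk_to_text(chunk: Any) -> Optional[str]:
    """Normalize a streaming chunk to text, mirroring the branching above."""
    if chunk is None:
        return None
    if isinstance(chunk, str):
        return chunk or None
    if isinstance(chunk, list):
        # Join the .content of every element that has a non-empty one.
        text = "".join(c.content for c in chunk if hasattr(c, "content") and c.content)
        return text or None
    if hasattr(chunk, "content"):
        return chunk.content or None
    return None  # Unknown chunk type: the caller logs and skips it.
```

With a helper like this, the loop body would reduce to `text = chunk_to_text(chunk)` followed by `if text is None: continue`.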
@@ -263,6 +274,7 @@ Medical Records:
         else:
             # Clean up the final response
             full_response = full_response.split('[TOOL_CALLS]')[0].strip()
+            full_response = full_response.replace('[TxAgent]', '').strip()
 
         # Save report if we have files
         report_path = None
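
After this commit, the same two-step cleanup (splitting on `[TOOL_CALLS]`, then stripping the `[TxAgent]` tag) appears both in the per-chunk path and in the final-response path. A small helper would keep the two sites in sync; a sketch, with an illustrative name that is not in app.py:

```python
def clean_model_output(text: str) -> str:
    """Drop everything after a tool-call marker, then remove the agent tag."""
    text = text.split('[TOOL_CALLS]')[0]
    return text.replace('[TxAgent]', '').strip()
```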