Update app.py
app.py CHANGED
@@ -146,8 +146,6 @@ def init_agent():
         seed=100,
         additional_default_tools=[],
     )
-    # This call attempts to load the models. If device inference fails,
-    # it will now produce DEBUG-level logs.
    agent.init_model()
    log_system_usage("After Load")
    print("✅ Agent Ready")
@@ -168,10 +166,12 @@ def create_ui(agent):
 
         def analyze(message: str, history: list, files: list):
             try:
+                # Initialize response with loading message
                 history.append({"role": "user", "content": message})
                 history.append({"role": "assistant", "content": "⏳ Analyzing records for potential oversights..."})
                 yield history, None
 
+                # Process files in parallel
                 extracted = ""
                 file_hash_value = ""
                 if files:
@@ -206,7 +206,12 @@ Medical Records:
                 print("🔍 Generated prompt:")
                 print(prompt)
 
+                # Initialize response tracking
                 full_response = ""
+                last_update_time = 0
+                response_chunks = []
+
+                # Process streaming response
                 for chunk in agent.run_gradio_chat(
                     message=prompt,
                     history=[],
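
Note on the hunk above: `last_update_time` is initialized but never read anywhere in this diff, so as committed it is dead state; the name suggests an intended throttle on how often the streaming loop yields UI updates. A minimal sketch of that idea, where the helper name and interval are assumptions and not part of this commit:

import time

MIN_UPDATE_INTERVAL = 0.1  # assumed value: repaint the chat at most ~10x per second

last_update_time = 0.0

def should_refresh() -> bool:
    """Return True when enough time has passed since the last UI update."""
    global last_update_time
    now = time.monotonic()
    if now - last_update_time >= MIN_UPDATE_INTERVAL:
        last_update_time = now
        return True
    return False
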
@@ -219,26 +224,40 @@ Medical Records:
                     try:
                         if chunk is None:
                             continue
+
+                        # Handle different chunk types
                         if isinstance(chunk, str):
-
-                            full_response += chunk
+                            chunk_content = chunk
                         elif isinstance(chunk, list):
                             chunk_content = "".join([c.content for c in chunk if hasattr(c, "content") and c.content])
-                            print("DEBUG: Received list chunk:", chunk_content)
-                            full_response += chunk_content
                         else:
                             print("DEBUG: Received unknown type chunk", type(chunk))
+                            continue
+
+                        if not chunk_content:
+                            continue
+
+                        response_chunks.append(chunk_content)
+                        full_response = "".join(response_chunks)
+
+                        # Update the chat history with the latest response
+                        if len(history) > 0 and history[-1]["role"] == "assistant":
+                            history[-1]["content"] = full_response
+                        else:
+                            history.append({"role": "assistant", "content": full_response})
+
+                        yield history, None
+
                     except Exception as e:
                         print("❌ Error processing chunk:", str(e))
                         traceback.print_exc()
+                        continue
 
-
-                history[-1] = {"role": "assistant", "content": full_response}
-                yield history, None
-
+                # Final response handling
                 if not full_response:
                     full_response = "⚠️ No clear oversights identified or model output was invalid."
 
+                # Save report if we have files
                 report_path = None
                 if file_hash_value:
                     report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt")
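
The rewritten loop above normalizes every chunk to a plain string before accumulating it. The same logic in isolation, with a stand-in dataclass for the agent's chunk objects (their real type is not visible in this diff):

from dataclasses import dataclass
from typing import Optional, Union

@dataclass
class Chunk:  # stand-in for whatever agent.run_gradio_chat yields in lists
    content: Optional[str] = None

def normalize_chunk(chunk: Union[str, list, None]) -> str:
    """Flatten a streamed chunk to plain text; '' means skip it."""
    if chunk is None:
        return ""
    if isinstance(chunk, str):
        return chunk
    if isinstance(chunk, list):
        return "".join(c.content for c in chunk if getattr(c, "content", None))
    return ""  # unknown type: skipped, matching the else-branch above

assert normalize_chunk([Chunk("a"), Chunk(None), Chunk("b")]) == "ab"
assert normalize_chunk("plain") == "plain"
assert normalize_chunk(None) == ""
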
@@ -249,14 +268,19 @@ Medical Records:
                         print("❌ Error saving report:", str(e))
                         traceback.print_exc()
 
-
+                # Ensure the final response is in the history
+                if len(history) > 0 and history[-1]["role"] == "assistant":
+                    history[-1]["content"] = full_response
+                else:
+                    history.append({"role": "assistant", "content": full_response})
+
                 yield history, report_path if report_path and os.path.exists(report_path) else None
 
             except Exception as e:
                 error_message = f"❌ An error occurred in analyze: {str(e)}"
                 print(error_message)
                 traceback.print_exc()
-                history
+                history.append({"role": "assistant", "content": error_message})
                 yield history, None
 
         send_btn.click(analyze, inputs=[msg_input, gr.State([]), file_upload], outputs=[chatbot, download_output])
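
The overwrite-or-append guard on the trailing assistant message now appears twice in this commit (in the streaming loop and before the final yield), plus an unconditional append in the error path, which also fixes the old no-op bare `history` statement. A follow-up cleanup could factor the guard into one helper; a sketch only, with the function name being a suggestion rather than anything in the commit:

def set_assistant_reply(history: list, content: str) -> None:
    """Overwrite the trailing assistant message, or append a new one."""
    if history and history[-1]["role"] == "assistant":
        history[-1]["content"] = content
    else:
        history.append({"role": "assistant", "content": content})
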
@@ -277,4 +301,4 @@ if __name__ == "__main__":
         )
     except Exception as e:
         print("❌ Fatal error during launch:", str(e))
-        traceback.print_exc()
+        traceback.print_exc()
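
For context on why `analyze` can yield repeatedly: when a Gradio event handler is a generator, each yield re-renders the bound outputs, which is what streams partial responses into the Chatbot. A self-contained sketch of that mechanism, with illustrative component names rather than the app's own:

import gradio as gr

def respond(message: str, history: list):
    history = history + [{"role": "user", "content": message},
                         {"role": "assistant", "content": ""}]
    for token in ["Looking", " at", " the", " records..."]:  # stand-in for model chunks
        history[-1]["content"] += token
        yield history  # each yield repaints the Chatbot

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    box = gr.Textbox()
    box.submit(respond, inputs=[box, chatbot], outputs=[chatbot])

if __name__ == "__main__":
    demo.launch()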