Update app.py
app.py CHANGED
@@ -245,7 +245,7 @@ Analyze this patient record excerpt for missed diagnoses (limit response to 500
 
     with gr.Row():
         with gr.Column(scale=3):
-            chatbot = gr.Chatbot(label="Analysis", height=500)
+            chatbot = gr.Chatbot(label="Analysis", height=500, type="messages")
             msg_input = gr.Textbox(placeholder="Ask about potential oversights...")
             send_btn = gr.Button("Analyze", variant="primary")
             file_upload = gr.File(file_types=[".pdf", ".csv", ".xls", ".xlsx"], file_count="single")
@@ -254,54 +254,58 @@ Analyze this patient record excerpt for missed diagnoses (limit response to 500
             final_summary = gr.Markdown("## Summary")
             status = gr.Textbox(label="Status", interactive=False)
 
-    def analyze(message: str, history: List[
+    def analyze(message: str, history: List[Dict], file_obj) -> tuple:
         try:
-            if not
+            if not file_obj:
                 return history, "Please upload a file first", "No file uploaded"
-
-
-            file_type =
-            history.append(
-
+
+            file_path = file_obj.name
+            file_type = os.path.splitext(file_path)[-1].lower().replace(".", "")
+            history.append({"role": "user", "content": message})
+
             # Process file
-            processed = process_file_cached(
+            processed = process_file_cached(file_path, file_type)
             if "error" in processed[0]:
+                history.append({"role": "assistant", "content": processed[0]["error"]})
                 return history, processed[0]["error"], "File processing failed"
-
+
             # Prepare chunks
             chunks = []
             for item in processed:
                 if "content" in item:
                     chunks.append(item["content"])
                 elif "rows" in item:
-                    rows_text = "\n".join([", ".join(map(str, row)) for row in item["rows"][:100]])
+                    rows_text = "\n".join([", ".join(map(str, row)) for row in item["rows"][:100]])
                     chunks.append(f"=== {item.get('sheet', 'Data')} ===\n{rows_text}")
-
+
             if not chunks:
+                history.append({"role": "assistant", "content": "No processable content found."})
                 return history, "No processable content found", "Content extraction failed"
-
-            #
+
+            # Analyze each chunk
             responses = []
-            for i, chunk in enumerate(chunks[:5]):
+            for i, chunk in enumerate(chunks[:5]):
                 try:
-                    prompt = PROMPT_TEMPLATE.format(chunk=chunk[:5000])
-                    response = agent.run_quick_summary(prompt, 0.2, 256, 500)
+                    prompt = PROMPT_TEMPLATE.format(chunk=chunk[:5000])
+                    response = agent.run_quick_summary(prompt, 0.2, 256, 500)
                     cleaned = clean_response(response)
                     if cleaned:
                         responses.append(f"Analysis {i+1}:\n{cleaned}")
                 except Exception as e:
-                    logger.warning(f"Error
+                    logger.warning(f"Error analyzing chunk {i+1}: {str(e)}")
                     continue
-
+
             if not responses:
+                history.append({"role": "assistant", "content": "No valid analysis generated."})
                 return history, "No valid analysis generated", "Analysis failed"
-
+
             summary = "\n\n".join(responses)
-            history
+            history.append({"role": "assistant", "content": summary})
             return history, "Analysis completed", "Success"
-
+
         except Exception as e:
             logger.error(f"Analysis error: {e}")
+            history.append({"role": "assistant", "content": f"Error: {str(e)}"})
             return history, f"Error: {str(e)}", "Failed"
         finally:
             torch.cuda.empty_cache()
@@ -312,14 +316,16 @@ Analyze this patient record excerpt for missed diagnoses (limit response to 500
         inputs=[msg_input, chatbot, file_upload],
         outputs=[chatbot, final_summary, status]
     )
+
     msg_input.submit(
         analyze,
         inputs=[msg_input, chatbot, file_upload],
         outputs=[chatbot, final_summary, status]
    )
-
+
     return demo
 
+
 if __name__ == "__main__":
     try:
         agent = init_agent()
@@ -331,4 +337,4 @@ if __name__ == "__main__":
         )
     except Exception as e:
         logger.error(f"Fatal error: {e}")
-        raise
+        raise
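
Note on the central change: switching `gr.Chatbot` to `type="messages"` moves the chat history from Gradio's legacy `[user, assistant]` pair lists to OpenAI-style role/content dicts, which is why every `history.append(...)` in `analyze` now builds a dict. A minimal standalone sketch of the shape the handler maintains (runs without Gradio):

```python
from typing import Dict, List

# With gr.Chatbot(type="messages"), history is a flat list of
# {"role": ..., "content": ...} dicts rather than [user, assistant] pairs.
history: List[Dict] = []
history.append({"role": "user", "content": "Ask about potential oversights..."})
history.append({"role": "assistant", "content": "Analysis 1:\n..."})

for turn in history:
    print(f"{turn['role']}: {turn['content']}")
```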
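The handler also pins down an implicit contract for `process_file_cached(file_path, file_type)`: it returns a list of dicts, where index 0 may carry an `error` key, and each item carries either `content` (extracted text) or `rows` plus an optional `sheet` name (tabular data). Below is a hypothetical stub matching that inferred contract, useful for exercising `analyze` without real uploads; the stub name and sample values are illustrative, not taken from app.py:

```python
from typing import Dict, List

def process_file_cached_stub(file_path: str, file_type: str) -> List[Dict]:
    """Hypothetical stand-in for process_file_cached; return shapes inferred from the diff."""
    if file_type == "pdf":
        # Text path: analyze() picks up each "content" value as one chunk.
        return [{"content": "Patient reports intermittent chest pain..."}]
    if file_type in ("csv", "xls", "xlsx"):
        # Tabular path: analyze() joins the first 100 "rows" under a '=== sheet ===' header.
        return [{"sheet": "Labs", "rows": [["HbA1c", "8.9%"], ["LDL", "162 mg/dL"]]}]
    # Failure path: analyze() checks processed[0] for an "error" key.
    return [{"error": f"Unsupported file type: {file_type}"}]
```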
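Finally, the wiring: `msg_input.submit` (and, presumably via the `send_btn.click` call just above the third hunk, the button as well) routes to the same `analyze` handler, whose three-element return maps positionally onto `outputs=[chatbot, final_summary, status]`. A self-contained sketch of that pattern, with component names mirroring the diff and a placeholder echo handler:

```python
import gradio as gr

def analyze(message, history):
    # The returned tuple maps positionally onto the outputs list below.
    history = history + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": f"Echo: {message}"},
    ]
    return history, "Analysis completed", "Success"

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Analysis", type="messages")
    msg_input = gr.Textbox(placeholder="Ask about potential oversights...")
    send_btn = gr.Button("Analyze", variant="primary")
    final_summary = gr.Markdown("## Summary")
    status = gr.Textbox(label="Status", interactive=False)

    # Same handler bound to the button click and to Enter in the textbox,
    # mirroring the send_btn.click / msg_input.submit pair in the diff.
    send_btn.click(analyze, inputs=[msg_input, chatbot], outputs=[chatbot, final_summary, status])
    msg_input.submit(analyze, inputs=[msg_input, chatbot], outputs=[chatbot, final_summary, status])

if __name__ == "__main__":
    demo.launch()
```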