Update app.py
--- a/app.py
+++ b/app.py
@@ -191,14 +191,13 @@ async def process_chunk(agent, chunk: str, chunk_index: int, total_chunks: int)
     return chunk_index, clean_response(response), status

 async def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tuple[List[Dict[str, str]], Union[str, None]]:
-    """Process the Excel file and generate a final report"""
+    """Process the Excel file and generate a final report."""
     messages = chatbot_state if chatbot_state else []
     report_path = None

     if file is None or not hasattr(file, "name"):
         messages.append({"role": "assistant", "content": "❌ Please upload a valid Excel file before analyzing."})
-        yield messages, None
-        return
+        return messages, report_path

     try:
         messages.append({"role": "user", "content": f"Processing Excel file: {os.path.basename(file.name)}"})
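This hunk is the heart of the commit: `process_final_report` changes from an async generator, which streamed intermediate state to the UI via `yield messages, None` and bailed out with a bare `return`, into a plain coroutine that returns a single `(messages, report_path)` tuple from every exit path. A minimal sketch of the two shapes, with illustrative names rather than the app's own:

```python
import asyncio
from typing import AsyncIterator, Dict, List, Optional, Tuple

Messages = List[Dict[str, str]]

# Old shape: an async generator. Callers iterate it and receive
# intermediate (messages, report_path) pairs; a bare `return` ends the stream.
async def analyze_streaming(file) -> AsyncIterator[Tuple[Messages, Optional[str]]]:
    messages: Messages = []
    if file is None:
        messages.append({"role": "assistant", "content": "no file"})
        yield messages, None   # deliver the error state to the UI first...
        return                 # ...then stop the stream (bare return only)
    yield messages, "report.md"

# New shape: a plain coroutine. Every exit path returns one final tuple,
# so callers simply await it.
async def analyze_once(file) -> Tuple[Messages, Optional[str]]:
    messages: Messages = []
    if file is None:
        messages.append({"role": "assistant", "content": "no file"})
        return messages, None  # the early exit now carries the state directly
    return messages, "report.md"

async def main() -> None:
    # Old calling convention: consume the stream.
    async for msgs, path in analyze_streaming(None):
        print(msgs, path)
    # New calling convention: a single awaited result.
    msgs, path = await analyze_once(None)
    print(msgs, path)

asyncio.run(main())
```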
@@ -228,14 +227,12 @@ async def process_final_report(agent, file, chatbot_state: List[Dict[str, str]])
             chunk_index, response, status = future.result()
             chunk_responses[chunk_index] = response
             messages.append({"role": "assistant", "content": status})
-            yield messages, None

         # Filter out empty responses
         chunk_responses = [r for r in chunk_responses if r]
         if not chunk_responses:
             messages.append({"role": "assistant", "content": "❌ No valid chunk responses to summarize."})
-            yield messages, None
-            return
+            return messages, report_path

         # Summarize chunk responses incrementally
         summary = ""
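Worth noting from the unchanged context here: chunk futures are drained with `future.result()` as they finish, but each response is written back into `chunk_responses[chunk_index]`, so results keep document order no matter which chunk completes first. A self-contained sketch of that pattern, with `work` standing in for the real per-chunk analysis:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed

def work(index: int, chunk: str) -> tuple[int, str]:
    # Stand-in for the real per-chunk analysis; returns its index with the result.
    return index, chunk.upper()

chunks = ["alpha", "beta", "gamma"]
responses: list[str | None] = [None] * len(chunks)

with ThreadPoolExecutor(max_workers=4) as pool:
    futures = [pool.submit(work, i, c) for i, c in enumerate(chunks)]
    for future in as_completed(futures):   # completion order is arbitrary...
        index, response = future.result()
        responses[index] = response        # ...but indexing restores document order

responses = [r for r in responses if r]    # drop empty results, as the app does
print(responses)  # ['ALPHA', 'BETA', 'GAMMA']
```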
@@ -267,8 +264,7 @@ async def process_final_report(agent, file, chatbot_state: List[Dict[str, str]])
                     current_summary_tokens = estimate_tokens(summary)
                 except Exception as e:
                     messages.append({"role": "assistant", "content": f"❌ Error summarizing intermediate results: {str(e)}"})
-                    yield messages, None
-                    return
+                    return messages, report_path

             summary += f"\n\n### Chunk {i+1} Analysis\n{response}"
             current_summary_tokens += response_tokens
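The summarization loop tracks `current_summary_tokens` via `estimate_tokens` so the running summary stays inside the model's context budget. The estimator itself is not part of this diff; a common stand-in is the rough four-characters-per-token heuristic, shown here with a hypothetical budget:

```python
def estimate_tokens(text: str) -> int:
    # Rough heuristic: ~4 characters per token for English text.
    # (Assumption -- the app's actual estimator is not shown in this diff.)
    return max(1, len(text) // 4)

MAX_SUMMARY_TOKENS = 6000  # hypothetical budget, not from the app

summary = ""
for i, response in enumerate(["chunk one analysis", "chunk two analysis"]):
    response_tokens = estimate_tokens(response)
    if estimate_tokens(summary) + response_tokens > MAX_SUMMARY_TOKENS:
        # In the app, this is where the intermediate summarization call
        # would compact `summary` before appending more chunks.
        summary = summary[: len(summary) // 2]  # placeholder compaction
    summary += f"\n\n### Chunk {i+1} Analysis\n{response}"
    print(f"chunk {i+1}: running total ~{estimate_tokens(summary)} tokens")
```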
@@ -295,11 +291,10 @@ async def process_final_report(agent, file, chatbot_state: List[Dict[str, str]])
             elif isinstance(result, list):
                 for r in result:
                     if hasattr(r, "content"):
-
+                        summary_response += r.content
         except Exception as e:
             messages.append({"role": "assistant", "content": f"❌ Error generating final report: {str(e)}"})
-            yield messages, None
-            return
+            return messages, report_path

         final_report = f"# \U0001f9e0 Final Patient Report\n\n{clean_response(final_report_text)}"
         messages[-1]["content"] = f"📋 Final Report:\n\n{clean_response(final_report_text)}"
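Besides the return-style change, this hunk rewrites the body of the `if hasattr(r, "content")` branch so that list-shaped agent results are folded into `summary_response` rather than lost. A small sketch of that duck-typed normalization (the `Message` class is illustrative, not the agent framework's own type):

```python
from dataclasses import dataclass

@dataclass
class Message:
    content: str

def collect_text(result) -> str:
    """Normalize an agent result that may be a plain string or a list of
    message-like objects carrying a .content attribute."""
    if isinstance(result, str):
        return result
    if isinstance(result, list):
        return "".join(r.content for r in result if hasattr(r, "content"))
    return ""

print(collect_text("plain text"))
print(collect_text([Message("part one, "), Message("part two")]))
```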
@@ -314,13 +309,12 @@ async def process_final_report(agent, file, chatbot_state: List[Dict[str, str]])
         messages.append({"role": "assistant", "content": f"✅ Report generated and saved: report_{timestamp}.md"})
         logger.info(f"Total processing time: {time.time() - start_time:.2f} seconds")

-        yield messages, report_path
+        return messages, report_path

     except Exception as e:
         messages.append({"role": "assistant", "content": f"❌ Error processing file: {str(e)}"})
         logger.error(f"Processing failed: {str(e)}")
-        yield messages, None
-        return
+        return messages, report_path

 async def create_ui(agent):
     """Create the Gradio UI for the patient history analysis tool."""
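The success path announces `report_{timestamp}.md`, and the launch configuration further down whitelists `/data/hf_cache/reports`, which implies the report is written there before its path is returned. The save step itself is outside this diff; a sketch under those assumptions:

```python
import os
import time

REPORTS_DIR = "/data/hf_cache/reports"  # matches allowed_paths in launch()

def save_report(final_report: str) -> str:
    """Write the markdown report to the whitelisted directory and
    return its path (assumed layout; the real save code is not in this diff)."""
    os.makedirs(REPORTS_DIR, exist_ok=True)
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    report_path = os.path.join(REPORTS_DIR, f"report_{timestamp}.md")
    with open(report_path, "w", encoding="utf-8") as f:
        f.write(final_report)
    return report_path
```

Returning this path as `report_path` is what lets the UI layer below flip the download component visible.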
@@ -360,13 +354,9 @@ async def create_ui(agent):

         async def update_ui(file, current_state):
             messages = current_state if current_state else []
-            report_path = None
-            async for new_messages, new_report_path in process_final_report(agent, file, messages):
-                messages = new_messages
-                report_path = new_report_path
-                report_update = gr.update(visible=report_path is not None, value=report_path)
-                yield messages, report_update, messages
-            yield messages, gr.update(visible=report_path is not None, value=report_path), messages
+            messages, report_path = await process_final_report(agent, file, messages)
+            report_update = gr.update(visible=report_path is not None, value=report_path)
+            return messages, report_update, messages

         analyze_btn.click(
             fn=update_ui,
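With `update_ui` now a coroutine returning all three outputs in one shot, the `analyze_btn.click` wiring stays declarative. A runnable sketch of that wiring; the component names and the commented-out agent call are illustrative, since the rest of `create_ui` is not in this diff:

```python
import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")      # renders the messages list
    file_input = gr.File(label="Excel file")
    report_file = gr.File(label="Report", visible=False)
    state = gr.State([])                       # chat history kept between clicks
    analyze_btn = gr.Button("Analyze")

    async def update_ui(file, current_state):
        messages = current_state if current_state else []
        # messages, report_path = await process_final_report(agent, file, messages)
        report_path = None  # stand-in; the real value comes from the call above
        report_update = gr.update(visible=report_path is not None, value=report_path)
        return messages, report_update, messages

    analyze_btn.click(
        fn=update_ui,
        inputs=[file_input, state],
        outputs=[chatbot, report_file, state],
    )
```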
@@ -386,7 +376,9 @@ if __name__ == "__main__":
             server_port=7860,
             show_error=True,
             allowed_paths=["/data/hf_cache/reports"],
-            share=False
+            share=False,
+            inline=False,
+            max_threads=40
         )
     except Exception as e:
         print(f"Error: {str(e)}")
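All three keyword arguments in the new version are standard `Blocks.launch()` parameters: `share=False` skips the public gradio.live tunnel, `inline=False` keeps Gradio from embedding the app inline when launched from a notebook, and `max_threads=40` raises the cap on concurrently processed events. In context (the placeholder UI is illustrative):

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")

demo.launch(
    server_port=7860,                          # same fixed port as the app
    show_error=True,                           # surface tracebacks in the browser
    allowed_paths=["/data/hf_cache/reports"],  # let gr.File serve saved reports
    share=False,                               # no public gradio.live tunnel
    inline=False,                              # don't auto-embed in notebooks
    max_threads=40,                            # cap on concurrently handled events
)
```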