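# NOTE: this section assumes the following are imported/defined earlier in the
# module (they are referenced below but not shown in this excerpt):
#   import gc, json, os
#   import gradio as gr
#   import torch
#   from concurrent.futures import ThreadPoolExecutor, as_completed
#   from typing import Any, Dict, Generator, List
# along with the helpers process_file, file_hash, update_progress,
# tokenize_and_chunk, process_response_stream, summarize_findings,
# and the module-level logger and report_dir.
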
chatbot = gr.Chatbot(
    label="Analysis Conversation",
    height=600,
    show_copy_button=True,
    avatar_images=(
        "assets/user.png",
        "assets/assistant.png"
    ) if os.path.exists("assets/user.png") else None,
    render=False,
    bubble_full_width=False,
    type="messages"
)
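
# The component is created with render=False, so it is expected to be placed
# into the page later inside the gr.Blocks layout, roughly like:
#
#     with gr.Blocks() as demo:
#         chatbot.render()
#
# (the surrounding layout is defined elsewhere in the app; "demo" is just an
# illustrative name).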


def analyze(message: str, history: List[dict], files: List) -> Generator[Dict[str, Any], None, None]:
    """Run the analysis pipeline and stream UI updates as they become available.

    Each yielded dict maps an output name ("chatbot", "download_output",
    "final_summary", "progress_text") to its new value.
    """
    outputs = {
        "chatbot": history.copy(),
        "download_output": None,
        "final_summary": "",
        "progress_text": {"value": "Starting analysis...", "visible": True}
    }
    yield outputs

    try:
        # Echo the user's message into the conversation immediately.
        history.append({"role": "user", "content": message})
        outputs["chatbot"] = history
        yield outputs

        extracted = []
        file_hash_value = ""

        if files:
            # Extract text from each uploaded file in parallel.
            with ThreadPoolExecutor(max_workers=4) as executor:
                futures = []
                for f in files:
                    file_type = f.name.split(".")[-1].lower()
                    futures.append(executor.submit(process_file, f.name, file_type))

                # Report progress as each file finishes, in completion order.
                for i, future in enumerate(as_completed(futures), 1):
                    try:
                        extracted.extend(future.result())
                        outputs["progress_text"] = update_progress(i, len(files), "Processing files")
                        yield outputs
                    except Exception as e:
                        logger.error(f"File processing error: {e}")
                        extracted.append({"error": f"Error processing file: {str(e)}"})

            # The first file's hash names the report on disk.
            file_hash_value = file_hash(files[0].name)
            history.append({"role": "assistant", "content": "✅ File processing complete"})
            outputs.update({
                "chatbot": history,
                "progress_text": update_progress(len(files), len(files), "Files processed")
            })
            yield outputs

        # Flatten the extracted records to text and split into model-sized chunks.
        text_content = "\n".join(json.dumps(item) for item in extracted)
        chunks = tokenize_and_chunk(text_content)
        combined_response = ""

        for chunk_idx, chunk in enumerate(chunks, 1):
            prompt = f"""Analyze this patient record for missed diagnoses..."""

            # Placeholder assistant message that the streamed response will fill in.
            history.append({"role": "assistant", "content": ""})
            outputs.update({
                "chatbot": history,
                "progress_text": update_progress(chunk_idx, len(chunks), "Analyzing")
            })
            yield outputs

            # Stream the model's response, updating the last chat message in place.
            chunk_response = ""
            for update in process_response_stream(prompt, history):
                history[-1] = update
                chunk_response = update["content"]
                outputs.update({
                    "chatbot": history,
                    "progress_text": update_progress(chunk_idx, len(chunks), "Analyzing")
                })
                yield outputs

            combined_response += f"--- Analysis for Chunk {chunk_idx} ---\n{chunk_response}\n"
            # Release GPU and Python memory between chunks.
            torch.cuda.empty_cache()
            gc.collect()

        # Summarize all chunk analyses and write the full report to disk.
        summary = summarize_findings(combined_response)
        report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt") if file_hash_value else None
        if report_path:
            with open(report_path, "w", encoding="utf-8") as f:
                f.write(combined_response + "\n\n" + summary)

        outputs.update({
            "download_output": report_path,
            "final_summary": summary,
            "progress_text": {"visible": False}
        })
        yield outputs

    except Exception as e:
        logger.error("Analysis error: %s", e)
        # Surface the failure in the chat and the summary pane, and hide progress.
        history.append({"role": "assistant", "content": f"❌ Error: {str(e)}"})
        outputs.update({
            "chatbot": history,
            "final_summary": f"Error occurred: {str(e)}",
            "progress_text": {"visible": False}
        })
        yield outputs
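

# A minimal sketch of how analyze() might be wired to the UI, assuming output
# components named download_output, final_summary and progress_text, plus a
# msg_box Textbox, a file_upload File input and a send_btn Button (these names
# are illustrative, not taken from this excerpt). Because Gradio event handlers
# expect positional outputs, a thin adapter can unpack the yielded dicts into
# the component order:
#
#     def analyze_adapter(message, history, files):
#         for out in analyze(message, history, files):
#             yield (
#                 out["chatbot"],
#                 out["download_output"],
#                 out["final_summary"],
#                 gr.update(**out["progress_text"]),
#             )
#
#     send_btn.click(
#         analyze_adapter,
#         inputs=[msg_box, chatbot, file_upload],
#         outputs=[chatbot, download_output, final_summary, progress_text],
#     )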