import sys
import os
import pandas as pd
import pdfplumber
import json
import gradio as gr
from typing import List
from concurrent.futures import ThreadPoolExecutor, as_completed
import hashlib
import shutil
import re
import psutil
import subprocess
import threading
import torch
from diskcache import Cache
import time
# Persistent directory
persistent_dir = "/data/hf_cache"
os.makedirs(persistent_dir, exist_ok=True)
model_cache_dir = os.path.join(persistent_dir, "txagent_models")
tool_cache_dir = os.path.join(persistent_dir, "tool_cache")
file_cache_dir = os.path.join(persistent_dir, "cache")
report_dir = os.path.join(persistent_dir, "reports")
vllm_cache_dir = os.path.join(persistent_dir, "vllm_cache")
for directory in [model_cache_dir, tool_cache_dir, file_cache_dir, report_dir, vllm_cache_dir]:
    os.makedirs(directory, exist_ok=True)
os.environ["HF_HOME"] = model_cache_dir
os.environ["TRANSFORMERS_CACHE"] = model_cache_dir
os.environ["VLLM_CACHE_DIR"] = vllm_cache_dir
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
os.environ["OMP_NUM_THREADS"] = str(os.cpu_count() // 2) # Optimize CPU threading
current_dir = os.path.dirname(os.path.abspath(__file__))
src_path = os.path.abspath(os.path.join(current_dir, "src"))
sys.path.insert(0, src_path)
from txagent.txagent import TxAgent
# Initialize cache with 10GB limit
cache = Cache(file_cache_dir, size_limit=10 * 1024**3)
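# Note: once size_limit is exceeded, diskcache evicts old entries on its own
# (by default, least-recently-stored first), so repeated runs stay within the
# 10GB budget without manual cleanup.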
def sanitize_utf8(text: str) -> str:
    # Drop any byte sequences that cannot round-trip through UTF-8
    return text.encode("utf-8", "ignore").decode("utf-8")
def file_hash(path: str) -> str:
    # MD5 is used only as a cache key here, not for security
    with open(path, "rb") as f:
        return hashlib.md5(f.read()).hexdigest()
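# Illustrative use of the cache-key scheme (the file name is hypothetical):
#   key = f"{file_hash('/tmp/record.pdf')}_pdf"
#   cache.get(key)  # None until convert_file_to_json() has stored a result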
def extract_all_pages(file_path: str, progress_callback=None) -> str:
    try:
        with pdfplumber.open(file_path) as pdf:
            total_pages = len(pdf.pages)
            if total_pages == 0:
                return ""
            batch_size = 10  # Process 10 pages per thread batch
            batches = [(i, min(i + batch_size, total_pages)) for i in range(0, total_pages, batch_size)]
            text_chunks = [""] * total_pages  # Pre-allocate to preserve page order
            processed_pages = 0
            def extract_batch(start: int, end: int) -> List[tuple]:
                results = []
                with pdfplumber.open(file_path) as pdf:  # Reopen per thread; pdfplumber handles are not thread-safe
                    for page_num, page in enumerate(pdf.pages[start:end], start=start):
                        page_text = page.extract_text() or ""
                        results.append((page_num, f"=== Page {page_num + 1} ===\n{page_text.strip()}"))
                return results
            with ThreadPoolExecutor(max_workers=min(6, os.cpu_count() or 1)) as executor:
                futures = [executor.submit(extract_batch, start, end) for start, end in batches]
                for future in as_completed(futures):
                    batch_results = future.result()
                    for page_num, text in batch_results:
                        text_chunks[page_num] = text
                    processed_pages += len(batch_results)  # Count actual pages, not a full batch_size
                    if progress_callback:
                        progress_callback(min(processed_pages, total_pages), total_pages)
            return "\n\n".join(filter(None, text_chunks))
    except Exception as e:
        return f"PDF processing error: {str(e)}"
def convert_file_to_json(file_path: str, file_type: str, progress_callback=None) -> str:
    try:
        file_h = file_hash(file_path)
        cache_key = f"{file_h}_{file_type}"
        if cache_key in cache:
            return cache[cache_key]
        if file_type == "pdf":
            text = extract_all_pages(file_path, progress_callback)
            result = json.dumps({"filename": os.path.basename(file_path), "content": text, "status": "initial"})
        elif file_type == "csv":
            df = pd.read_csv(file_path, encoding_errors="replace", header=None, dtype=str,
                             skip_blank_lines=False, on_bad_lines="skip")
            content = df.fillna("").astype(str).values.tolist()
            result = json.dumps({"filename": os.path.basename(file_path), "rows": content})
        elif file_type in ["xls", "xlsx"]:
            # openpyxl reads only .xlsx; legacy .xls needs the xlrd engine (xlrd package required)
            engine = "openpyxl" if file_type == "xlsx" else "xlrd"
            df = pd.read_excel(file_path, engine=engine, header=None, dtype=str)
            content = df.fillna("").astype(str).values.tolist()
            result = json.dumps({"filename": os.path.basename(file_path), "rows": content})
        else:
            result = json.dumps({"error": f"Unsupported file type: {file_type}"})
        cache[cache_key] = result
        return result
    except Exception as e:
        return json.dumps({"error": f"Error processing {os.path.basename(file_path)}: {str(e)}"})
def log_system_usage(tag=""):
    try:
        cpu = psutil.cpu_percent(interval=1)
        mem = psutil.virtual_memory()
        print(f"[{tag}] CPU: {cpu}% | RAM: {mem.used // (1024**2)}MB / {mem.total // (1024**2)}MB")
        result = subprocess.run(
            ["nvidia-smi", "--query-gpu=memory.used,memory.total,utilization.gpu", "--format=csv,nounits,noheader"],
            capture_output=True, text=True
        )
        if result.returncode == 0:
            used, total, util = result.stdout.strip().split(", ")
            print(f"[{tag}] GPU: {used}MB / {total}MB | Utilization: {util}%")
    except Exception as e:
        print(f"[{tag}] GPU/CPU monitor failed: {e}")
def clean_response(text: str) -> str:
    text = sanitize_utf8(text)
    # Strip bracketed debris (but keep [TOOL: ...] tags, which drive the
    # grouping below), placeholder "None" tokens, and progress narration
    text = re.sub(r"\[(?!TOOL:).*?\]|\bNone\b|To analyze the patient record excerpt.*?medications\.|Since the previous attempts.*?\.|I need to.*?medications\.|Retrieving tools.*?\.", "", text, flags=re.DOTALL)
    text = re.sub(r"\n{3,}", "\n\n", text)
    # Allow brackets through the character filter so [TOOL: ...] tags survive
    text = re.sub(r"[^\n#\-\*\w\s\.\,\:\(\)\[\]]+", "", text)
    tool_to_heading = {
        "get_abuse_info_by_drug_name": "Drugs",
        "get_dependence_info_by_drug_name": "Drugs",
        "get_abuse_types_and_related_adverse_reactions_and_controlled_substance_status_by_drug_name": "Drugs",
        "get_info_for_patients_by_drug_name": "Drugs",
    }
    sections = {}
    current_section = None
    current_tool = None
    for line in text.splitlines():
        line = line.strip()
        if not line:
            continue
        tool_match = re.match(r"\[TOOL:\s*(\w+)\]", line)
        if tool_match:
            current_tool = tool_match.group(1)
            continue
        section_match = re.match(r"###\s*(Missed Diagnoses|Medication Conflicts|Incomplete Assessments|Urgent Follow-up)", line)
        if section_match:
            current_section = section_match.group(1)
            if current_section not in sections:
                sections[current_section] = []
            continue
        finding_match = re.match(r"-\s*.+", line)
        if finding_match and current_section and not re.match(r"-\s*No issues identified", line):
            if current_tool and current_tool in tool_to_heading:
                heading = tool_to_heading[current_tool]
                if heading not in sections:
                    sections[heading] = []
                sections[heading].append(line)
            else:
                sections[current_section].append(line)
    cleaned = [f"### {heading}\n" + "\n".join(findings) for heading, findings in sections.items() if findings]
    return "\n\n".join(cleaned).strip()
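# Illustrative transformation (the input is hypothetical model output):
#   "[TOOL: get_abuse_info_by_drug_name]\n### Missed Diagnoses\n- Oxycodone misuse risk"
#   -> "### Drugs\n- Oxycodone misuse risk"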
def init_agent():
    print("🚀 Initializing model...")
    log_system_usage("Before Load")
    default_tool_path = os.path.abspath("data/new_tool.json")
    target_tool_path = os.path.join(tool_cache_dir, "new_tool.json")
    if not os.path.exists(target_tool_path):
        shutil.copy(default_tool_path, target_tool_path)
    agent = TxAgent(
        model_name="mims-harvard/TxAgent-T1-Llama-3.1-8B",
        rag_model_name="mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B",
        tool_files_dict={"new_tool": target_tool_path},
        force_finish=True,
        enable_checker=False,  # Disable checker for speed
        step_rag_num=4,
        seed=100,
        additional_default_tools=[],
        dtype=torch.float16,  # Mixed precision halves weight memory
    )
    def preload_models():
        agent.init_model()
        log_system_usage("After Load")
    # Start the loader thread and join it immediately: effectively synchronous
    preload_thread = threading.Thread(target=preload_models)
    preload_thread.start()
    preload_thread.join()
    print("✅ Agent Ready")
    return agent
def create_ui(agent):
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("<h1 style='text-align: center;'>🩺 Clinical Oversight Assistant</h1>")
        chatbot = gr.Chatbot(label="Analysis", height=600, type="messages")
        file_upload = gr.File(file_types=[".pdf", ".csv", ".xls", ".xlsx"], file_count="multiple")
        msg_input = gr.Textbox(placeholder="Ask about potential oversights...", show_label=False)
        send_btn = gr.Button("Analyze", variant="primary")
        download_output = gr.File(label="Download Full Report")
        # Progress is surfaced through the `progress` argument of analyze();
        # gr.Progress is not a layout component and cannot be used as an output.
        prompt_template = """
Analyze the patient record excerpt for clinical oversights. Provide a concise, evidence-based summary in markdown with findings grouped under tool-derived headings (e.g., 'Drugs'). For each finding, include clinical context, risks, and recommendations. Precede findings with a tool tag (e.g., [TOOL: get_abuse_info_by_drug_name]). Output only markdown bullet points under headings. If no issues, state "No issues identified".
Patient Record Excerpt (Chunk {0} of {1}):
{chunk}
"""
        def analyze(message: str, history: List[dict], files: List, progress=gr.Progress()):
            history.append({"role": "user", "content": message})
            yield history, None
            extracted = ""
            file_hash_value = ""
            if files:
                def update_extraction_progress(current, total):
                    progress(current / total, desc=f"Extracting text... Page {current}/{total}")
                with ThreadPoolExecutor(max_workers=6) as executor:
                    futures = [executor.submit(convert_file_to_json, f.name, f.name.split(".")[-1].lower(), update_extraction_progress) for f in files]
                    results = [sanitize_utf8(f.result()) for f in as_completed(futures)]
                extracted = "\n".join(results)
                file_hash_value = file_hash(files[0].name)
                history.append({"role": "assistant", "content": "✅ Text extraction complete."})
                yield history, None
            chunk_size = 6000
            chunks = [extracted[i:i + chunk_size] for i in range(0, len(extracted), chunk_size)]
            combined_response = ""
            batch_size = 2
            try:
                for batch_idx in range(0, len(chunks), batch_size):
                    batch_chunks = chunks[batch_idx:batch_idx + batch_size]
                    # Number chunks globally (batch_idx offset), not within the batch
                    batch_prompts = [prompt_template.format(batch_idx + i + 1, len(chunks), chunk=chunk[:4000]) for i, chunk in enumerate(batch_chunks)]
                    batch_responses = []
                    progress((batch_idx + 1) / len(chunks), desc=f"Analyzing chunks {batch_idx + 1}-{min(batch_idx + batch_size, len(chunks))}/{len(chunks)}")
                    with ThreadPoolExecutor(max_workers=len(batch_chunks)) as executor:
                        futures = [executor.submit(agent.run_gradio_chat, prompt, [], 0.2, 512, 2048, False, []) for prompt in batch_prompts]
                        # Iterate in submission order so responses stay aligned with chunk numbers
                        for future in futures:
                            chunk_response = ""
                            for chunk_output in future.result():
                                if chunk_output is None:
                                    continue
                                if isinstance(chunk_output, list):
                                    for m in chunk_output:
                                        if hasattr(m, 'content') and m.content:
                                            cleaned = clean_response(m.content)
                                            if cleaned and re.search(r"###\s*\w+", cleaned):
                                                chunk_response += cleaned + "\n\n"
                                elif isinstance(chunk_output, str) and chunk_output.strip():
                                    cleaned = clean_response(chunk_output)
                                    if cleaned and re.search(r"###\s*\w+", cleaned):
                                        chunk_response += cleaned + "\n\n"
                            batch_responses.append(chunk_response)
                    for chunk_idx, chunk_response in enumerate(batch_responses, batch_idx + 1):
                        if chunk_response:
                            combined_response += f"--- Analysis for Chunk {chunk_idx} ---\n{chunk_response}\n"
                        else:
                            combined_response += f"--- Analysis for Chunk {chunk_idx} ---\nNo oversights identified for this chunk.\n\n"
                    history[-1] = {"role": "assistant", "content": combined_response.strip()}
                    yield history, None
                # Skip the empty leading split segment so an all-clear run is detected correctly
                segments = [s for s in combined_response.split("--- Analysis for Chunk") if s.strip()]
                if combined_response.strip() and not all("No oversights identified" in s for s in segments):
                    history[-1]["content"] = combined_response.strip()
                else:
                    history.append({"role": "assistant", "content": "No oversights identified in the provided records."})
                report_path = os.path.join(report_dir, f"{file_hash_value}_report.txt") if file_hash_value else None
                if report_path:
                    with open(report_path, "w", encoding="utf-8") as f:
                        f.write(combined_response)
                yield history, report_path if report_path and os.path.exists(report_path) else None
            except Exception as e:
                print("🚨 ERROR:", e)
                history.append({"role": "assistant", "content": f"❌ Error occurred: {str(e)}"})
                yield history, None
        send_btn.click(analyze, inputs=[msg_input, gr.State([]), file_upload], outputs=[chatbot, download_output])
        msg_input.submit(analyze, inputs=[msg_input, gr.State([]), file_upload], outputs=[chatbot, download_output])
    return demo
if __name__ == "__main__":
    print("🚀 Launching app...")
    agent = init_agent()
    demo = create_ui(agent)
    demo.queue(api_open=False).launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True,
        allowed_paths=[report_dir],
        share=False
    )