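"""Patient history analysis app.

Extracts text from an uploaded Excel workbook, splits it into chunks that fit
the model context, streams per-chunk TxAgent analyses through a Gradio chat UI,
and saves a final Markdown report under the persistent report directory.
"""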
import sys
import os
import re
import shutil
from datetime import datetime
from typing import List, Tuple, Dict, Generator, Union

import pandas as pd
import gradio as gr

# Configuration and setup
persistent_dir = "/data/hf_cache"
os.makedirs(persistent_dir, exist_ok=True)

model_cache_dir = os.path.join(persistent_dir, "txagent_models")
tool_cache_dir = os.path.join(persistent_dir, "tool_cache")
file_cache_dir = os.path.join(persistent_dir, "cache")
report_dir = os.path.join(persistent_dir, "reports")

for directory in [model_cache_dir, tool_cache_dir, file_cache_dir, report_dir]:
    os.makedirs(directory, exist_ok=True)

os.environ["HF_HOME"] = model_cache_dir
os.environ["TRANSFORMERS_CACHE"] = model_cache_dir

current_dir = os.path.dirname(os.path.abspath(__file__))
src_path = os.path.abspath(os.path.join(current_dir, "src"))
sys.path.insert(0, src_path)

from txagent.txagent import TxAgent

# Constants
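# MAX_TOKENS bounds the estimated size of each input chunk;
# MAX_NEW_TOKENS caps the length of each model response.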
MAX_TOKENS = 32768
MAX_NEW_TOKENS = 2048


def clean_response(text: str) -> str:
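    """Normalize model output: repair invalid unicode, strip bracketed
    tool chatter and stray "None" tokens, collapse runs of blank lines,
    and drop characters outside a basic Markdown-safe set."""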
    try:
        text = text.encode('utf-8', 'surrogatepass').decode('utf-8')
    except UnicodeError:
        text = text.encode('utf-8', 'replace').decode('utf-8')
    text = re.sub(r"\[.*?\]|\bNone\b", "", text, flags=re.DOTALL)
    text = re.sub(r"\n{3,}", "\n\n", text)
    text = re.sub(r"[^\n#\-\*\w\s\.,:\(\)]+", "", text)
    return text.strip()


def estimate_tokens(text: str) -> int:
    # Rough heuristic (~3.5 characters per token); int() keeps the
    # annotated return type, since `len(text) // 3.5` yields a float.
    return int(len(text) / 3.5)


def extract_text_from_excel(file_path: str) -> str:
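    """Flatten every sheet of the workbook into "[sheet] cell | cell | ..." lines."""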
    all_text = []
    xls = pd.ExcelFile(file_path)
    for sheet_name in xls.sheet_names:
        df = xls.parse(sheet_name)
        df = df.fillna("").astype(str)  # fillna before astype, or NaN becomes the string "nan"
        rows = df.apply(lambda row: " | ".join(row), axis=1)
        sheet_text = [f"[{sheet_name}] {line}" for line in rows]
        all_text.extend(sheet_text)
    return "\n".join(all_text)


def split_text_into_chunks(text: str, max_tokens: int = MAX_TOKENS) -> List[str]:
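    """Greedily pack lines into chunks whose estimated token count stays within max_tokens."""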
    lines = text.split("\n")
    chunks = []
    current_chunk = []
    current_tokens = 0

    for line in lines:
        tokens = estimate_tokens(line)
        if current_tokens + tokens > max_tokens:
            chunks.append("\n".join(current_chunk))
            current_chunk = [line]
            current_tokens = tokens
        else:
            current_chunk.append(line)
            current_tokens += tokens

    if current_chunk:
        chunks.append("\n".join(current_chunk))
    return chunks


def build_prompt_from_text(chunk: str) -> str:
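    """Wrap one chunk of extracted text in the clinical-review prompt."""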
    return f"""
### Unstructured Clinical Records

You are reviewing unstructured, mixed-format clinical documentation from various forms, tables, and sheets.

**Objective:** Identify patterns, missed diagnoses, inconsistencies, and follow-up gaps.

Here is the extracted content chunk:

{chunk}

Please analyze the above and provide:
- Diagnostic Patterns
- Medication Issues
- Missed Opportunities
- Inconsistencies
- Follow-up Recommendations
"""


def init_agent():
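    """Copy the bundled tool definitions into the cache and build a TxAgent."""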
    default_tool_path = os.path.abspath("data/new_tool.json")
    target_tool_path = os.path.join(tool_cache_dir, "new_tool.json")

    if not os.path.exists(target_tool_path):
        shutil.copy(default_tool_path, target_tool_path)

    agent = TxAgent(
        model_name="mims-harvard/TxAgent-T1-Llama-3.1-8B",
        rag_model_name="mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B",
        tool_files_dict={"new_tool": target_tool_path},
        force_finish=True,
        enable_checker=True,
        step_rag_num=4,
        seed=100,
        additional_default_tools=[]
    )
    agent.init_model()
    return agent


def stream_final_report(agent, file) -> Generator[Tuple[List[Dict[str, str]], Union[str, None]], None, None]:
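    """Yield (chat history, report path) pairs so Gradio can stream progress;
    the report path stays None until the final report is written."""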
    # Initialize with empty values
    messages = []
    report_path = None
    
    if file is None or not hasattr(file, "name"):
        messages = [{"role": "assistant", "content": "❌ Please upload a valid Excel file before analyzing."}]
        yield messages, None
        return

    try:
        # Initial processing message
        messages = [{"role": "user", "content": f"Processing Excel file: {os.path.basename(file.name)}"},
                   {"role": "assistant", "content": "⏳ Extracting and analyzing data..."}]
        yield messages, None

        extracted_text = extract_text_from_excel(file.name)
        chunks = split_text_into_chunks(extracted_text)
        chunk_responses = []

        # Process each chunk
        for i, chunk in enumerate(chunks):
            messages.append({"role": "assistant", "content": f"πŸ” Analyzing chunk {i+1}/{len(chunks)}..."})
            yield messages, None
            
            prompt = build_prompt_from_text(chunk)
            response = ""
            for result in agent.run_gradio_chat(
                message=prompt,
                history=[],
                temperature=0.2,
                max_new_tokens=MAX_NEW_TOKENS,
                max_token=MAX_TOKENS,
                call_agent=False,
                conversation=[],
            ):
                if isinstance(result, str):
                    response += result
                elif hasattr(result, "content"):
                    response += result.content
                elif isinstance(result, list):
                    for r in result:
                        if hasattr(r, "content"):
                            response += r.content
            
            chunk_responses.append(clean_response(response))
            messages.append({"role": "assistant", "content": f"βœ… Chunk {i+1} analysis complete"})
            yield messages, None

        # Final summarization
        final_prompt = "\n\n".join(chunk_responses) + "\n\nSummarize the key findings above."
        messages.append({"role": "assistant", "content": "πŸ“Š Generating final report..."})
        yield messages, None

        stream_text = ""
        for result in agent.run_gradio_chat(
            message=final_prompt,
            history=[],
            temperature=0.2,
            max_new_tokens=MAX_NEW_TOKENS,
            max_token=MAX_TOKENS,
            call_agent=False,
            conversation=[],
        ):
            if isinstance(result, str):
                stream_text += result
            elif hasattr(result, "content"):
                stream_text += result.content
            elif isinstance(result, list):
                for r in result:
                    if hasattr(r, "content"):
                        stream_text += r.content
            
            messages[-1]["content"] = f"πŸ“Š Generating final report...\n\n{clean_response(stream_text)}"
            yield messages, None

        # Save final report
        final_report = f"# \U0001f9e0 Final Patient Report\n\n{clean_response(stream_text)}"
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        report_path = os.path.join(report_dir, f"report_{timestamp}.md")
        
        with open(report_path, "w", encoding="utf-8") as f:
            f.write(final_report)

        messages.append({"role": "assistant", "content": f"βœ… Report generated and saved: report_{timestamp}.md"})
        yield messages, report_path

    except Exception as e:
        messages.append({"role": "assistant", "content": f"❌ Error processing file: {str(e)}"})
        yield messages, None


def create_ui(agent):
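    """Assemble the Gradio Blocks interface around an initialized agent."""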
    with gr.Blocks(title="Patient History Chat", css=".gradio-container {max-width: 900px !important}") as demo:
        gr.Markdown("## πŸ₯ Patient History Analysis Tool")
        
        with gr.Row():
            with gr.Column(scale=3):
                chatbot = gr.Chatbot(
                    label="Clinical Assistant",
                    show_copy_button=True,
                    height=600,
                    type="messages",
                    avatar_images=(
                        None,  # User avatar
                        "https://i.imgur.com/6wX7Zb4.png"  # Bot avatar
                    )
                )
            with gr.Column(scale=1):
                file_upload = gr.File(
                    label="Upload Excel File",
                    file_types=[".xlsx"],
                    height=100
                )
                analyze_btn = gr.Button(
                    "🧠 Analyze Patient History",
                    variant="primary"
                )
                report_output = gr.File(
                    label="Download Report",
                    visible=False,
                    interactive=False
                )

        def analyze(file):
            # Delegate with `yield from` so Gradio detects a generator
            # function and streams updates; a bare lambda wrapper would not.
            yield from stream_final_report(agent, file)

        def show_report(report_path):
            # Reveal the download component once a report file exists.
            if report_path:
                return gr.File(visible=True, value=report_path)
            return gr.File(visible=False)

        analyze_btn.click(
            fn=analyze,
            inputs=[file_upload],
            outputs=[chatbot, report_output],
            api_name="analyze"
        ).then(
            fn=show_report,
            inputs=[report_output],
            outputs=[report_output]
        )

    return demo


if __name__ == "__main__":
    try:
        agent = init_agent()
        demo = create_ui(agent)
        demo.launch(
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True,
            allowed_paths=["/data/hf_cache/reports"],
            share=False
        )
    except Exception as e:
        print(f"Error: {str(e)}")
        sys.exit(1)