Ali2206 commited on
Commit
a7e68bf
·
verified ·
1 Parent(s): 3213a0d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +266 -335
app.py CHANGED
@@ -1,275 +1,278 @@
1
  import sys
2
  import os
3
  import pandas as pd
4
- import json
5
  import gradio as gr
6
- from typing import List, Tuple, Dict, Any
7
- import hashlib
8
- import shutil
9
  import re
 
10
  from datetime import datetime
11
- import time
12
- import markdown
13
  from collections import defaultdict
 
14
 
15
- # Configuration and setup
16
- persistent_dir = "/data/hf_cache"
17
- os.makedirs(persistent_dir, exist_ok=True)
18
-
19
- model_cache_dir = os.path.join(persistent_dir, "txagent_models")
20
- tool_cache_dir = os.path.join(persistent_dir, "tool_cache")
21
- file_cache_dir = os.path.join(persistent_dir, "cache")
22
- report_dir = os.path.join(persistent_dir, "reports")
23
-
24
- for directory in [model_cache_dir, tool_cache_dir, file_cache_dir, report_dir]:
25
- os.makedirs(directory, exist_ok=True)
26
-
27
- os.environ["HF_HOME"] = model_cache_dir
28
- os.environ["TRANSFORMERS_CACHE"] = model_cache_dir
29
 
30
- current_dir = os.path.dirname(os.path.abspath(__file__))
31
- src_path = os.path.abspath(os.path.join(current_dir, "src"))
32
- sys.path.insert(0, src_path)
 
33
 
34
- from txagent.txagent import TxAgent
 
 
 
 
 
35
 
36
- def file_hash(path: str) -> str:
37
- """Generate MD5 hash of file contents"""
38
- with open(path, "rb") as f:
39
- return hashlib.md5(f.read()).hexdigest()
40
-
41
- def clean_response(text: str) -> str:
42
- """Clean and normalize text output"""
43
- try:
44
- text = text.encode('utf-8', 'surrogatepass').decode('utf-8')
45
- except UnicodeError:
46
- text = text.encode('utf-8', 'replace').decode('utf-8')
47
-
48
- # Remove unwanted patterns and normalize whitespace
49
- text = re.sub(r"\[.*?\]|\bNone\b", "", text, flags=re.DOTALL)
50
- text = re.sub(r"\n{3,}", "\n\n", text)
51
- text = re.sub(r"[^\n#\-\*\w\s\.,:\(\)]+", "", text)
52
- return text.strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
- def extract_patient_data(df: pd.DataFrame) -> Dict[str, Any]:
55
- """Extract and organize all medical data for a single patient"""
56
- patient_data = {
57
- 'appointments': [],
58
- 'timeline': defaultdict(list),
59
- 'doctors': set(),
60
- 'medications': defaultdict(list),
61
- 'diagnoses': defaultdict(list),
62
- 'tests': defaultdict(list)
63
- }
64
-
65
- # Sort by interview date to create timeline
66
- df = df.sort_values('Interview Date')
67
-
68
- for _, row in df.iterrows():
69
- appointment = {
70
- 'date': row.get('Interview Date', ''),
71
- 'doctor': row.get('Interviewer', ''),
72
- 'form': row.get('Form Name', ''),
73
- 'item': row.get('Form Item', ''),
74
- 'response': row.get('Item Response', ''),
75
- 'notes': row.get('Description', '')
76
- }
77
- patient_data['appointments'].append(appointment)
78
- patient_data['doctors'].add(row.get('Interviewer', ''))
79
 
80
- # Categorize data for analysis
81
- form_name = row['Form Name'].lower()
82
- item = row['Form Item'].lower()
83
- response = row['Item Response']
 
 
 
84
 
85
- if 'medication' in form_name or 'drug' in form_name:
86
- patient_data['medications'][item].append({
87
- 'date': row['Interview Date'],
88
- 'doctor': row['Interviewer'],
89
- 'response': response
90
- })
91
- elif 'diagnosis' in form_name:
92
- patient_data['diagnoses'][item].append({
93
- 'date': row['Interview Date'],
94
- 'doctor': row['Interviewer'],
95
- 'response': response
96
  })
97
- elif 'test' in form_name or 'lab' in form_name:
98
- patient_data['tests'][item].append({
99
- 'date': row['Interview Date'],
100
- 'doctor': row['Interviewer'],
101
- 'response': response
 
 
 
102
  })
103
 
104
- # Add to timeline by date
105
- patient_data['timeline'][row['Interview Date']].append({
106
- 'form': row['Form Name'],
107
- 'item': row['Form Item'],
108
- 'response': row['Item Response'],
109
- 'doctor': row['Interviewer']
110
- })
111
-
112
- return patient_data
113
 
114
- def generate_patient_prompt(patient_data: Dict[str, Any]) -> str:
115
- """Generate comprehensive analysis prompt for a single patient"""
116
- # Create timeline summary
117
- timeline_text = []
118
- for date, entries in patient_data['timeline'].items():
119
- timeline_text.append(f"\n### {date}")
120
- for entry in entries:
121
- timeline_text.append(
122
- f"- {entry['form']}: {entry['item']} = {entry['response']} (by Dr. {entry['doctor']})"
123
- )
124
-
125
- # Create medication history
126
- meds_text = []
127
- for med, records in patient_data['medications'].items():
128
- meds_text.append(f"\n- {med}:")
129
- for record in records:
130
- meds_text.append(
131
- f" - Prescribed on {record['date']} by Dr. {record['doctor']}: {record['response']}"
132
- )
133
-
134
- # Create diagnosis history
135
- diag_text = []
136
- for diag, records in patient_data['diagnoses'].items():
137
- diag_text.append(f"\n- {diag}:")
138
- for record in records:
139
- diag_text.append(
140
- f" - Diagnosed on {record['date']} by Dr. {record['doctor']}: {record['response']}"
141
- )
142
-
143
- # Create test history
144
- tests_text = []
145
- for test, records in patient_data['tests'].items():
146
- tests_text.append(f"\n- {test}:")
147
- for record in records:
148
- tests_text.append(
149
- f" - Tested on {record['date']} by Dr. {record['doctor']}: {record['response']}"
150
  )
151
-
152
- prompt = f"""
153
- **COMPREHENSIVE PATIENT HISTORY ANALYSIS**
154
-
155
- **Patient Timeline**:
156
- {"".join(timeline_text)}
157
-
158
- **Medical History Overview**:
159
- ### Medications
160
- {"".join(meds_text) if meds_text else "No medication records found"}
161
-
162
- ### Diagnoses
163
- {"".join(diag_text) if diag_text else "No diagnosis records found"}
164
-
165
- ### Test Results
166
- {"".join(tests_text) if tests_text else "No test records found"}
167
-
168
- **Analysis Instructions**:
169
- 1. Review the complete patient history across all appointments
170
- 2. Identify any inconsistencies in diagnoses or treatments
171
- 3. Check for medication conflicts or changes over time
172
- 4. Note any unresolved symptoms or conditions
173
- 5. Evaluate test result patterns over time
174
- 6. Flag any concerning trends or gaps in care
175
- 7. Provide comprehensive recommendations
176
-
177
- **Required Output Format**:
178
- ### Clinical Summary
179
- [Overview of patient's medical journey]
180
-
181
- ### Treatment Consistency
182
- - [Evaluation of consistency across different doctors]
183
- - [Notable changes in treatment approaches]
184
-
185
- ### Medication Analysis
186
- - [Current medication list]
187
- - [Potential interactions or issues]
188
- - [Changes over time]
189
 
190
- ### Diagnostic Evaluation
191
- - [Confirmed diagnoses]
192
- - [Potential missed diagnoses]
193
- - [Diagnostic consistency across providers]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
 
195
- ### Test Result Trends
196
- - [Notable patterns in lab/test results]
197
- - [Concerning values or changes]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
198
 
199
- ### Recommended Actions
200
- - [Immediate follow-up needs]
201
- - [Long-term management suggestions]
202
- - [Referral recommendations if needed]
203
- """
204
- return prompt
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
205
 
206
- def parse_excel_to_patient_prompt(file_path: str) -> str:
207
- """Parse Excel file into a comprehensive patient analysis prompt"""
208
- try:
209
- xl = pd.ExcelFile(file_path)
210
- df = xl.parse(xl.sheet_names[0], header=0).fillna("")
211
- patient_data = extract_patient_data(df)
212
- prompt = generate_patient_prompt(patient_data)
213
- return prompt
214
- except Exception as e:
215
- raise ValueError(f"Error parsing Excel file: {str(e)}")
 
 
 
 
 
 
 
 
 
 
 
216
 
217
- def init_agent():
218
- """Initialize the TxAgent with appropriate settings"""
219
- default_tool_path = os.path.abspath("data/new_tool.json")
220
- target_tool_path = os.path.join(tool_cache_dir, "new_tool.json")
221
-
222
- if not os.path.exists(target_tool_path):
223
- shutil.copy(default_tool_path, target_tool_path)
224
 
225
- agent = TxAgent(
226
- model_name="mims-harvard/TxAgent-T1-Llama-3.1-8B",
227
- rag_model_name="mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B",
228
- tool_files_dict={"new_tool": target_tool_path},
229
- force_finish=True,
230
- enable_checker=True,
231
- step_rag_num=4,
232
- seed=100,
233
- additional_default_tools=[],
234
- )
235
- agent.init_model()
236
- return agent
237
-
238
- def create_ui(agent):
239
- """Create Gradio UI interface"""
240
- with gr.Blocks(theme=gr.themes.Soft(), title="Patient History Analyzer") as demo:
241
  gr.Markdown("# 🏥 Comprehensive Patient History Analysis")
242
 
243
  with gr.Tabs():
244
  with gr.TabItem("Analysis"):
245
  with gr.Row():
246
- # Left column - Inputs
247
  with gr.Column(scale=1):
248
- file_upload = gr.File(
249
  label="Upload Patient Records (Excel)",
250
- file_types=[".xlsx"],
251
- file_count="single",
252
- interactive=True
253
  )
254
- msg_input = gr.Textbox(
255
- label="Additional Instructions",
256
- placeholder="Add any specific analysis requests...",
257
- lines=3
258
  )
259
- with gr.Row():
260
- clear_btn = gr.Button("Clear", variant="secondary")
261
- send_btn = gr.Button("Analyze Full History", variant="primary")
262
 
263
- # Right column - Outputs
264
  with gr.Column(scale=2):
265
- chatbot = gr.Chatbot(
266
- label="Patient Analysis Results",
267
- height=600,
268
- bubble_full_width=False,
269
- show_copy_button=True,
270
- render_markdown=True
271
  )
272
- download_output = gr.File(
273
  label="Download Full Report",
274
  interactive=False
275
  )
@@ -278,118 +281,46 @@ def create_ui(agent):
278
  gr.Markdown("""
279
  ## How to Use This Tool
280
 
281
- 1. **Upload Excel File**: Select the patient's medical records Excel file
282
- 2. **Add Instructions** (Optional): Provide any specific analysis requests
283
- 3. **Click Analyze**: The system will process ALL appointments together
284
- 4. **Review Results**: Comprehensive analysis appears in the chat window
285
- 5. **Download Report**: Get a complete text report of all findings
286
-
287
- ### What This Analyzes
288
- - Complete medical history across all appointments
289
- - Treatment consistency across different doctors
290
- - Medication changes and potential interactions
291
- - Diagnostic patterns and potential oversights
292
- - Test result trends over time
293
- - Comprehensive care recommendations
294
- """)
295
-
296
- def analyze(message: str, chat_history: List[Tuple[str, str]], file) -> Tuple[List[Tuple[str, str]], str]:
297
- """Main analysis function for patient history"""
298
- if not file:
299
- raise gr.Error("Please upload an Excel file first")
300
-
301
- try:
302
- # Initialize chat history
303
- new_history = chat_history + [(message, None)]
304
- new_history.append((None, "⏳ Processing complete patient history..."))
305
- yield new_history, None
306
-
307
- # Generate comprehensive prompt
308
- prompt = parse_excel_to_patient_prompt(file.name)
309
-
310
- # Run analysis
311
- full_output = ""
312
- for result in agent.run_gradio_chat(
313
- message=prompt,
314
- history=[],
315
- temperature=0.2,
316
- max_new_tokens=2048,
317
- max_token=4096,
318
- call_agent=False,
319
- conversation=[],
320
- ):
321
- if isinstance(result, list):
322
- for r in result:
323
- if hasattr(r, 'content') and r.content:
324
- cleaned = clean_response(r.content)
325
- full_output += cleaned + "\n"
326
- elif isinstance(result, str):
327
- cleaned = clean_response(result)
328
- full_output += cleaned + "\n"
329
-
330
- if full_output:
331
- new_history[-1] = (None, full_output.strip())
332
- yield new_history, None
333
-
334
- # Save report
335
- file_hash_value = file_hash(file.name)
336
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
337
- report_path = os.path.join(report_dir, f"patient_{file_hash_value}_{timestamp}_report.md")
338
 
339
- with open(report_path, "w", encoding="utf-8") as f:
340
- f.write("# Comprehensive Patient History Analysis\n\n")
341
- f.write(f"**Generated on**: {timestamp}\n\n")
342
- f.write(f"**Source file**: {file.name}\n\n")
343
- f.write(full_output)
 
 
 
 
344
 
345
- yield new_history, report_path if os.path.exists(report_path) else None
346
-
347
- except Exception as e:
348
- new_history.append((None, f"❌ Error: {str(e)}"))
349
- yield new_history, None
350
- raise gr.Error(f"Analysis failed: {str(e)}")
351
-
352
- def clear_chat():
353
- """Clear chat history and outputs"""
354
- return [], None
355
 
356
- # Event handlers
357
- send_btn.click(
358
- analyze,
359
- inputs=[msg_input, chatbot, file_upload],
360
- outputs=[chatbot, download_output],
361
  api_name="analyze"
362
  )
363
-
364
- msg_input.submit(
365
- analyze,
366
- inputs=[msg_input, chatbot, file_upload],
367
- outputs=[chatbot, download_output]
368
- )
369
-
370
- clear_btn.click(
371
- clear_chat,
372
- inputs=[],
373
- outputs=[chatbot, download_output]
374
- )
375
 
376
  return demo
377
 
378
  if __name__ == "__main__":
379
  try:
380
- agent = init_agent()
381
- demo = create_ui(agent)
382
-
383
- demo.queue(
384
- api_open=False,
385
- max_size=20
386
- ).launch(
387
  server_name="0.0.0.0",
388
  server_port=7860,
389
- show_error=True,
390
- allowed_paths=[report_dir],
391
- share=False
392
  )
393
  except Exception as e:
394
- print(f"Failed to launch application: {str(e)}")
395
  sys.exit(1)
 
1
  import sys
2
  import os
3
  import pandas as pd
 
4
  import gradio as gr
 
 
 
5
  import re
6
+ import hashlib
7
  from datetime import datetime
 
 
8
  from collections import defaultdict
9
+ from typing import List, Dict, Tuple
10
 
11
+ # Configuration
12
+ PERSISTENT_DIR = "/data/hf_cache"
13
+ os.makedirs(os.path.join(PERSISTENT_DIR, "reports"), exist_ok=True)
 
 
 
 
 
 
 
 
 
 
 
14
 
15
+ class PatientHistoryAnalyzer:
16
+ def __init__(self):
17
+ self.max_token_length = 2000 # Conservative limit
18
+ self.max_text_length = 500 # Characters per field
19
 
20
+ def clean_text(self, text: str) -> str:
21
+ """Clean and normalize text fields"""
22
+ if not isinstance(text, str):
23
+ text = str(text)
24
+ text = re.sub(r'\s+', ' ', text).strip()
25
+ return text[:self.max_text_length]
26
 
27
+ def process_excel(self, file_path: str) -> Dict[str, List]:
28
+ """Process Excel file into structured patient data"""
29
+ try:
30
+ df = pd.read_excel(file_path)
31
+ df = df.sort_values('Interview Date')
32
+
33
+ data = {
34
+ 'timeline': [],
35
+ 'medications': defaultdict(list),
36
+ 'diagnoses': defaultdict(list),
37
+ 'tests': defaultdict(list),
38
+ 'doctors': set(),
39
+ 'all_entries': [] # For full history analysis
40
+ }
41
+
42
+ for _, row in df.iterrows():
43
+ entry = {
44
+ 'date': self.clean_text(row.get('Interview Date', '')),
45
+ 'doctor': self.clean_text(row.get('Interviewer', '')),
46
+ 'form': self.clean_text(row.get('Form Name', '')),
47
+ 'item': self.clean_text(row.get('Form Item', '')),
48
+ 'response': self.clean_text(row.get('Item Response', '')),
49
+ 'notes': self.clean_text(row.get('Description', ''))
50
+ }
51
+
52
+ data['timeline'].append(entry)
53
+ data['doctors'].add(entry['doctor'])
54
+ data['all_entries'].append(entry)
55
+
56
+ # Categorize entries
57
+ form_lower = entry['form'].lower()
58
+ if 'medication' in form_lower or 'drug' in form_lower:
59
+ data['medications'][entry['item']].append(entry)
60
+ elif 'diagnosis' in form_lower:
61
+ data['diagnoses'][entry['item']].append(entry)
62
+ elif 'test' in form_lower or 'lab' in form_lower:
63
+ data['tests'][entry['item']].append(entry)
64
+
65
+ return data
66
+
67
+ except Exception as e:
68
+ raise ValueError(f"Error processing Excel file: {str(e)}")
69
 
70
+ def generate_analysis_prompt(self, patient_data: Dict) -> List[Dict]:
71
+ """Generate analysis prompts that respect token limits"""
72
+ prompts = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
+ # 1. Current Status Prompt (most recent data)
75
+ current_prompt = self._create_current_status_prompt(patient_data)
76
+ prompts.append({
77
+ 'type': 'current_status',
78
+ 'content': current_prompt,
79
+ 'token_estimate': len(current_prompt.split()) # Rough estimate
80
+ })
81
 
82
+ # 2. Historical Analysis Prompt (if needed)
83
+ if len(patient_data['all_entries']) > 10:
84
+ history_prompt = self._create_historical_prompt(patient_data)
85
+ prompts.append({
86
+ 'type': 'historical',
87
+ 'content': history_prompt,
88
+ 'token_estimate': len(history_prompt.split())
 
 
 
 
89
  })
90
+
91
+ # 3. Medication-Specific Prompt (if complex medication history)
92
+ if len(patient_data['medications']) > 3:
93
+ meds_prompt = self._create_medication_prompt(patient_data)
94
+ prompts.append({
95
+ 'type': 'medications',
96
+ 'content': meds_prompt,
97
+ 'token_estimate': len(meds_prompt.split())
98
  })
99
 
100
+ return prompts
 
 
 
 
 
 
 
 
101
 
102
+ def _create_current_status_prompt(self, data: Dict) -> str:
103
+ """Create prompt for current patient status"""
104
+ recent_entries = data['timeline'][-10:] # Last 10 entries
105
+
106
+ prompt_lines = [
107
+ "**Comprehensive Patient Status Analysis**",
108
+ "Focus on RECENT appointments and CURRENT health status.",
109
+ "Analyze for:",
110
+ "- Medication consistency",
111
+ "- Diagnostic agreement between providers",
112
+ "- Recent concerning findings",
113
+ "- Immediate follow-up needs",
114
+ "",
115
+ "**Recent Timeline (last 10 entries):**"
116
+ ]
117
+
118
+ for entry in recent_entries:
119
+ prompt_lines.append(
120
+ f"- {entry['date']}: {entry['form']} - {entry['item']} = {entry['response']} (by {entry['doctor']})"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  )
122
+
123
+ prompt_lines.extend([
124
+ "",
125
+ "**Current Medications:**",
126
+ *[f"- {med}: {entries[-1]['response']} (last updated {entries[-1]['date']})"
127
+ for med, entries in data['medications'].items()],
128
+ "",
129
+ "**Active Diagnoses:**",
130
+ *[f"- {diag}: {entries[-1]['response']} (last updated {entries[-1]['date']})"
131
+ for diag, entries in data['diagnoses'].items()],
132
+ "",
133
+ "**Required Output Format:**",
134
+ "### Summary of Current Status",
135
+ "### Medication Review",
136
+ "### Diagnostic Consistency",
137
+ "### Urgent Concerns",
138
+ "### Recommended Actions"
139
+ ])
140
+
141
+ return "\n".join(prompt_lines)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
 
143
+ def _create_historical_prompt(self, data: Dict) -> str:
144
+ """Create prompt for historical analysis"""
145
+ return "\n".join([
146
+ "**Historical Patient Analysis**",
147
+ "Focus on LONG-TERM PATTERNS and HISTORY.",
148
+ "",
149
+ "**Key Analysis Points:**",
150
+ "- Treatment changes over time",
151
+ "- Recurring symptoms/issues",
152
+ "- Diagnostic evolution",
153
+ "- Medication history",
154
+ "",
155
+ "**Historical Timeline (condensed):**",
156
+ *[f"- {entry['date'][:7]}: {entry['form']} - {entry['response']}"
157
+ for entry in data['all_entries'][:-10]], # All except recent 10
158
+ "",
159
+ "**Required Output Format:**",
160
+ "### Historical Patterns",
161
+ "### Treatment Evolution",
162
+ "### Chronic Issues",
163
+ "### Long-term Recommendations"
164
+ ])
165
 
166
+ def _create_medication_prompt(self, data: Dict) -> str:
167
+ """Create medication-specific prompt"""
168
+ return "\n".join([
169
+ "**Medication-Specific Analysis**",
170
+ "Focus on MEDICATION HISTORY and POTENTIAL ISSUES.",
171
+ "",
172
+ "**Medication History:**",
173
+ *[f"- {med}: " + ", ".join(
174
+ f"{e['date']}: {e['response']} (by {e['doctor']})"
175
+ for e in entries
176
+ ) for med, entries in data['medications'].items()],
177
+ "",
178
+ "**Analysis Focus:**",
179
+ "- Potential interactions",
180
+ "- Dosage changes",
181
+ "- Prescriber patterns",
182
+ "- Adherence issues",
183
+ "",
184
+ "**Required Output Format:**",
185
+ "### Medication Summary",
186
+ "### Potential Issues",
187
+ "### Prescriber Patterns",
188
+ "### Recommendations"
189
+ ])
190
 
191
+ def generate_report(self, analysis_results: List[str]) -> Tuple[str, str]:
192
+ """Combine analysis results into final report"""
193
+ report = [
194
+ "# Comprehensive Patient History Analysis",
195
+ f"**Generated on**: {datetime.now().strftime('%Y-%m-%d %H:%M')}",
196
+ ""
197
+ ]
198
+
199
+ # Add each analysis section
200
+ for result in analysis_results:
201
+ report.extend(["", "---", "", result])
202
+
203
+ # Add summary section
204
+ report.extend([
205
+ "",
206
+ "## Overall Clinical Summary",
207
+ "This report combines analyses of:",
208
+ "- Current health status",
209
+ "- Historical patterns",
210
+ "- Medication history",
211
+ "",
212
+ "**Key Takeaways:**",
213
+ "[Generated summary of most critical findings would appear here]"
214
+ ])
215
+
216
+ full_report = "\n".join(report)
217
+
218
+ # Save to file
219
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
220
+ report_path = os.path.join(PERSISTENT_DIR, "reports", f"patient_report_{timestamp}.md")
221
+ with open(report_path, 'w') as f:
222
+ f.write(full_report)
223
+
224
+ return full_report, report_path
225
 
226
+ def analyze(self, file_path: str) -> Tuple[str, str]:
227
+ """Main analysis workflow"""
228
+ try:
229
+ # Process data
230
+ patient_data = self.process_excel(file_path)
231
+
232
+ # Generate prompts (simulating LLM analysis)
233
+ prompts = self.generate_analysis_prompt(patient_data)
234
+
235
+ # Simulate LLM responses (in a real system, you'd call your LLM here)
236
+ simulated_responses = [
237
+ "### Summary of Current Status\nPatient shows improvement in blood pressure control but new concerns about medication side effects...",
238
+ "### Historical Patterns\nChronic back pain has been a consistent issue across 5 providers over 3 years...",
239
+ "### Medication Summary\nCurrent regimen includes 4 medications with one potential interaction between..."
240
+ ]
241
+
242
+ # Generate final report
243
+ return self.generate_report(simulated_responses)
244
+
245
+ except Exception as e:
246
+ return f"Error during analysis: {str(e)}", ""
247
 
248
+ # Gradio Interface
249
+ def create_interface():
250
+ analyzer = PatientHistoryAnalyzer()
 
 
 
 
251
 
252
+ with gr.Blocks(title="Patient History Analyzer", theme=gr.themes.Soft()) as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
253
  gr.Markdown("# 🏥 Comprehensive Patient History Analysis")
254
 
255
  with gr.Tabs():
256
  with gr.TabItem("Analysis"):
257
  with gr.Row():
 
258
  with gr.Column(scale=1):
259
+ file_input = gr.File(
260
  label="Upload Patient Records (Excel)",
261
+ file_types=[".xlsx"],
262
+ type="filepath"
 
263
  )
264
+ additional_instructions = gr.Textbox(
265
+ label="Special Instructions (Optional)",
266
+ placeholder="E.g. 'Focus on pain management history'"
 
267
  )
268
+ analyze_btn = gr.Button("Analyze Full History", variant="primary")
 
 
269
 
 
270
  with gr.Column(scale=2):
271
+ output_display = gr.Markdown(
272
+ label="Analysis Results",
273
+ elem_id="results"
 
 
 
274
  )
275
+ report_download = gr.File(
276
  label="Download Full Report",
277
  interactive=False
278
  )
 
281
  gr.Markdown("""
282
  ## How to Use This Tool
283
 
284
+ 1. **Upload** your patient's Excel file containing all medical encounters
285
+ 2. **Click Analyze** to process the complete history
286
+ 3. **Review** the comprehensive analysis
287
+ 4. **Download** the full report
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
288
 
289
+ ### File Requirements
290
+ Excel file must contain these columns:
291
+ - Booking Number
292
+ - Form Name
293
+ - Form Item
294
+ - Item Response
295
+ - Interview Date
296
+ - Interviewer
297
+ - Description
298
 
299
+ ### Analysis Includes
300
+ - Current health status
301
+ - Medication history
302
+ - Diagnostic consistency
303
+ - Treatment patterns
304
+ - Clinical recommendations
305
+ """)
 
 
 
306
 
307
+ analyze_btn.click(
308
+ fn=analyzer.analyze,
309
+ inputs=file_input,
310
+ outputs=[output_display, report_download],
 
311
  api_name="analyze"
312
  )
 
 
 
 
 
 
 
 
 
 
 
 
313
 
314
  return demo
315
 
316
  if __name__ == "__main__":
317
  try:
318
+ demo = create_interface()
319
+ demo.launch(
 
 
 
 
 
320
  server_name="0.0.0.0",
321
  server_port=7860,
322
+ show_error=True
 
 
323
  )
324
  except Exception as e:
325
+ print(f"Error launching application: {str(e)}")
326
  sys.exit(1)