Ali2206 committed on
Commit 57bcbf7 · verified · 1 Parent(s): 3d8532a

Update analysis.py

Files changed (1)
  1. analysis.py +168 -39
analysis.py CHANGED
@@ -1,50 +1,179 @@
-from config import agent, logger, analysis_collection
 from datetime import datetime
 import asyncio

-async def analyze_patient_report(patient_id, report_content, file_type, file_content):
     try:
-        # Simulate analysis (replace with actual logic)
-        conversation = [{"role": "system", "content": agent.chat_prompt}]
-        conversation.append({"role": "user", "content": f"Analyze this report for suicide risk: {report_content}"})
-
-        input_ids = agent.tokenizer.apply_chat_template(
-            conversation, add_generation_prompt=True, return_tensors="pt"
-        ).to(agent.device)
-
-        output = agent.model.generate(
-            input_ids,
-            do_sample=True,
-            temperature=0.5,
-            max_new_tokens=1024,
-            pad_token_id=agent.tokenizer.eos_token_id,
-            return_dict_in_generate=True
-        )

-        text = agent.tokenizer.decode(output["sequences"][0][input_ids.shape[1]:], skip_special_tokens=True)
-        # Parse the text to extract risk level and score (simplified example)
-        risk_level = "moderate"  # Replace with actual parsing logic
-        risk_score = 0.7  # Replace with actual parsing logic

-        analysis = {
             "patient_id": patient_id,
-            "report_content": report_content,
-            "file_type": file_type,
             "timestamp": datetime.utcnow(),
-            "suicide_risk": {
-                "level": risk_level,
-                "score": risk_score,
-                "factors": ["depression", "isolation"]  # Example factors
-            },
-            "summary": {
-                "summary": "Patient shows signs of moderate risk.",
-                "recommendations": "Monitor closely and schedule follow-up."
-            }
         }

-        await analysis_collection.insert_one(analysis)
-        logger.info(f"Analysis completed for patient {patient_id} at {datetime.utcnow().isoformat()}")
-        return analysis
     except Exception as e:
-        logger.error(f"Error analyzing patient report: {str(e)} at {datetime.utcnow().isoformat()}")
-        raise
+from typing import Optional, Tuple, List
+from config import agent, patients_collection, analysis_collection, alerts_collection, logger
+from models import RiskLevel
+from utils import structure_medical_response, compute_file_content_hash, compute_patient_data_hash, serialize_patient
 from datetime import datetime
 import asyncio
+import json
+import re

+async def create_alert(patient_id: str, risk_data: dict):
+    alert_doc = {
+        "patient_id": patient_id,
+        "type": "suicide_risk",
+        "level": risk_data["level"],
+        "score": risk_data["score"],
+        "factors": risk_data["factors"],
+        "timestamp": datetime.utcnow(),
+        "acknowledged": False
+    }
+    await alerts_collection.insert_one(alert_doc)
+    logger.warning(f"⚠️ Created suicide risk alert for patient {patient_id}")
+
+async def analyze_patient_report(patient_id: Optional[str], report_content: str, file_type: str, file_content: bytes):
+    identifier = patient_id if patient_id else compute_file_content_hash(file_content)
+    report_data = {"identifier": identifier, "content": report_content, "file_type": file_type}
+    report_hash = compute_patient_data_hash(report_data)
+    logger.info(f"🧾 Analyzing report for identifier: {identifier}")
+
+    existing_analysis = await analysis_collection.find_one({"identifier": identifier, "report_hash": report_hash})
+    if existing_analysis:
+        logger.info(f"✅ No changes in report data for {identifier}, skipping analysis")
+        return existing_analysis
+
+    prompt = (
+        "You are a clinical decision support AI. Analyze the following patient report:\n"
+        "1. Summarize the patient's medical history.\n"
+        "2. Identify risks or red flags (including mental health and suicide risk).\n"
+        "3. Highlight missed diagnoses or treatments.\n"
+        "4. Suggest next clinical steps.\n"
+        f"\nPatient Report ({file_type}):\n{'-'*40}\n{report_content[:10000]}"
+    )
+
+    raw_response = agent.chat(
+        message=prompt,
+        history=[],
+        temperature=0.7,
+        max_new_tokens=1024
+    )
+    structured_response = structure_medical_response(raw_response)
+
+    risk_level, risk_score, risk_factors = detect_suicide_risk(raw_response)
+    suicide_risk = {
+        "level": risk_level.value,
+        "score": risk_score,
+        "factors": risk_factors
+    }
+
+    analysis_doc = {
+        "identifier": identifier,
+        "patient_id": patient_id,
+        "timestamp": datetime.utcnow(),
+        "summary": structured_response,
+        "suicide_risk": suicide_risk,
+        "raw": raw_response,
+        "report_hash": report_hash,
+        "file_type": file_type
+    }
+
+    await analysis_collection.update_one(
+        {"identifier": identifier, "report_hash": report_hash},
+        {"$set": analysis_doc},
+        upsert=True
+    )
+
+    if patient_id and risk_level in [RiskLevel.MODERATE, RiskLevel.HIGH, RiskLevel.SEVERE]:
+        await create_alert(patient_id, suicide_risk)
+
+    logger.info(f"✅ Stored analysis for identifier {identifier}")
+    return analysis_doc
+
+async def analyze_patient(patient: dict):
     try:
+        serialized = serialize_patient(patient)
+        patient_id = serialized.get("fhir_id")
+        patient_hash = compute_patient_data_hash(serialized)
+        logger.info(f"🧾 Analyzing patient: {patient_id}")
+
+        existing_analysis = await analysis_collection.find_one({"patient_id": patient_id})
+        if existing_analysis and existing_analysis.get("data_hash") == patient_hash:
+            logger.info(f"✅ No changes in patient data for {patient_id}, skipping analysis")
+            return

+        doc = json.dumps(serialized, indent=2)
+        message = (
+            "You are a clinical decision support AI.\n\n"
+            "Given the patient document below:\n"
+            "1. Summarize the patient's medical history.\n"
+            "2. Identify risks or red flags (including mental health and suicide risk).\n"
+            "3. Highlight missed diagnoses or treatments.\n"
+            "4. Suggest next clinical steps.\n"
+            f"\nPatient Document:\n{'-'*40}\n{doc[:10000]}"
+        )

+        raw = agent.chat(message=message, history=[], temperature=0.7, max_new_tokens=1024)
+        structured = structure_medical_response(raw)
+
+        risk_level, risk_score, risk_factors = detect_suicide_risk(raw)
+        suicide_risk = {
+            "level": risk_level.value,
+            "score": risk_score,
+            "factors": risk_factors
+        }
+
+        analysis_doc = {
+            "identifier": patient_id,
             "patient_id": patient_id,
             "timestamp": datetime.utcnow(),
+            "summary": structured,
+            "suicide_risk": suicide_risk,
+            "raw": raw,
+            "data_hash": patient_hash
         }
+
+        await analysis_collection.update_one(
+            {"identifier": patient_id},
+            {"$set": analysis_doc},
+            upsert=True
+        )
+
+        if risk_level in [RiskLevel.MODERATE, RiskLevel.HIGH, RiskLevel.SEVERE]:
+            await create_alert(patient_id, suicide_risk)
+
+        logger.info(f"✅ Stored analysis for patient {patient_id}")

     except Exception as e:
+        logger.error(f"Error analyzing patient: {e}")
+
+def detect_suicide_risk(text: str) -> Tuple[RiskLevel, float, List[str]]:
+    suicide_keywords = [
+        'suicide', 'suicidal', 'kill myself', 'end my life',
+        'want to die', 'self-harm', 'self harm', 'hopeless',
+        'no reason to live', 'plan to die'
+    ]
+    explicit_mentions = [kw for kw in suicide_keywords if kw in text.lower()]
+    if not explicit_mentions:
+        return RiskLevel.NONE, 0.0, []
+
+    assessment_prompt = (
+        "Assess the suicide risk level based on this text. "
+        "Consider frequency, specificity, and severity of statements. "
+        "Respond with JSON format: {\"risk_level\": \"low/moderate/high/severe\", "
+        "\"risk_score\": 0-1, \"factors\": [\"list of risk factors\"]}\n\n"
+        f"Text to assess:\n{text}"
+    )
+
+    try:
+        response = agent.chat(
+            message=assessment_prompt,
+            history=[],
+            temperature=0.2,
+            max_new_tokens=256
+        )
+        json_match = re.search(r'\{.*\}', response, re.DOTALL)
+        if json_match:
+            assessment = json.loads(json_match.group())
+            return (
+                RiskLevel(assessment.get("risk_level", "none").lower()),
+                float(assessment.get("risk_score", 0)),
+                assessment.get("factors", [])
+            )
+    except Exception as e:
+        logger.error(f"Error in suicide risk assessment: {e}")
+
+    risk_score = min(0.1 * len(explicit_mentions), 0.9)
+    if risk_score > 0.7:
+        return RiskLevel.HIGH, risk_score, explicit_mentions
+    elif risk_score > 0.4:
+        return RiskLevel.MODERATE, risk_score, explicit_mentions
+    return RiskLevel.LOW, risk_score, explicit_mentions
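
The new code constructs RiskLevel from lowercase strings ("none", "low", "moderate", "high", "severe") and stores risk_level.value in Mongo, but models.py is not part of this commit. A minimal sketch of an enum consistent with that usage, offered as an assumption rather than the repository's actual definition:

from enum import Enum

class RiskLevel(str, Enum):
    # Hypothetical definition: values mirror the lowercase strings parsed from
    # the model's JSON reply; subclassing str keeps .value JSON/Mongo-friendly.
    NONE = "none"
    LOW = "low"
    MODERATE = "moderate"
    HIGH = "high"
    SEVERE = "severe"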
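Likewise, compute_file_content_hash and compute_patient_data_hash come from utils.py, which this commit does not touch. One plausible implementation, assuming SHA-256 digests; only the call signatures are taken from the diff:

import hashlib
import json

def compute_file_content_hash(file_content: bytes) -> str:
    # Assumed scheme: digest of the raw upload, used as a stand-in identifier
    # when no patient_id is supplied.
    return hashlib.sha256(file_content).hexdigest()

def compute_patient_data_hash(data: dict) -> str:
    # Assumed scheme: hash a canonical JSON rendering so key order cannot
    # change the digest; default=str covers datetimes and ObjectIds.
    canonical = json.dumps(data, sort_keys=True, default=str)
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()

Under a scheme like this, re-uploading identical content matches the stored (identifier, report_hash) pair and returns the cached analysis, while any content change yields a new hash and triggers a fresh run.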
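A sketch of calling the new entry point from an async context; the sample bytes and the "txt" file type are invented for illustration:

import asyncio
from analysis import analyze_patient_report

async def main():
    raw = b"Patient reports persistent hopelessness and thoughts of self-harm."
    analysis = await analyze_patient_report(
        patient_id=None,  # no FHIR id, so the identifier falls back to the content hash
        report_content=raw.decode("utf-8"),
        file_type="txt",
        file_content=raw,
    )
    print(analysis["suicide_risk"])  # {"level": ..., "score": ..., "factors": [...]}

asyncio.run(main())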