Ali2206 committed
Commit 5b26f80 · verified · 1 Parent(s): 5eef1a8

Create analysis.py

Files changed (1):
  analysis.py +177 -0
analysis.py ADDED
@@ -0,0 +1,177 @@
+ import json
+ import re
+ from typing import Optional, Tuple, List
+ from datetime import datetime
+
+ from config import agent, patients_collection, analysis_collection, alerts_collection, logger
+ from models import RiskLevel
+ # serialize_patient and the two hash helpers are assumed to live in utils
+ # alongside structure_medical_response; they are used below but were not
+ # imported in the original file.
+ from utils import (
+     structure_medical_response,
+     serialize_patient,
+     compute_patient_data_hash,
+     compute_file_content_hash,
+ )
+
+
+ async def create_alert(patient_id: str, risk_data: dict):
+     """Persist an unacknowledged suicide-risk alert for a patient."""
+     alert_doc = {
+         "patient_id": patient_id,
+         "type": "suicide_risk",
+         "level": risk_data["level"],
+         "score": risk_data["score"],
+         "factors": risk_data["factors"],
+         "timestamp": datetime.utcnow(),
+         "acknowledged": False,
+     }
+     await alerts_collection.insert_one(alert_doc)
+     logger.warning(f"⚠️ Created suicide risk alert for patient {patient_id}")
+
+
+ async def analyze_patient_report(patient_id: Optional[str], report_content: str, file_type: str, file_content: bytes):
+     """Analyze an uploaded report, caching results by content hash."""
+     # Anonymous uploads have no patient_id and are keyed by a hash of the raw bytes.
+     identifier = patient_id if patient_id else compute_file_content_hash(file_content)
+     report_data = {"identifier": identifier, "content": report_content, "file_type": file_type}
+     report_hash = compute_patient_data_hash(report_data)
+     logger.info(f"🧾 Analyzing report for identifier: {identifier}")
+
+     # Skip the LLM call if this exact report has already been analyzed.
+     existing_analysis = await analysis_collection.find_one({"identifier": identifier, "report_hash": report_hash})
+     if existing_analysis:
+         logger.info(f"✅ No changes in report data for {identifier}, skipping analysis")
+         return existing_analysis
+
+     prompt = (
+         "You are a clinical decision support AI. Analyze the following patient report:\n"
+         "1. Summarize the patient's medical history.\n"
+         "2. Identify risks or red flags (including mental health and suicide risk).\n"
+         "3. Highlight missed diagnoses or treatments.\n"
+         "4. Suggest next clinical steps.\n"
+         f"\nPatient Report ({file_type}):\n{'-'*40}\n{report_content[:10000]}"
+     )
+
+     raw_response = agent.chat(
+         message=prompt,
+         history=[],
+         temperature=0.7,
+         max_new_tokens=1024,
+     )
+     structured_response = structure_medical_response(raw_response)
+
+     risk_level, risk_score, risk_factors = detect_suicide_risk(raw_response)
+     suicide_risk = {
+         "level": risk_level.value,
+         "score": risk_score,
+         "factors": risk_factors,
+     }
+
+     analysis_doc = {
+         "identifier": identifier,
+         "patient_id": patient_id,
+         "timestamp": datetime.utcnow(),
+         "summary": structured_response,
+         "suicide_risk": suicide_risk,
+         "raw": raw_response,
+         "report_hash": report_hash,
+         "file_type": file_type,
+     }
+
+     await analysis_collection.update_one(
+         {"identifier": identifier, "report_hash": report_hash},
+         {"$set": analysis_doc},
+         upsert=True,
+     )
+
+     # Alerts require a known patient; anonymous uploads are only stored.
+     if patient_id and risk_level in [RiskLevel.MODERATE, RiskLevel.HIGH, RiskLevel.SEVERE]:
+         await create_alert(patient_id, suicide_risk)
+
+     logger.info(f"✅ Stored analysis for identifier {identifier}")
+     return analysis_doc
+
+
+ async def analyze_patient(patient: dict):
+     """Analyze a FHIR patient document, re-running only when the data changes."""
+     try:
+         serialized = serialize_patient(patient)
+         patient_id = serialized.get("fhir_id")
+         patient_hash = compute_patient_data_hash(serialized)
+         logger.info(f"🧾 Analyzing patient: {patient_id}")
+
+         existing_analysis = await analysis_collection.find_one({"patient_id": patient_id})
+         if existing_analysis and existing_analysis.get("data_hash") == patient_hash:
+             logger.info(f"✅ No changes in patient data for {patient_id}, skipping analysis")
+             return
+
+         doc = json.dumps(serialized, indent=2)
+         message = (
+             "You are a clinical decision support AI.\n\n"
+             "Given the patient document below:\n"
+             "1. Summarize the patient's medical history.\n"
+             "2. Identify risks or red flags (including mental health and suicide risk).\n"
+             "3. Highlight missed diagnoses or treatments.\n"
+             "4. Suggest next clinical steps.\n"
+             f"\nPatient Document:\n{'-'*40}\n{doc[:10000]}"
+         )
+
+         raw = agent.chat(message=message, history=[], temperature=0.7, max_new_tokens=1024)
+         structured = structure_medical_response(raw)
+
+         risk_level, risk_score, risk_factors = detect_suicide_risk(raw)
+         suicide_risk = {
+             "level": risk_level.value,
+             "score": risk_score,
+             "factors": risk_factors,
+         }
+
+         analysis_doc = {
+             "identifier": patient_id,
+             "patient_id": patient_id,
+             "timestamp": datetime.utcnow(),
+             "summary": structured,
+             "suicide_risk": suicide_risk,
+             "raw": raw,
+             "data_hash": patient_hash,
+         }
+
+         await analysis_collection.update_one(
+             {"identifier": patient_id},
+             {"$set": analysis_doc},
+             upsert=True,
+         )
+
+         if risk_level in [RiskLevel.MODERATE, RiskLevel.HIGH, RiskLevel.SEVERE]:
+             await create_alert(patient_id, suicide_risk)
+
+         logger.info(f"✅ Stored analysis for patient {patient_id}")
+
+     except Exception as e:
+         logger.error(f"Error analyzing patient: {e}")
+
+
+ def detect_suicide_risk(text: str) -> Tuple[RiskLevel, float, List[str]]:
+     """Two-stage screen: cheap keyword filter first, then an LLM assessment."""
+     suicide_keywords = [
+         'suicide', 'suicidal', 'kill myself', 'end my life',
+         'want to die', 'self-harm', 'self harm', 'hopeless',
+         'no reason to live', 'plan to die'
+     ]
+     explicit_mentions = [kw for kw in suicide_keywords if kw in text.lower()]
+     if not explicit_mentions:
+         return RiskLevel.NONE, 0.0, []
+
+     assessment_prompt = (
+         "Assess the suicide risk level based on this text. "
+         "Consider frequency, specificity, and severity of statements. "
+         "Respond with JSON format: {\"risk_level\": \"low/moderate/high/severe\", "
+         "\"risk_score\": 0-1, \"factors\": [\"list of risk factors\"]}\n\n"
+         f"Text to assess:\n{text}"
+     )
+
+     try:
+         response = agent.chat(
+             message=assessment_prompt,
+             history=[],
+             temperature=0.2,
+             max_new_tokens=256,
+         )
+         # Extract the first JSON object from the model's free-text reply.
+         json_match = re.search(r'\{.*\}', response, re.DOTALL)
+         if json_match:
+             assessment = json.loads(json_match.group())
+             return (
+                 RiskLevel(assessment.get("risk_level", "none").lower()),
+                 float(assessment.get("risk_score", 0)),
+                 assessment.get("factors", []),
+             )
+     except Exception as e:
+         logger.error(f"Error in suicide risk assessment: {e}")
+
+     # Fallback heuristic: score by the number of explicit keyword hits.
+     risk_score = min(0.1 * len(explicit_mentions), 0.9)
+     if risk_score > 0.7:
+         return RiskLevel.HIGH, risk_score, explicit_mentions
+     elif risk_score > 0.4:
+         return RiskLevel.MODERATE, risk_score, explicit_mentions
+     return RiskLevel.LOW, risk_score, explicit_mentions
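
For context, a minimal sketch of how the new coroutine might be exercised. The sample text and the asyncio.run entry point are illustrative only, not part of this commit; they assume the config, models, and utils modules are importable:

import asyncio
from analysis import analyze_patient_report

# Hypothetical report text; any string containing an explicit keyword
# will trigger the two-stage assessment in detect_suicide_risk.
sample = "Patient reports feeling hopeless and states there is no reason to live."

async def main():
    # With patient_id=None the result is keyed by a hash of the raw
    # bytes, and no alert is written even at elevated risk levels.
    result = await analyze_patient_report(
        patient_id=None,
        report_content=sample,
        file_type="txt",
        file_content=sample.encode("utf-8"),
    )
    print(result["suicide_risk"])

asyncio.run(main())

Calling it twice with the same bytes should return the cached document on the second call, since the lookup matches on both identifier and report_hash.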