File size: 2,083 Bytes
c578186 5b26f80 0344bc2 5b26f80 0344bc2 5b26f80 0344bc2 5b26f80 0344bc2 5b26f80 0344bc2 5b26f80 0344bc2 c578186 0344bc2 5b26f80 c578186 0344bc2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 |
import asyncio
from datetime import datetime, timezone

from config import agent, logger, analysis_collection
async def analyze_patient_report(patient_id, report_content, file_type, file_content):
    """Analyze a patient report for suicide risk and persist the result.

    Feeds the report text to the configured chat model, builds an analysis
    document, inserts it into ``analysis_collection`` and returns it.

    Args:
        patient_id: Identifier of the patient the report belongs to.
        report_content: Plain-text content of the report to analyze.
        file_type: Original file type of the report (stored for reference).
        file_content: Raw file bytes. Currently unused; kept for caller
            compatibility.

    Returns:
        dict: The analysis document that was inserted into the database.

    Raises:
        Exception: Re-raised after logging when any step fails.
    """
    try:
        # Build the chat conversation fed to the model.
        conversation = [
            {"role": "system", "content": agent.chat_prompt},
            {
                "role": "user",
                "content": f"Analyze this report for suicide risk: {report_content}",
            },
        ]
        input_ids = agent.tokenizer.apply_chat_template(
            conversation, add_generation_prompt=True, return_tensors="pt"
        ).to(agent.device)
        output = agent.model.generate(
            input_ids,
            do_sample=True,
            temperature=0.5,
            max_new_tokens=1024,
            pad_token_id=agent.tokenizer.eos_token_id,
            return_dict_in_generate=True,
        )
        # Decode only the newly generated tokens (slice off the prompt prefix).
        text = agent.tokenizer.decode(
            output["sequences"][0][input_ids.shape[1]:], skip_special_tokens=True
        )
        # Parse the text to extract risk level and score (simplified example)
        risk_level = "moderate"  # Replace with actual parsing logic
        risk_score = 0.7  # Replace with actual parsing logic
        analysis = {
            "patient_id": patient_id,
            "report_content": report_content,
            "file_type": file_type,
            # Timezone-aware UTC timestamp: datetime.utcnow() is deprecated
            # and returns a naive datetime, which is easy to misinterpret.
            "timestamp": datetime.now(timezone.utc),
            "suicide_risk": {
                "level": risk_level,
                "score": risk_score,
                "factors": ["depression", "isolation"],  # Example factors
            },
            "summary": {
                "summary": "Patient shows signs of moderate risk.",
                "recommendations": "Monitor closely and schedule follow-up.",
            },
        }
        await analysis_collection.insert_one(analysis)
        # Lazy %-args: the message is only formatted if the level is enabled.
        logger.info(
            "Analysis completed for patient %s at %s",
            patient_id,
            datetime.now(timezone.utc).isoformat(),
        )
        return analysis
    except Exception as e:
        # logger.exception records the full traceback alongside the message.
        logger.exception(
            "Error analyzing patient report: %s at %s",
            e,
            datetime.now(timezone.utc).isoformat(),
        )
        raise