Ali2206 committed on
Commit
a8d2446
·
verified ·
1 Parent(s): 3d25835

Create endpoints.py

Browse files
Files changed (1) hide show
  1. endpoints.py +242 -0
endpoints.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import asyncio
import base64
import io
import mimetypes
import re
from datetime import datetime
from typing import Optional

from bson import ObjectId
from docx import Document
from fastapi import Depends, File, Form, HTTPException, Query, UploadFile
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse, StreamingResponse

from analysis import analyze_all_patients, analyze_patient_report
from auth import get_current_user
from config import app, agent, logger
from models import ChatRequest, RiskLevel, VoiceOutputRequest
from utils import clean_text_response
from voice import extract_text_from_pdf, recognize_speech, text_to_speech
17
@app.get("/status")
async def status(current_user: dict = Depends(get_current_user)):
    """Health-check endpoint: report service state, version and feature list.

    Requires an authenticated user; the access is logged with the user's email.
    """
    logger.info(f"Status endpoint accessed by {current_user['email']}")
    payload = {
        "status": "running",
        "timestamp": datetime.utcnow().isoformat(),
        "version": "2.6.0",
        "features": [
            "chat",
            "voice-input",
            "voice-output",
            "patient-analysis",
            "report-upload",
        ],
    }
    return payload
26
+
27
@app.get("/patients/analysis-results")
async def get_patient_analysis_results(
    name: Optional[str] = Query(None),
    current_user: dict = Depends(get_current_user)
):
    """Return stored patient analysis results, newest first (max 100).

    Args:
        name: optional patient-name filter, matched case-insensitively as a
            regex against ``full_name`` (user input is treated as a pattern,
            so regex metacharacters are significant).
        current_user: injected authenticated user.

    Returns:
        A list of analysis documents enriched with ``full_name``; analyses
        whose patient record cannot be found are omitted (same behavior as
        the original implementation).

    Raises:
        HTTPException 500 on any database or processing error.
    """
    # NOTE(review): `patients_collection` and `analysis_collection` are not
    # imported in this module — presumably module-level globals provided
    # elsewhere (e.g. via config); verify before deploying.
    logger.info(f"Fetching analysis results by {current_user['email']}")
    try:
        query = {}
        if name:
            name_regex = re.compile(name, re.IGNORECASE)
            matching_patients = await patients_collection.find({"full_name": name_regex}).to_list(length=None)
            patient_ids = [p["fhir_id"] for p in matching_patients if "fhir_id" in p]
            if not patient_ids:
                return []
            query = {"patient_id": {"$in": patient_ids}}

        analyses = await analysis_collection.find(query).sort("timestamp", -1).to_list(length=100)

        # Batch-fetch all referenced patients in ONE query instead of a
        # find_one per analysis (avoids N+1 database round-trips).
        referenced_ids = {a.get("patient_id") for a in analyses if a.get("patient_id") is not None}
        name_by_id = {}
        if referenced_ids:
            patients = await patients_collection.find(
                {"fhir_id": {"$in": list(referenced_ids)}}
            ).to_list(length=None)
            name_by_id = {
                p["fhir_id"]: p.get("full_name", "Unknown")
                for p in patients if "fhir_id" in p
            }

        enriched_results = []
        for analysis in analyses:
            pid = analysis.get("patient_id")
            if pid in name_by_id:  # keep original behavior: skip orphaned analyses
                analysis["full_name"] = name_by_id[pid]
                analysis["_id"] = str(analysis["_id"])  # ObjectId is not JSON-serializable
                enriched_results.append(analysis)

        return enriched_results

    except Exception as e:
        logger.error(f"Error fetching analysis results: {e}")
        raise HTTPException(status_code=500, detail="Failed to retrieve analysis results")
57
+
58
@app.post("/chat-stream")
async def chat_stream_endpoint(
    request: ChatRequest,
    current_user: dict = Depends(get_current_user)
):
    """Stream a chat completion back as plain-text, whitespace-delimited chunks.

    Builds a conversation from the agent's system prompt, the optional
    request history, and the new user message, generates a reply with the
    local model, then drip-feeds the reply word by word (50 ms apart).

    Raises: nothing to the client — generation errors are logged and emitted
    inline into the stream as a "⚠️ Error" chunk.
    """
    logger.info(f"Chat stream initiated by {current_user['email']}")

    def _generate_reply() -> str:
        # Blocking HF-style generation. Runs in a worker thread (see
        # asyncio.to_thread below) so the event loop stays responsive;
        # the original called this directly inside the async generator,
        # freezing all other requests for the duration of generation.
        conversation = [{"role": "system", "content": agent.chat_prompt}]
        if request.history:
            conversation.extend(request.history)
        conversation.append({"role": "user", "content": request.message})

        input_ids = agent.tokenizer.apply_chat_template(
            conversation, add_generation_prompt=True, return_tensors="pt"
        ).to(agent.device)

        output = agent.model.generate(
            input_ids,
            do_sample=True,
            temperature=request.temperature,
            max_new_tokens=request.max_new_tokens,
            pad_token_id=agent.tokenizer.eos_token_id,
            return_dict_in_generate=True
        )

        # Decode only the newly generated tokens (slice off the prompt prefix).
        return agent.tokenizer.decode(
            output["sequences"][0][input_ids.shape[1]:], skip_special_tokens=True
        )

    async def token_stream():
        try:
            text = await asyncio.to_thread(_generate_reply)
            for chunk in text.split():
                yield chunk + " "
                await asyncio.sleep(0.05)
        except Exception as e:
            logger.error(f"Streaming error: {e}")
            yield f"⚠️ Error: {e}"

    return StreamingResponse(token_stream(), media_type="text/plain")
93
+
94
@app.post("/voice/transcribe")
async def transcribe_voice(
    audio: UploadFile = File(...),
    language: str = Query("en-US", description="Language code for speech recognition"),
    current_user: dict = Depends(get_current_user)
):
    """Transcribe an uploaded audio file (wav/mp3/ogg/flac) to text.

    Returns: ``{"text": <transcription>}``.

    Raises:
        HTTPException 400 for an unsupported (or missing) filename extension,
        HTTPException 500 for any recognition failure.
    """
    logger.info(f"Voice transcription initiated by {current_user['email']}")
    try:
        # Validate the extension BEFORE reading the body into memory, and
        # guard against a missing filename: UploadFile.filename can be None,
        # which previously raised AttributeError and surfaced as a 500.
        filename = audio.filename or ""
        if not filename.lower().endswith(('.wav', '.mp3', '.ogg', '.flac')):
            raise HTTPException(status_code=400, detail="Unsupported audio format")

        audio_data = await audio.read()
        text = recognize_speech(audio_data, language)
        return {"text": text}

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in voice transcription: {e}")
        raise HTTPException(status_code=500, detail="Error processing voice input")
114
+
115
@app.post("/voice/synthesize")
async def synthesize_voice(
    request: VoiceOutputRequest,
    current_user: dict = Depends(get_current_user)
):
    """Convert text to speech and return MP3 audio.

    The response shape depends on ``request.return_format``: "base64" yields
    a JSON object with a base64-encoded payload; anything else streams the
    MP3 bytes as a file attachment.
    """
    logger.info(f"Voice synthesis initiated by {current_user['email']}")
    try:
        mp3_bytes = text_to_speech(request.text, request.language, request.slow)

        if request.return_format != "base64":
            # Default path: stream the MP3 back as a downloadable attachment.
            return StreamingResponse(
                io.BytesIO(mp3_bytes),
                media_type="audio/mpeg",
                headers={"Content-Disposition": "attachment; filename=speech.mp3"},
            )
        return {"audio": base64.b64encode(mp3_bytes).decode('utf-8')}

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in voice synthesis: {e}")
        raise HTTPException(status_code=500, detail="Error generating voice output")
138
+
139
@app.post("/voice/chat")
async def voice_chat_endpoint(
    audio: UploadFile = File(...),
    language: str = Query("en-US", description="Language code for speech recognition"),
    temperature: float = Query(0.7, ge=0.1, le=1.0),
    max_new_tokens: int = Query(512, ge=50, le=1024),
    current_user: dict = Depends(get_current_user)
):
    """Round-trip voice chat: speech in → model reply → speech out (MP3).

    Transcribes the uploaded audio, feeds the text to the chat agent with an
    empty history, synthesizes the reply, and streams it back as an MP3
    attachment.
    """
    logger.info(f"Voice chat initiated by {current_user['email']}")
    try:
        recorded_bytes = await audio.read()
        user_message = recognize_speech(recorded_bytes, language)

        chat_response = agent.chat(
            message=user_message,
            history=[],
            temperature=temperature,
            max_new_tokens=max_new_tokens
        )

        # Synthesis takes only the base language code ("en-US" -> "en").
        audio_data = text_to_speech(chat_response, language.split('-')[0])

        return StreamingResponse(
            io.BytesIO(audio_data),
            media_type="audio/mpeg",
            headers={"Content-Disposition": "attachment; filename=response.mp3"},
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in voice chat: {e}")
        raise HTTPException(status_code=500, detail="Error processing voice chat")
172
+
173
def _extract_report_text(file_content: bytes, content_type: str) -> str:
    """Extract plain text from PDF / TXT / DOCX bytes.

    The caller validates ``content_type`` against its allow-list first, so
    any type that is neither PDF nor plain text is DOCX here (the original
    code carried an unreachable fourth branch).
    """
    if content_type == 'application/pdf':
        return extract_text_from_pdf(file_content)
    if content_type == 'text/plain':
        return file_content.decode('utf-8')
    # DOCX: join paragraph texts with newlines.
    doc = Document(io.BytesIO(file_content))
    return "\n".join(para.text for para in doc.paragraphs)

@app.post("/analyze-report")
async def analyze_clinical_report(
    file: UploadFile = File(...),
    patient_id: Optional[str] = Form(None),
    temperature: float = Form(0.5),
    max_new_tokens: int = Form(1024),
    current_user: dict = Depends(get_current_user)
):
    """Analyze an uploaded clinical report (PDF, TXT or DOCX).

    Extracts and cleans the report text, runs the analysis pipeline, and
    returns a JSON envelope with the analysis, file type and size.

    Raises:
        HTTPException 400 for unsupported content types or text shorter than
        50 characters after cleaning; HTTPException 500 for pipeline errors.
    """
    # NOTE(review): `temperature` and `max_new_tokens` are accepted but never
    # forwarded to analyze_patient_report — confirm whether they should be.
    logger.info(f"Report analysis initiated by {current_user['email']}")
    try:
        content_type = file.content_type
        allowed_types = [
            'application/pdf',
            'text/plain',
            'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
        ]

        if content_type not in allowed_types:
            raise HTTPException(
                status_code=400,
                detail=f"Unsupported file type: {content_type}. Supported types: PDF, TXT, DOCX"
            )

        file_content = await file.read()
        text = _extract_report_text(file_content, content_type)

        text = clean_text_response(text)
        if len(text.strip()) < 50:
            raise HTTPException(
                status_code=400,
                detail="Extracted text is too short (minimum 50 characters required)"
            )

        analysis = await analyze_patient_report(
            patient_id=patient_id,
            report_content=text,
            file_type=content_type,
            file_content=file_content
        )

        # Make Mongo-specific values JSON-serializable before encoding.
        if "_id" in analysis and isinstance(analysis["_id"], ObjectId):
            analysis["_id"] = str(analysis["_id"])
        if "timestamp" in analysis and isinstance(analysis["timestamp"], datetime):
            analysis["timestamp"] = analysis["timestamp"].isoformat()

        return JSONResponse(content=jsonable_encoder({
            "status": "success",
            "analysis": analysis,
            "patient_id": patient_id,
            "file_type": content_type,
            "file_size": len(file_content)
        }))

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in report analysis: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to analyze report: {str(e)}"
        )