tezuesh committed on
Commit
04cd4ef
·
verified ·
1 Parent(s): 581f5e4

Update server.py

Browse files
Files changed (1) hide show
  1. server.py +27 -3
server.py CHANGED
@@ -125,15 +125,31 @@ def health_check():
125
  # raise HTTPException(status_code=500, detail=str(e))
126
  @app.post("/api/v1/inference")
127
  async def inference(request: AudioRequest) -> AudioResponse:
 
 
 
 
 
 
 
128
  try:
129
- # Decode audio from base64
130
- audio_bytes = base64.b64decode(request.audio_data)
 
 
 
131
  audio_array = np.load(io.BytesIO(audio_bytes))
 
132
 
 
 
 
 
133
  # Run inference
134
  result = model.inference(audio_array, request.sample_rate)
 
135
 
136
- # Encode output audio
137
  buffer = io.BytesIO()
138
  np.save(buffer, result['audio'])
139
  audio_b64 = base64.b64encode(buffer.getvalue()).decode()
@@ -142,6 +158,14 @@ async def inference(request: AudioRequest) -> AudioResponse:
142
  audio_data=audio_b64,
143
  text=result.get("text", "")
144
  )
 
 
 
 
 
 
 
 
145
  if __name__ == "__main__":
146
  import uvicorn
147
  uvicorn.run(app, host="0.0.0.0", port=8000)
 
125
  # raise HTTPException(status_code=500, detail=str(e))
126
  @app.post("/api/v1/inference")
127
  async def inference(request: AudioRequest) -> AudioResponse:
128
+ """Run inference with enhanced error handling and logging"""
129
+ if not INITIALIZATION_STATUS["model_loaded"]:
130
+ raise HTTPException(
131
+ status_code=503,
132
+ detail=f"Model not ready. Status: {INITIALIZATION_STATUS}"
133
+ )
134
+
135
  try:
136
+ # Log input validation
137
+ logger.info(f"Received inference request with sample rate: {request.sample_rate}")
138
+
139
+ # Decode audio
140
+ audio_bytes = base64.b64decode(request.audio_data)
141
  audio_array = np.load(io.BytesIO(audio_bytes))
142
+ logger.info(f"Decoded audio array shape: {audio_array.shape}, dtype: {audio_array.dtype}")
143
 
144
+ # Validate input format
145
+ if len(audio_array.shape) != 2:
146
+ raise ValueError(f"Expected 2D audio array [C,T], got shape {audio_array.shape}")
147
+
148
  # Run inference
149
  result = model.inference(audio_array, request.sample_rate)
150
+ logger.info(f"Inference complete. Output shape: {result['audio'].shape}")
151
 
152
+ # Encode output
153
  buffer = io.BytesIO()
154
  np.save(buffer, result['audio'])
155
  audio_b64 = base64.b64encode(buffer.getvalue()).decode()
 
158
  audio_data=audio_b64,
159
  text=result.get("text", "")
160
  )
161
+
162
+ except Exception as e:
163
+ logger.error(f"Inference failed: {str(e)}", exc_info=True)
164
+ raise HTTPException(
165
+ status_code=500,
166
+ detail=str(e)
167
+ )
168
+
169
  if __name__ == "__main__":
170
  import uvicorn
171
  uvicorn.run(app, host="0.0.0.0", port=8000)