# realtime-api / etts-api-updated.py
# Echo-ai — "Update etts-api-updated.py" (commit cbbf559, verified)
# NOTE: the lines above were GitHub page-header artifacts pasted into the
# source; converted to comments so the file parses as Python.
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import asyncio
import os
import base64
from duckduckgo_search import DDGS
import edge_tts
import hashlib
from datetime import datetime
from typing import List, Optional
# --- Configuration constants -------------------------------------------------
# Model name advertised to clients (OpenAI-compatible tooling expects it).
EXPOSED_MODEL = "gpt-4o-realtime-preview-2024-12-17"
# Model actually used when querying DuckDuckGo Chat.
INTERNAL_MODEL = "gpt-4o-mini"
# edge-tts voice used for all speech synthesis.
VOICE = "en-US-AriaNeural"
# Use /tmp for writable storage in Docker.
AUDIO_DIR = "/tmp/audio_files"

# Ensure the audio directory exists. makedirs(exist_ok=True) already handles
# the "already present" case, so the separate os.path.exists() check was
# redundant (and racy between the check and the creation); drop it.
try:
    os.makedirs(AUDIO_DIR, exist_ok=True)
except PermissionError:
    print(f"Warning: Could not create {AUDIO_DIR}. Using default tmp if available.")

app = FastAPI(
    title="Chat to Speech API",
    description="OpenAI-compatible API with text and audio responses",
)
# OpenAI-compatible request model
class Message(BaseModel):
    """One chat message in OpenAI chat-completions format."""
    role: str      # e.g. "user" or "assistant" — only "user" is acted on below
    content: str   # the message text
class ChatRequest(BaseModel):
    """OpenAI-compatible request body for POST /v1/chat/completions."""
    model: str = EXPOSED_MODEL        # exposed model name for compatibility
    messages: List[Message]           # full conversation; only the last user message is used
    stream: Optional[bool] = False    # accepted for compatibility; streaming is not implemented here
# OpenAI-compatible response model with audio
class Choice(BaseModel):
    """One completion choice; extended with an inline audio payload."""
    index: int
    message: Message
    finish_reason: str = "stop"
    audio: Optional[dict] = None   # {"id": ..., "data": <base64 mp3>, "format": "mp3"}
class ChatResponse(BaseModel):
    """OpenAI-compatible chat.completion response envelope."""
    id: str                        # "chatcmpl-<hash>" id generated per request
    object: str = "chat.completion"
    created: int                   # unix timestamp
    model: str = EXPOSED_MODEL     # return the exposed model name, not INTERNAL_MODEL
    choices: List[Choice]
# Model listing for OpenWebUI compatibility
class Model(BaseModel):
    """Model-listing entry for /v1/models.

    NOTE(review): list_models() currently returns a plain dict literal and
    does not instantiate this class — confirm whether it is still needed.
    """
    id: str
    object: str = "model"
    created: int               # unix timestamp
    owned_by: str = "xAI"
@app.get("/v1/models")
async def list_models():
    """Expose the advertised model in OpenAI's /v1/models list format (OpenWebUI compatibility)."""
    entry = {
        "id": EXPOSED_MODEL,  # Show this to OpenWebUI
        "object": "model",
        "created": int(datetime.now().timestamp()),
        "owned_by": "xAI",
    }
    return {"object": "list", "data": [entry]}
async def text_to_speech(text: str, output_file: str) -> tuple[str, bytes]:
    """Synthesize *text* as MP3 via edge-tts, saving to *output_file*.

    Returns the output path together with the raw audio bytes that were written.
    """
    synthesizer = edge_tts.Communicate(text, VOICE)
    await synthesizer.save(output_file)
    # Read the freshly written file back so callers get the bytes inline.
    with open(output_file, "rb") as audio_fh:
        payload = audio_fh.read()
    return output_file, payload
def get_chat_response(query: str) -> str:
    """Get response from DuckDuckGo Chat using gpt-4o-mini.

    Any backend failure is surfaced to the caller as an HTTP 500.
    """
    # Prepend a persona/style prompt so replies stay short and TTS-friendly.
    system_prompt = "<system_prompt>Your name is Vani. Give short, natural responses under 100 words that sound like casual human speech. Avoid lists, technical jargon, or complex sentences. Keep it simple and friendly for easy text-to-speech conversion.</system_prompt>"
    prompt = f"{system_prompt}\n\n{query}"
    try:
        # Use gpt-4o-mini internally, regardless of the exposed model name.
        return DDGS().chat(prompt, model=INTERNAL_MODEL, timeout=30)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"Error: {str(exc)}")
async def process_chat(query: str) -> tuple[str, bytes]:
    """Run *query* through the chat backend and synthesize the reply as MP3.

    Returns (response_text, audio_bytes).
    Raises HTTPException(400) for empty or whitespace-only queries; backend
    errors propagate as HTTPException(500) from get_chat_response().
    """
    # Robustness fix: the original `if not query` let whitespace-only input
    # through to the chat backend; treat it as empty too.
    if not query or not query.strip():
        raise HTTPException(status_code=400, detail="Query cannot be empty")
    # Get chat response
    response_text = get_chat_response(query)
    # Unique output filename: timestamp + short MD5 of the query.
    # MD5 is fine here — non-cryptographic, used only to de-duplicate names.
    query_hash = hashlib.md5(query.encode()).hexdigest()[:8]
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    output_file = os.path.join(AUDIO_DIR, f"response_{timestamp}_{query_hash}.mp3")
    # Convert to speech and get audio bytes (file path return is unused here).
    _, audio_bytes = await text_to_speech(response_text, output_file)
    return response_text, audio_bytes
@app.post("/v1/chat/completions")
async def chat_completions(request: ChatRequest):
    """OpenAI-compatible chat endpoint; the reply embeds base64 MP3 audio inline."""
    # Only the most recent user message is answered; earlier context is ignored.
    query = None
    for msg in reversed(request.messages):
        if msg.role == "user":
            query = msg.content
            break
    if query is None:
        raise HTTPException(status_code=400, detail="No user message provided")

    # Process the query: chat reply plus synthesized speech.
    response_text, audio_bytes = await process_chat(query)
    audio_b64 = base64.b64encode(audio_bytes).decode("utf-8")

    # Build an OpenAI-compatible envelope with the audio attached to the choice.
    chat_id = f"chatcmpl-{hashlib.md5(str(datetime.now()).encode()).hexdigest()[:8]}"
    assistant_message = Message(role="assistant", content=response_text)
    audio_payload = {
        "id": f"audio-{chat_id}",
        "data": audio_b64,
        "format": "mp3",
    }
    return ChatResponse(
        id=chat_id,
        created=int(datetime.now().timestamp()),
        model=EXPOSED_MODEL,  # Show gpt-4o-realtime-preview-2024-12-17
        choices=[Choice(index=0, message=assistant_message, audio=audio_payload)],
    )
# Clean up audio files on shutdown
def cleanup():
    """Best-effort removal of generated "response_*.mp3" files in AUDIO_DIR."""
    try:
        entries = os.listdir(AUDIO_DIR)
    except OSError:
        # Fix: original crashed here if AUDIO_DIR was never created.
        return
    for name in entries:
        if name.startswith("response_") and name.endswith(".mp3"):
            try:
                os.remove(os.path.join(AUDIO_DIR, name))
            except OSError:
                # Fix: was a bare `except:` (swallowed SystemExit/KeyboardInterrupt);
                # only filesystem errors should be ignored in a best-effort cleanup.
                pass
# NOTE(review): @app.on_event is deprecated in newer FastAPI releases in favor
# of lifespan handlers — confirm the pinned FastAPI version before migrating.
@app.on_event("shutdown")
def shutdown_event():
    """Remove generated audio files when the server shuts down."""
    cleanup()
if __name__ == "__main__":
    # Run a development server; 0.0.0.0 binds all interfaces (port 7860 is the
    # conventional Hugging Face Spaces port).
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)