Spaces:
Paused
Paused
Upload 3 files
Browse files- README.md +8 -13
- app.py +41 -0
- requirements.txt +7 -0
README.md
CHANGED
|
@@ -1,14 +1,9 @@
|
|
| 1 |
-
|
| 2 |
-
title: FluentQ
|
| 3 |
-
emoji: 🦀
|
| 4 |
-
colorFrom: yellow
|
| 5 |
-
colorTo: red
|
| 6 |
-
sdk: gradio
|
| 7 |
-
sdk_version: 5.24.0
|
| 8 |
-
app_file: app.py
|
| 9 |
-
pinned: false
|
| 10 |
-
license: apache-2.0
|
| 11 |
-
short_description: AGI over telecom demo
|
| 12 |
-
---
|
| 13 |
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AGI Telecom POC
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
+
This is a full-stack voice interface system powered by an LLM, STT, TTS, and a WebRTC-ready frontend.
|
| 4 |
+
|
| 5 |
+
## Quick Start
|
| 6 |
+
```bash
|
| 7 |
+
pip install -r requirements.txt
|
| 8 |
+
uvicorn app:app --reload
|
| 9 |
+
```
|
app.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""FastAPI application exposing STT / LLM / TTS endpoints plus a static frontend.

Served as a Hugging Face Space (the Space config declares app_file: app.py).

Endpoints:
    POST /transcribe  -- multipart audio upload -> {"transcription": str}
    POST /query       -- JSON {"input_text": str} -> {"response": str}
    GET  /speak?text= -- streams synthesized WAV audio
    /gradio           -- minimal Gradio UI over the agent
    /                 -- static frontend (directory "frontend")
"""
import io

import gradio as gr
from fastapi import FastAPI, File, Request, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from fastapi.staticfiles import StaticFiles

# NOTE(review): this top-level module is app.py while the project package is
# also named "app" (app.agent, app.speech_to_text, ...). Depending on the
# working directory, the module can shadow the package -- confirm the layout.
from app.agent import process_text
from app.speech_to_text import transcribe_audio
from app.text_to_speech import synthesize_speech

app = FastAPI()

# NOTE(review): allow_origins=["*"] is wide open -- acceptable for a demo
# Space, but tighten before any production deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.post("/transcribe")
async def transcribe(file: UploadFile = File(...)):
    """Transcribe an uploaded audio file to text via the STT backend."""
    audio_bytes = await file.read()
    text = transcribe_audio(audio_bytes)
    return {"transcription": text}


@app.post("/query")
async def query_agent(request: Request):
    """Run the LLM agent on a JSON body of the form {"input_text": "..."}.

    Missing or absent "input_text" falls back to the empty string rather
    than erroring, matching the original best-effort behavior.
    """
    data = await request.json()
    input_text = data.get("input_text", "")
    response = process_text(input_text)
    return {"response": response}


@app.get("/speak")
async def speak(text: str):
    """Synthesize speech for *text* and stream the result back as WAV."""
    audio = synthesize_speech(text)
    return StreamingResponse(io.BytesIO(audio), media_type="audio/wav")


# FIX: gr.mount_gradio_app requires a Blocks instance and a mount path; the
# original call passed None and omitted the path, which fails at startup.
# A minimal Blocks UI over the agent keeps the Space's Gradio entry working.
with gr.Blocks() as demo:
    user_input = gr.Textbox(label="Input")
    agent_output = gr.Textbox(label="Response")
    user_input.submit(process_text, inputs=user_input, outputs=agent_output)

# Required for Hugging Face Spaces
gradio_app = gr.mount_gradio_app(app, demo, path="/gradio")

# FIX: the static mount at "/" must be registered LAST. Starlette matches
# routes in registration order, so mounting "/" before the API routes (as
# the original did) shadowed /transcribe, /query, /speak and /gradio.
app.mount("/", StaticFiles(directory="frontend", html=True), name="frontend")
|
requirements.txt
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio
|
| 2 |
+
fastapi
|
| 3 |
+
uvicorn
|
| 4 |
+
python-multipart
|
| 5 |
+
openai-whisper
|
| 6 |
+
llama-cpp-python
|
| 7 |
+
edge-tts
|