Twelve2five committed
Commit 1cce1a4 · verified · 1 Parent(s): 42a325e

Update app.py

Files changed (1):
  1. app.py +6 -16
app.py CHANGED
@@ -4,7 +4,6 @@ import gradio as gr
 import numpy as np
 from dotenv import load_dotenv
 from elevenlabs import ElevenLabs
-from fastapi import FastAPI
 from fastrtc import (
     Stream,
     get_stt_model,
@@ -169,7 +168,7 @@ rtc_configuration = {
     "iceCandidatePoolSize": 10
 }
 
-# Create Gradio chatbot and stream - following the exact cookbook pattern
+# Create Gradio chatbot and stream
 chatbot = gr.Chatbot(type="messages")
 stream = Stream(
     modality="audio",
@@ -184,20 +183,11 @@ stream = Stream(
     ui_args={"title": "LLM Voice Chat (Powered by DeepSeek & ElevenLabs)"}
 )
 
-# Mount the Stream UI to FastAPI
-app = FastAPI()
-app = gr.mount_gradio_app(app, stream.ui, path="/")
+# Export the UI for Hugging Face Spaces
+demo = stream.ui
 
-# Only for local development
-if __name__ == "__main__":
+# For local development only
+if __name__ == "__main__" and not get_space():
     import uvicorn
-
     os.environ["GRADIO_SSR_MODE"] = "false"
-
-    if get_space():
-        # When running in HF Spaces, use their port and host
-        port = int(os.environ.get("PORT", 7860))
-        uvicorn.run(app, host="0.0.0.0", port=port)
-    else:
-        # For local development
-        stream.ui.launch(server_port=7860)
+    stream.ui.launch(server_port=7860)
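
For context, the new tail of app.py relies on the Hugging Face Spaces Gradio SDK importing the module and serving a module-level Blocks object named `demo` itself, which removes the need to mount the UI on FastAPI and run uvicorn manually. Below is a minimal sketch of that pattern, with a plain `gr.Blocks` standing in for fastrtc's `stream.ui`; the chatbot UI contents here are illustrative only, and the commit additionally guards the local branch with `not get_space()`, which the sketch omits.

```python
# Minimal sketch of the Spaces launch pattern adopted by this commit
# (illustrative stand-in for app.py, not the full voice-chat app).
import os

import gradio as gr

# In the real app this Blocks object is fastrtc's stream.ui; a trivial
# chatbot UI stands in here so the sketch is self-contained.
with gr.Blocks(title="LLM Voice Chat (Powered by DeepSeek & ElevenLabs)") as ui:
    gr.Chatbot(type="messages")

# Hugging Face Spaces (Gradio SDK) imports this module and serves the
# module-level `demo` object itself, so no FastAPI mount or uvicorn call is needed.
demo = ui

# Local development only: when running `python app.py` outside a Space,
# launch the UI explicitly on port 7860 (the same port Spaces uses).
if __name__ == "__main__":
    os.environ["GRADIO_SSR_MODE"] = "false"  # plain local launch without SSR
    demo.launch(server_port=7860)
```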