Twelve2five committed on
Commit 42a325e · verified · 1 Parent(s): 38edbec

Update app.py

Files changed (1):
  1. app.py +32 -45
app.py CHANGED
@@ -154,63 +154,50 @@ def use_gtts_for_text(text):
 # Enhanced WebRTC configuration with more STUN/TURN servers
 rtc_configuration = {
     "iceServers": [
-        # Google's public STUN servers
-        {"urls": ["stun:stun.l.google.com:19302", "stun:stun1.l.google.com:19302", "stun:stun2.l.google.com:19302", "stun:stun3.l.google.com:19302", "stun:stun4.l.google.com:19302"]},
-
-        # OpenRelay TURN servers (HTTP)
+        {"urls": ["stun:stun.l.google.com:19302", "stun:stun1.l.google.com:19302"]},
         {
             "urls": ["turn:openrelay.metered.ca:80"],
             "username": "openrelayproject",
             "credential": "openrelayproject"
         },
-
-        # OpenRelay TURN servers (HTTPS)
-        {
-            "urls": ["turn:openrelay.metered.ca:443"],
-            "username": "openrelayproject",
-            "credential": "openrelayproject"
-        },
-
-        # OpenRelay TURN servers (TCP)
         {
             "urls": ["turn:openrelay.metered.ca:443?transport=tcp"],
             "username": "openrelayproject",
             "credential": "openrelayproject"
-        },
-
-        # Additional public STUN servers
-        {"urls": ["stun:stun.stunprotocol.org:3478"]}
+        }
     ],
-    "iceCandidatePoolSize": 10  # Increase the pool size
+    "iceCandidatePoolSize": 10
 }
 
-# Set WebRTC logging level to "debug" for more information
-os.environ["WEBRTC_TRACE"] = "WEBRTC_TRACE_ALL"
+# Create Gradio chatbot and stream - following the exact cookbook pattern
+chatbot = gr.Chatbot(type="messages")
+stream = Stream(
+    modality="audio",
+    mode="send-receive",
+    handler=ReplyOnPause(response, input_sample_rate=16000),
+    additional_outputs_handler=lambda a, b: b,
+    additional_inputs=[chatbot],
+    additional_outputs=[chatbot],
+    rtc_configuration=rtc_configuration,
+    concurrency_limit=5 if get_space() else None,
+    time_limit=90 if get_space() else None,
+    ui_args={"title": "LLM Voice Chat (Powered by DeepSeek & ElevenLabs)"}
+)
+
+# Mount the Stream UI to FastAPI
+app = FastAPI()
+app = gr.mount_gradio_app(app, stream.ui, path="/")
 
-# Create Gradio interface with simple structure
-with gr.Blocks(title="LLM Voice Chat") as demo:
-    gr.Markdown("# LLM Voice Chat (Powered by DeepSeek & ElevenLabs)")
+# Only for local development
+if __name__ == "__main__":
+    import uvicorn
 
-    with gr.Row():
-        chatbot = gr.Chatbot(type="messages", label="Chat History")
+    os.environ["GRADIO_SSR_MODE"] = "false"
 
-    with gr.Row():
-        # Create the Stream component for handling audio
-        stream_comp = Stream(
-            modality="audio",
-            mode="send-receive",
-            handler=ReplyOnPause(response, input_sample_rate=16000),
-            additional_outputs_handler=lambda a, b: b,
-            additional_inputs=[chatbot],
-            additional_outputs=[chatbot],
-            rtc_configuration=rtc_configuration,
-            concurrency_limit=5 if get_space() else None,
-            time_limit=90 if get_space() else None,
-        )
-
-    # Make sure to render the Stream component
-    stream_comp.render()
-
-# For local development only
-if __name__ == "__main__" and not get_space():
-    demo.launch(share=True)
+    if get_space():
+        # When running in HF Spaces, use their port and host
+        port = int(os.environ.get("PORT", 7860))
+        uvicorn.run(app, host="0.0.0.0", port=port)
+    else:
+        # For local development
+        stream.ui.launch(server_port=7860)
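For context (annotation added here, not part of the commit): the surviving `rtc_configuration` is a plain RTCConfiguration-shaped dict, which fastrtc appears to forward to the browser's `RTCPeerConnection`, so each entry carries its standard WebRTC meaning:

```python
# The trimmed ICE configuration from this commit, with explanatory comments
# added; the comments are not in the committed code.
rtc_configuration = {
    "iceServers": [
        # STUN: lets each peer discover its public address (no media is relayed)
        {"urls": ["stun:stun.l.google.com:19302", "stun:stun1.l.google.com:19302"]},
        # TURN on port 80 (UDP by default): relays media when a direct path fails
        {
            "urls": ["turn:openrelay.metered.ca:80"],
            "username": "openrelayproject",
            "credential": "openrelayproject",
        },
        # TURN over TCP on 443: last resort behind restrictive firewalls/proxies
        {
            "urls": ["turn:openrelay.metered.ca:443?transport=tcp"],
            "username": "openrelayproject",
            "credential": "openrelayproject",
        },
    ],
    # Pre-gather ICE candidates so connection setup starts faster
    "iceCandidatePoolSize": 10,
}
```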
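The hunk reuses a `response` handler defined earlier in app.py, which this diff does not show. As a minimal, runnable sketch of the handler contract `ReplyOnPause` expects (the echo body is an assumption for illustration; the repo's real handler does STT → DeepSeek → ElevenLabs):

```python
# Hypothetical stand-in for the repo's `response` handler, showing only the
# fastrtc ReplyOnPause calling convention; it echoes the caller's audio back.
import gradio as gr
import numpy as np
from fastrtc import AdditionalOutputs, ReplyOnPause, Stream

def echo(audio: tuple[int, np.ndarray], chatbot: list | None = None):
    # ReplyOnPause invokes this once per detected pause with the buffered speech.
    sample_rate, audio_array = audio  # 16 kHz mono, per input_sample_rate=16000
    history = (chatbot or []) + [
        {"role": "assistant", "content": "(echoed your last utterance)"}
    ]
    yield (sample_rate, audio_array)  # audio chunks stream back to the browser
    yield AdditionalOutputs(history)  # routed through additional_outputs_handler

chatbot = gr.Chatbot(type="messages")
stream = Stream(
    modality="audio",
    mode="send-receive",
    handler=ReplyOnPause(echo, input_sample_rate=16000),
    additional_outputs_handler=lambda old, new: new,  # keep the newest history
    additional_inputs=[chatbot],
    additional_outputs=[chatbot],
)
```

With the committed layout, `python app.py` should serve the UI at http://localhost:7860 locally, while on Spaces uvicorn serves the mounted FastAPI app on the port from `$PORT`.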