Twelve2five committed
Commit d518218 · verified · 1 Parent(s): c4620f8

Update app.py

Files changed (1)
  1. app.py +24 -2
app.py CHANGED

@@ -8,7 +8,6 @@ from fastapi import FastAPI
 from fastrtc import (
     Stream,
     get_stt_model,
-    get_twilio_turn_credentials,
     ReplyOnPause,
     AdditionalOutputs
 )
@@ -152,6 +151,28 @@ def use_gtts_for_text(text):
         print(f"gTTS error: {e}")
         yield None
 
+# Custom WebRTC configuration that doesn't require Twilio
+rtc_configuration = {
+    "iceServers": [
+        {"urls": ["stun:stun.l.google.com:19302"]},
+        {
+            "urls": ["turn:openrelay.metered.ca:80"],
+            "username": "openrelayproject",
+            "credential": "openrelayproject"
+        },
+        {
+            "urls": ["turn:openrelay.metered.ca:443"],
+            "username": "openrelayproject",
+            "credential": "openrelayproject"
+        },
+        {
+            "urls": ["turn:openrelay.metered.ca:443?transport=tcp"],
+            "username": "openrelayproject",
+            "credential": "openrelayproject"
+        }
+    ]
+}
+
 # Create Gradio chatbot and stream
 chatbot = gr.Chatbot(type="messages")
 stream = Stream(
@@ -161,7 +182,7 @@ stream = Stream(
     additional_outputs_handler=lambda a, b: b,
     additional_inputs=[chatbot],
     additional_outputs=[chatbot],
-    rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
+    rtc_configuration=rtc_configuration if get_space() else None,
     concurrency_limit=5 if get_space() else None,
     time_limit=90 if get_space() else None,
     ui_args={"title": "LLM Voice Chat (Powered by DeepSeek & ElevenLabs)"}
@@ -182,3 +203,4 @@ if __name__ == "__main__":
     stream.fastphone(host="0.0.0.0", port=7860)
 else:
     stream.ui.launch(server_port=7860)
+
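
The added rtc_configuration block hard-codes the public openrelay.metered.ca relay and its shared "openrelayproject" credentials. As a minimal sketch (not part of this commit), the same dict shape could be assembled from environment variables so a private TURN server can be swapped in without editing the code; the TURN_URL, TURN_USERNAME and TURN_CREDENTIAL names below are hypothetical:

import os

def build_rtc_configuration():
    # Start with a free public STUN server, as in the commit above.
    ice_servers = [{"urls": ["stun:stun.l.google.com:19302"]}]

    # Optionally append a TURN server taken from the environment.
    # These variable names are illustrative and not read by app.py.
    turn_url = os.environ.get("TURN_URL")            # e.g. "turn:openrelay.metered.ca:80"
    turn_username = os.environ.get("TURN_USERNAME")  # e.g. "openrelayproject"
    turn_credential = os.environ.get("TURN_CREDENTIAL")

    if turn_url and turn_username and turn_credential:
        ice_servers.append({
            "urls": [turn_url],
            "username": turn_username,
            "credential": turn_credential,
        })

    return {"iceServers": ice_servers}

The returned dict has the same shape as the literal that the diff passes to Stream(rtc_configuration=...), so it could replace it directly.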
 
 
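To check that these public ICE servers are actually reachable before deploying, one option (an illustration only; neither app.py nor fastrtc does this) is to feed the same server list to aiortc and inspect the candidates it gathers: "srflx" candidates mean the STUN server answered, "relay" candidates mean the TURN relay accepted the credentials.

import asyncio
from aiortc import RTCConfiguration, RTCIceServer, RTCPeerConnection

async def gather_ice_candidates():
    # Same servers as rtc_configuration above, expressed as aiortc objects.
    config = RTCConfiguration(iceServers=[
        RTCIceServer(urls="stun:stun.l.google.com:19302"),
        RTCIceServer(
            urls="turn:openrelay.metered.ca:80",
            username="openrelayproject",
            credential="openrelayproject",
        ),
    ])
    pc = RTCPeerConnection(configuration=config)
    pc.createDataChannel("probe")  # gives the offer something to negotiate
    # aiortc gathers ICE candidates while setting the local description,
    # so the resulting SDP contains one a=candidate line per candidate.
    await pc.setLocalDescription(await pc.createOffer())
    candidates = [
        line for line in pc.localDescription.sdp.splitlines()
        if line.startswith("a=candidate")
    ]
    await pc.close()
    return candidates

if __name__ == "__main__":
    for candidate in asyncio.run(gather_ice_candidates()):
        print(candidate)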