add _safe_enqueue_text_to_speak
__pycache__/clip_transform.cpython-39.pyc CHANGED
Binary files a/__pycache__/clip_transform.cpython-39.pyc and b/__pycache__/clip_transform.cpython-39.pyc differ
streaming_chat_service.py CHANGED

@@ -50,6 +50,18 @@ class StreamingChatService:
 
         text_to_speak = sentence[:last_termination_index+1]
         return text_to_speak
+
+    def _safe_enqueue_text_to_speak(self, text_to_speak):
+        # exit if empty, whitespace or a single bracket
+        if text_to_speak.isspace():
+            return
+        # exit if there are no letters or numbers
+        has_letters = any(char.isalpha() for char in text_to_speak)
+        has_numbers = any(char.isdigit() for char in text_to_speak)
+        if not has_letters and not has_numbers:
+            return
+        stream = self._speech_service.stream(text_to_speak)
+        self._audio_processor.add_audio_stream(stream)
 
     def respond_to(self, prompt):
         self._messages.append({"role": "user", "content": prompt})

@@ -72,14 +84,12 @@ class StreamingChatService:
                 agent_response += chunk_text
                 text_to_speak = self._should_we_send_to_voice(current_sentence)
                 if text_to_speak:
-                    stream = self._speech_service.stream(text_to_speak)
-                    self._audio_processor.add_audio_stream(stream)
+                    self._safe_enqueue_text_to_speak(text_to_speak)
                     print(text_to_speak)
                     current_sentence = current_sentence[len(text_to_speak):]
 
         if len(current_sentence) > 0:
-            stream = self._speech_service.stream(current_sentence)
-            self._audio_processor.add_audio_stream(stream)
+            self._safe_enqueue_text_to_speak(current_sentence)
             print(current_sentence)
         self._messages.append({"role": "assistant", "content": agent_response})
         return agent_response
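
The guard added in _safe_enqueue_text_to_speak can be exercised on its own. Below is a minimal, self-contained sketch of that logic: FakeSpeechService and FakeAudioProcessor are hypothetical stubs standing in for the real speech service and audio processor, used only to show which streamed chunks are filtered out before a TTS stream is enqueued.

# Minimal sketch of the new guard, with hypothetical stubs in place of the
# real speech service and audio processor from streaming_chat_service.py.

class FakeSpeechService:
    def stream(self, text):
        # Stand-in: the real service would return an audio stream for `text`.
        return f"<stream:{text!r}>"


class FakeAudioProcessor:
    def __init__(self):
        self.queue = []

    def add_audio_stream(self, stream):
        self.queue.append(stream)


class SafeEnqueueDemo:
    def __init__(self):
        self._speech_service = FakeSpeechService()
        self._audio_processor = FakeAudioProcessor()

    def _safe_enqueue_text_to_speak(self, text_to_speak):
        # Skip whitespace-only chunks.
        if text_to_speak.isspace():
            return
        # Skip chunks with no letters or digits (e.g. a lone bracket or "...").
        has_letters = any(char.isalpha() for char in text_to_speak)
        has_numbers = any(char.isdigit() for char in text_to_speak)
        if not has_letters and not has_numbers:
            return
        stream = self._speech_service.stream(text_to_speak)
        self._audio_processor.add_audio_stream(stream)


if __name__ == "__main__":
    demo = SafeEnqueueDemo()
    for chunk in ["Hello there.", "   ", ")", "Answer: 42."]:
        demo._safe_enqueue_text_to_speak(chunk)
    # Only "Hello there." and "Answer: 42." reach the audio queue.
    print(demo._audio_processor.queue)

Keeping the check in one helper means both call sites in respond_to (the mid-stream sentences and the trailing fragment) skip chunks such as lone brackets or bare whitespace instead of sending them to the speech service.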