Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -6,27 +6,20 @@ from gtts import gTTS
|
|
6 |
from audiorecorder import audiorecorder
|
7 |
import speech_recognition as sr
|
8 |
from pydub import AudioSegment
|
9 |
-
from speech_recognition import Microphone, Recognizer
|
10 |
-
import pyttsx3
|
11 |
|
12 |
if "history" not in st.session_state:
|
13 |
st.session_state.history = []
|
14 |
|
15 |
def recognize_speech(audio_data, show_messages=True):
|
16 |
recognizer = sr.Recognizer()
|
17 |
-
|
18 |
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
engine.runAndWait()
|
23 |
-
else:
|
24 |
-
print("Micr贸fonos encontrados:")
|
25 |
-
for i, mic in enumerate(mic_list):
|
26 |
-
print(f"{i+1}. {mic}")
|
27 |
|
28 |
try:
|
29 |
-
audio_text = recognizer.recognize_google(
|
30 |
if show_messages:
|
31 |
st.subheader("Texto Reconocido:")
|
32 |
st.write(audio_text)
|
@@ -41,13 +34,14 @@ def recognize_speech(audio_data, show_messages=True):
|
|
41 |
return audio_text
|
42 |
|
43 |
def format_prompt(message, history):
|
44 |
-
|
45 |
|
46 |
for user_prompt, bot_response in history:
|
47 |
-
|
|
|
48 |
|
49 |
-
|
50 |
-
return
|
51 |
|
52 |
def generate(audio_text, history, temperature=None, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
|
53 |
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
@@ -91,7 +85,7 @@ def text_to_speech(text, speed=1.3):
|
|
91 |
|
92 |
def main():
|
93 |
st.title("Chatbot de Voz a Voz")
|
94 |
-
|
95 |
|
96 |
if not audio_data.empty():
|
97 |
st.audio(audio_data.export().read(), format="audio/wav")
|
|
|
6 |
from audiorecorder import audiorecorder
|
7 |
import speech_recognition as sr
|
8 |
from pydub import AudioSegment
|
|
|
|
|
9 |
|
10 |
if "history" not in st.session_state:
|
11 |
st.session_state.history = []
|
12 |
|
13 |
def recognize_speech(audio_data, show_messages=True):
|
14 |
recognizer = sr.Recognizer()
|
15 |
+
audio_recording = sr.Microphone(device_index=None, sample_rate=16000, chunk_size=1024)
|
16 |
|
17 |
+
with audio_recording as source:
|
18 |
+
recognizer.adjust_for_ambient_noise(source)
|
19 |
+
audio = recognizer.listen(source, timeout=5)
|
|
|
|
|
|
|
|
|
|
|
20 |
|
21 |
try:
|
22 |
+
audio_text = recognizer.recognize_google(audio, language="es-ES")
|
23 |
if show_messages:
|
24 |
st.subheader("Texto Reconocido:")
|
25 |
st.write(audio_text)
|
|
|
34 |
return audio_text
|
35 |
|
36 |
def format_prompt(message, history):
    """Assemble a Mixtral-Instruct prompt string from prior chat turns plus the new message.

    Each (user, bot) pair from *history* is rendered as
    ``[INST] user [/INST] bot</s> `` and the current *message* is appended
    as a final ``[INST] ... [/INST]`` block, all prefixed with ``<s>``.
    """
    segments = ["<s>"]
    for past_user, past_bot in history:
        # Replay one completed exchange in the model's expected template.
        segments.append(f"[INST] {past_user} [/INST]")
        segments.append(f" {past_bot}</s> ")
    # The turn we want the model to answer next.
    segments.append(f"[INST] {message} [/INST]")
    return "".join(segments)
|
45 |
|
46 |
def generate(audio_text, history, temperature=None, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
|
47 |
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
|
|
85 |
|
86 |
def main():
|
87 |
st.title("Chatbot de Voz a Voz")
|
88 |
+
audio_data = audiorecorder("Habla para grabar", "Deteniendo la grabaci贸n...")
|
89 |
|
90 |
if not audio_data.empty():
|
91 |
st.audio(audio_data.export().read(), format="audio/wav")
|