Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
import streamlit as st
|
2 |
from huggingface_hub import InferenceClient
|
3 |
from gtts import gTTS
|
4 |
-
import
|
5 |
|
6 |
# Establecer el cliente de inferencia
|
7 |
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
@@ -40,8 +40,7 @@ def text_to_speech(text):
|
|
40 |
def generate(user_input, history, temperature=None, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
|
41 |
global system_prompt_sent
|
42 |
temperature = float(temperature) if temperature is not None else 0.9
|
43 |
-
|
44 |
-
temperature = 1e-2
|
45 |
top_p = float(top_p)
|
46 |
|
47 |
generate_kwargs = dict(
|
@@ -55,14 +54,16 @@ def generate(user_input, history, temperature=None, max_new_tokens=2048, top_p=0
|
|
55 |
|
56 |
formatted_prompt = format_prompt(user_input, history)
|
57 |
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
for response in stream:
|
62 |
-
output = response.token.text
|
63 |
-
st.markdown(f"<div style='{response_style} text-align: justify;'>{output}</div>", unsafe_allow_html=True)
|
64 |
-
ipd.Audio(text_to_speech(output), autoplay=True)
|
65 |
|
|
|
|
|
|
|
|
|
|
|
|
|
66 |
return output
|
67 |
|
68 |
# Obtener la historia del chat desde la sesión de Streamlit
|
@@ -70,6 +71,7 @@ if "history" not in st.session_state:
|
|
70 |
st.session_state.history = []
|
71 |
|
72 |
# Interfaz de usuario
|
|
|
73 |
user_input = st.text_input(label="Usuario", value="Escribe aquí tu mensaje")
|
74 |
if st.button("Enviar"):
|
75 |
output = generate(user_input, history=st.session_state.history)
|
|
|
1 |
import streamlit as st
|
2 |
from huggingface_hub import InferenceClient
|
3 |
from gtts import gTTS
|
4 |
+
import streamlit.components.v1 as stc
|
5 |
|
6 |
# Establecer el cliente de inferencia
|
7 |
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
|
|
40 |
def generate(user_input, history, temperature=None, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
|
41 |
global system_prompt_sent
|
42 |
temperature = float(temperature) if temperature is not None else 0.9
|
43 |
+
temperature = max(temperature, 0.01) # Asegurar que la temperatura sea al menos 0.01
|
|
|
44 |
top_p = float(top_p)
|
45 |
|
46 |
generate_kwargs = dict(
|
|
|
54 |
|
55 |
formatted_prompt = format_prompt(user_input, history)
|
56 |
|
57 |
+
with st.spinner('Generando respuesta...'):
|
58 |
+
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
|
59 |
+
output = ""
|
|
|
|
|
|
|
|
|
60 |
|
61 |
+
for response in stream:
|
62 |
+
output = response.token.text
|
63 |
+
st.markdown(f"<div style='{response_style} text-align: justify;'>{output}</div>", unsafe_allow_html=True)
|
64 |
+
audio_file = text_to_speech(output)
|
65 |
+
st.audio(audio_file, format="audio/mp3", start_time=0)
|
66 |
+
|
67 |
return output
|
68 |
|
69 |
# Obtener la historia del chat desde la sesión de Streamlit
|
|
|
71 |
st.session_state.history = []
|
72 |
|
73 |
# Interfaz de usuario
|
74 |
+
st.title("Chatbot Mejorado")
|
75 |
user_input = st.text_input(label="Usuario", value="Escribe aquí tu mensaje")
|
76 |
if st.button("Enviar"):
|
77 |
output = generate(user_input, history=st.session_state.history)
|