Spaces:
Running
Running
File size: 3,068 Bytes
7d2c473 8160127 a79a56f 5100159 b676cee f8a4c47 b676cee 946827a 1fe1359 a0a031a b676cee c501864 a0a031a c501864 af78765 c501864 af78765 c501864 a0a031a b676cee 3d98a19 a79a56f b676cee e000aac c501864 3bbf5c2 e000aac d9f7657 3a39ae5 4a766c1 e000aac f27b29a 770c41b e000aac b676cee 770c41b 32b3b4c 5100159 e000aac ce0dbfb b676cee cb046ad 5c113c1 b676cee 3a39ae5 743c6be 3a39ae5 b676cee 3a39ae5 b676cee e000aac 3a39ae5 b676cee 743c6be |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 |
import streamlit as st
from huggingface_hub import InferenceClient
from gtts import gTTS
# Initialize the Hugging Face inference client for the Mixtral instruct model.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
# System prompt injected once at the start of a conversation.
system_prompt = "Tu nombre es Xaman 3.0"
# Flag recording whether the system prompt has been emitted
# (kept for backward compatibility with the rest of the module).
system_prompt_sent = False

# Build the instruction-formatted prompt for the model.
def format_prompt(message, history):
    """Build a Mixtral-style instruction prompt from the chat history.

    Parameters:
        message: the new user message (str).
        history: previous (user, bot) exchanges as a list of 2-tuples,
            or None/non-list for a fresh conversation.

    Returns:
        The formatted prompt string, starting with "<s>".
    """
    global system_prompt_sent
    prompt = "<s>"
    system_tag = f"[INST] {system_prompt} [/INST]"
    # Treat None (or any non-list) the same as an empty history.
    pairs = history if isinstance(history, list) else []
    # Bug fix: previously the system prompt was only prepended when a
    # non-empty list history was supplied; a None history skipped it.
    if not any(system_tag in user_prompt for user_prompt, _ in pairs):
        prompt += system_tag
        system_prompt_sent = True
    for user_prompt, bot_response in pairs:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
# Convert a text response into spoken audio.
def text_to_speech(text):
    """Render *text* as Spanish speech via gTTS and save it to 'output.mp3'.

    Returns the path of the generated MP3 file.
    """
    output_path = 'output.mp3'
    speech = gTTS(text=text, lang='es')
    speech.save(output_path)
    return output_path
# Generate a streamed model response and display it in the UI.
def generate(
    user_input, history, temperature=None, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0,
):
    """Query the Mixtral model and return its cleaned-up response text.

    Parameters mirror the text-generation API; ``temperature`` defaults to
    0.9 and is clamped to a minimum of 0.01. As a side effect the response
    is rendered in a Streamlit text area.

    Returns:
        The whitespace-normalized response with the '</s>' marker removed.
    """
    global system_prompt_sent
    # Default and clamp the sampling temperature.
    temperature = float(temperature) if temperature is not None else 0.9
    temperature = max(temperature, 1e-2)
    top_p = float(top_p)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )
    formatted_prompt = format_prompt(user_input, history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
    # Accumulate streamed tokens, then normalize whitespace and strip the
    # end-of-sequence marker.
    response = "".join(token.token.text for token in stream)
    response = ' '.join(response.split()).replace('</s>', '')
    # Show the response in a text area.
    st.text_area("Bot:", value=response, height=200, key='response_area')
    # Bug fix: the audio playback that used to happen here was removed. It
    # duplicated the st.audio widget key 'audio' already used at script
    # level after the generate() call, which raises a DuplicateWidgetID
    # error in Streamlit, and synthesized the same audio twice.
    return response
# Initialize the conversation history on first run.
if "history" not in st.session_state:
    st.session_state.history = []

# Streamlit user interface.
st.title("Chatbot Interactivo")
user_input = st.text_area(label="Usuario", value="Escribe aquí tu mensaje", height=100)

# Show the conversation history so far.
st.subheader("Historial de Conversación")
for user_prompt, bot_response in st.session_state.history:
    st.write(f"Usuario: {user_prompt}")
    st.write(f"Bot: {bot_response}")
    st.markdown("---")

# Generate a response and update the history.
# NOTE(review): this runs on every Streamlit rerun, so the current input is
# re-sent to the model and re-appended each time the script executes —
# consider gating it behind a submit button.
output = generate(user_input, history=st.session_state.history)
st.session_state.history.append((user_input, output))

# Synthesize and play the response as audio.
audio_file = text_to_speech(output)
st.audio(audio_file, format="audio/mp3", start_time=0, key='audio')