salomonsky committed on
Commit: e000aac (verified)
Parent(s): 140c389

Update app.py

Files changed (1):
  app.py (+14, -28)
app.py CHANGED

@@ -3,17 +3,10 @@ from huggingface_hub import InferenceClient
  from gtts import gTTS
  import streamlit.components.v1 as stc
  
- # Set up the inference client
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
- 
- # System configuration
  system_prompt = "Tu nombre es Xaman 3.0"
  system_prompt_sent = False
  
- # Styling for the response text
- response_style = "background-color: #f0f0f0; padding: 10px; border-radius: 10px; margin: 10px 0; max-height: 700px; overflow-y: auto;"
- 
- # Function to format the prompt
  def format_prompt(message, history):
      global system_prompt_sent
      prompt = "<s>"
@@ -30,17 +23,18 @@ def format_prompt(message, history):
      prompt += f"[INST] {message} [/INST]"
      return prompt
  
- # Function to convert text to speech
  def text_to_speech(text):
      tts = gTTS(text=text, lang='es')
      tts.save('output.mp3')
      return 'output.mp3'
  
- # Function to generate the response and display it in the interface
- def generate(user_input, history, temperature=None, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
+ def generate(
+     user_input, history, temperature=None, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0,
+ ):
      global system_prompt_sent
      temperature = float(temperature) if temperature is not None else 0.9
-     temperature = max(temperature, 0.01)  # Ensure the temperature is at least 0.01
+     if temperature < 1e-2:
+         temperature = 1e-2
      top_p = float(top_p)
  
      generate_kwargs = dict(
@@ -53,26 +47,18 @@ def generate(user_input, history, temperature=None, max_new_tokens=2048, top_p=0
      )
  
      formatted_prompt = format_prompt(user_input, history)
- 
-     with st.spinner('Generando respuesta...'):
-         stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
-         output = ""
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
+     response = " ".join(response.token.text for response in stream)
+ 
+     st.markdown(f"<p style='text-align: justify;'>{response}</p>", unsafe_allow_html=True)
+     audio_file = text_to_speech(response)
+     st.audio(audio_file, format="audio/mp3", start_time=0)
  
-         for response in stream:
-             output = response.token.text
-             st.markdown(f"<div style='{response_style} text-align: justify;'>{output}</div>", unsafe_allow_html=True)
-             audio_file = text_to_speech(output)
-             st.audio(audio_file, format="audio/mp3", start_time=0)
- 
-     return output
+     return response
  
- # Get the chat history from the Streamlit session
  if "history" not in st.session_state:
      st.session_state.history = []
  
- # User interface
- st.title("Chatbot Mejorado")
  user_input = st.text_input(label="Usuario", value="Escribe aquí tu mensaje")
- if st.button("Enviar"):
-     output = generate(user_input, history=st.session_state.history)
-     st.session_state.history.append((user_input, output))
+ output = generate(user_input, history=st.session_state.history)
+ st.session_state.history.append((user_input, output))
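
For readers who want the post-commit flow in one place, below is a minimal sketch of app.py assembled from the context and "+" lines above. It is not the committed file: the body of format_prompt and the contents of generate_kwargs sit outside the changed hunks, so those parts are hypothetical stand-ins, the system_prompt_sent bookkeeping is omitted, and `import streamlit as st` is assumed from the top of app.py. Running it needs a Hugging Face token with access to the Inference API.

# Sketch only: assembled from the context and "+" lines of the diff above,
# with assumptions marked in comments.
import streamlit as st                       # assumed: imported at the top of app.py, outside this diff
from huggingface_hub import InferenceClient
from gtts import gTTS

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
system_prompt = "Tu nombre es Xaman 3.0"

def format_prompt(message, history):
    # Hypothetical stand-in: only the first and last lines of the real
    # format_prompt appear in the diff; its middle is unchanged and not shown.
    prompt = "<s>"
    for user_turn, bot_turn in history:
        prompt += f"[INST] {user_turn} [/INST] {bot_turn}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

def text_to_speech(text):
    # Convert the response text to Spanish speech and save it as an MP3 file.
    tts = gTTS(text=text, lang='es')
    tts.save('output.mp3')
    return 'output.mp3'

def generate(
    user_input, history, temperature=None, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0,
):
    temperature = float(temperature) if temperature is not None else 0.9
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    # Assumed contents: the generate_kwargs block is unchanged by this commit
    # and is not shown in the diff.
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
    )

    formatted_prompt = format_prompt(user_input, history)

    # Stream token-level details from the Inference API, then join the token texts.
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True
    )
    response = " ".join(chunk.token.text for chunk in stream)

    st.markdown(f"<p style='text-align: justify;'>{response}</p>", unsafe_allow_html=True)
    audio_file = text_to_speech(response)
    st.audio(audio_file, format="audio/mp3", start_time=0)
    return response

if "history" not in st.session_state:
    st.session_state.history = []

user_input = st.text_input(label="Usuario", value="Escribe aquí tu mensaje")
output = generate(user_input, history=st.session_state.history)
st.session_state.history.append((user_input, output))

Two consequences of the change are worth noting: joining the streamed tokens with " " inserts a space between every generated sub-word token, where "".join(...) would keep the model's own spacing, and with the st.button("Enviar") gate removed, generate() now runs on every Streamlit rerun of the script.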