salomonsky committed on
Commit 595f1c1 · verified · 1 Parent(s): 72b7296

Update app.py

Files changed (1)
  1. app.py +20 -19
app.py CHANGED
@@ -4,7 +4,7 @@ from gtts import gTTS
 import base64
 
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
-system_prompt = "Tu nombre será Chaman 3.0 una IA conductual."
+system_prompt = "Tu nombre es Chaman 3.0 una IA conductual"
 system_prompt_sent = False
 
 def format_prompt(message, history):
@@ -46,10 +46,10 @@ def generate(
         do_sample=True,
         seed=42,
     )
-
+
     formatted_prompt = format_prompt(user_input, history)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
-
+
     response = ""
     for response_token in stream:
         response += response_token.token.text
@@ -59,25 +59,26 @@ def generate(
     audio_file_path = text_to_speech(response)
     audio_file = open(audio_file_path, 'rb')
     audio_bytes = audio_file.read()
-
+
     with st.container():
         st.text_area("Salida del Chatbot", value=response, height=200, max_chars=500, key="output_text", disabled=True)
-
-        st.markdown(
-            f"""
-            <audio autoplay="autoplay" controls="controls" src="data:audio/mp3;base64,{base64.b64encode(audio_bytes).decode()}" type="audio/mp3" speed="1.5" id="audio_player"></audio>
-            """,
-            unsafe_allow_html=True
-        )
 
-    for user_prompt, bot_response in history:
+    if "history" not in st.session_state:
+        st.session_state.history = []
+
+    user_input = st.text_input(label="Usuario", value="Escribe aquí tu mensaje")
+    output = generate(user_input, history=st.session_state.history)
+    st.session_state.history.append((user_input, output))
+
+    for user_prompt, bot_response in st.session_state.history:
         st.write(f"Usuario: {user_prompt}")
         st.write(f"Respuesta: {bot_response}")
-        st.markdown("---")
-
-if "history" not in st.session_state:
-    st.session_state.history = []
+    st.markdown(
+        f"""
+        <audio autoplay="autoplay" controls="controls" src="data:audio/mp3;base64,{base64.b64encode(audio_bytes).decode()}" type="audio/mp3" speed="1.5" id="audio_player"></audio>
+        """,
+        unsafe_allow_html=True
+    )
 
-user_input = st.text_input(label="Usuario", value="")
-output = generate(user_input, history=st.session_state.history)
-st.session_state.history.append((user_input, output))
+if __name__ == "__main__":
+    generate(user_input="", history=[])
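
For context, the hunks call two helpers whose definitions sit outside the changed lines: format_prompt(message, history), which builds the prompt passed to client.text_generation, and text_to_speech(response), which must return the path of an audio file that generate() then reads and embeds as base64 MP3. Below is a minimal sketch (not part of this commit) of plausible implementations, assuming the usual Mixtral-Instruct [INST] prompt convention and the gtts import visible in the first hunk header; the lang argument and the temp-file handling are assumptions.

import tempfile
from gtts import gTTS

def format_prompt(message, history):
    # Assumed: fold earlier (user, bot) turns plus the new message into one
    # Mixtral-Instruct-style prompt string.
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST] {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

def text_to_speech(text, lang="es"):
    # Assumed: synthesize the reply with gTTS and return the path of a
    # temporary MP3 file, which generate() reads back as bytes.
    tts = gTTS(text=text, lang=lang)
    tmp = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
    tts.save(tmp.name)
    return tmp.name

The committed change keeps the base64 <audio autoplay> embedding via st.markdown(..., unsafe_allow_html=True), a common workaround for autoplaying gTTS output in Streamlit when st.audio's autoplay is not available.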