Update app.py
app.py CHANGED
@@ -3,12 +3,10 @@ from huggingface_hub import InferenceClient
 from gtts import gTTS
 import base64
 from pydub import AudioSegment
-import io
 from pydub.playback import play

-st.sidebar.width = 100
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
-pre_prompt = "
+pre_prompt = "Tu nombre será Chaman 3.0 una IA conductual, hablarás español tu rol es la bioética y el estoicismo holístico."
 pre_prompt_sent = False

 def format_prompt(message, history):
@@ -36,7 +34,7 @@ def text_to_speech(text, speed=1.3):

     return audio_file_path

-def generate(user_input, history, temperature=None, max_new_tokens=
+def generate(user_input, history, temperature=None, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0):
     global pre_prompt_sent
     temperature = float(temperature) if temperature is not None else 0.9
     if temperature < 1e-2:
@@ -63,26 +61,21 @@ def generate(user_input, history, temperature=None, max_new_tokens=512, top_p=0.

     audio_file_path = text_to_speech(response, speed=1.3)
     audio_file = open(audio_file_path, 'rb')
-    audio_bytes =
+    audio_bytes = audio_file.read()

     return response, audio_bytes

 if "history" not in st.session_state:
     st.session_state.history = []

-
-    user_input = st.
+with st.beta_container():
+    user_input = st.text_input(label="", value="")
     output, audio_bytes = generate(user_input, history=st.session_state.history)
-    st.session_state.history[-1] = (user_input, output)
-    else:
-        output, audio_bytes = "", ""

-
-
+if user_input:
+    st.session_state.history.append((user_input, output))

-
-    output, audio_bytes = generate(user_input, history=st.session_state.history)
-    st.session_state.history.append((user_input, output))
+st.text_area("Salida del Chatbot", value=output, height=500, max_chars=600, key="output_text", disabled=True)

 st.markdown(
     f"""