Spaces:
Sleeping
Sleeping
Commit
·
d9f7657
1
Parent(s):
8160127
Update app.py
Browse files
app.py
CHANGED
@@ -1,38 +1,58 @@
|
|
|
|
1 |
import streamlit as st
|
2 |
from huggingface_hub import InferenceClient
|
3 |
-
import os
|
4 |
|
5 |
-
# Crear el cliente de inferencia
|
6 |
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
7 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
# Interfaz de Usuario con Streamlit
|
9 |
st.title("MAYA 3.0 CHATBOT IA")
|
10 |
|
11 |
# Parámetros de usuario
|
12 |
user_input = st.text_input("Ingrese su instrucción:")
|
13 |
-
prompt, history, system_prompt = st.text_input("Prompt:"), st.text_area("Historial:"), st.text_input("System Prompt:")
|
14 |
-
temperature, max_new_tokens, top_p, repetition_penalty = st.slider("Temperatura", 0.1, 2.0), st.slider("Número máximo de nuevos tokens", 10, 5000), st.slider("Top-p", 0.1, 1.0), st.slider("Penalización por repetición", 0.1, 2.0)
|
15 |
|
16 |
# Botón para generar texto
|
17 |
-
if st.button("Generar") and user_input:
|
18 |
try:
|
19 |
# Generar texto basado en la instrucci贸n del usuario y los par谩metros seleccionados
|
20 |
-
generated_text =
|
21 |
user_input,
|
22 |
-
|
23 |
-
|
24 |
-
system_prompt=system_prompt,
|
25 |
-
temperature=temperature,
|
26 |
-
max_new_tokens=max_new_tokens,
|
27 |
-
top_p=top_p,
|
28 |
-
repetition_penalty=repetition_penalty
|
29 |
)
|
30 |
|
31 |
-
# Mostrar el texto generado
|
32 |
-
st.text_area("", generated_text, height=
|
33 |
|
34 |
except Exception as e:
|
35 |
# Manejar errores y mostrar mensaje de error
|
36 |
st.error(f"Error al generar texto: {str(e)}")
|
37 |
elif not user_input:
|
38 |
-
st.warning("Por favor, ingrese una instrucción antes de generar texto.")
|
|
|
1 |
+
import os
|
2 |
import streamlit as st
|
3 |
from huggingface_hub import InferenceClient
|
|
|
4 |
|
|
|
5 |
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
6 |
|
7 |
+
def format_prompt(message, history):
|
8 |
+
prompt = "<s>"
|
9 |
+
for user_prompt, bot_response in history:
|
10 |
+
prompt += f"[INST] {user_prompt} [/INST]"
|
11 |
+
prompt += f" {bot_response}</s> "
|
12 |
+
prompt += f"[INST] {message} [/INST]"
|
13 |
+
return prompt
|
14 |
+
|
15 |
+
def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
|
16 |
+
temperature = float(temperature)
|
17 |
+
if temperature < 1e-2:
|
18 |
+
temperature = 1e-2
|
19 |
+
top_p = float(top_p)
|
20 |
+
|
21 |
+
generate_kwargs = dict(
|
22 |
+
temperature=temperature,
|
23 |
+
max_new_tokens=max_new_tokens,
|
24 |
+
top_p=top_p,
|
25 |
+
repetition_penalty=repetition_penalty,
|
26 |
+
do_sample=True,
|
27 |
+
seed=42,
|
28 |
+
)
|
29 |
+
|
30 |
+
formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
|
31 |
+
response = client.text_generation(formatted_prompt, **generate_kwargs)
|
32 |
+
|
33 |
+
return response["generated_text"]
|
34 |
+
|
35 |
# Interfaz de Usuario con Streamlit
|
36 |
st.title("MAYA 3.0 CHATBOT IA")
|
37 |
|
38 |
# Par谩metros de usuario
|
39 |
user_input = st.text_input("Ingrese su instrucci贸n:")
|
|
|
|
|
40 |
|
41 |
# Bot贸n para generar texto
|
42 |
+
if st.button("Generar Texto") and user_input:
|
43 |
try:
|
44 |
# Generar texto basado en la instrucci贸n del usuario y los par谩metros seleccionados
|
45 |
+
generated_text = generate(
|
46 |
user_input,
|
47 |
+
[],
|
48 |
+
"", # Puedes dejar el system_prompt vac铆o si no es necesario
|
|
|
|
|
|
|
|
|
|
|
49 |
)
|
50 |
|
51 |
+
# Mostrar el texto generado de manera readonly
|
52 |
+
st.text_area("Texto Generado:", generated_text, height=200, key="output_text", readonly=True)
|
53 |
|
54 |
except Exception as e:
|
55 |
# Manejar errores y mostrar mensaje de error
|
56 |
st.error(f"Error al generar texto: {str(e)}")
|
57 |
elif not user_input:
|
58 |
+
st.warning("Por favor, ingrese una instrucci贸n antes de generar texto.")
|