salomonsky committed
Commit a0a031a
1 Parent(s): 27adc53

Update app.py

Files changed (1)
  1. app.py +48 -53
app.py CHANGED
@@ -1,12 +1,30 @@
- import streamlit as st
  from huggingface_hub import InferenceClient
-
- st.set_page_config(page_title="MAYA 3.0 CHATBOT IA", page_icon=":robot_face:")

  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
- conversation_history = []

- def generate_response(user_input, history, system_prompt, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
      temperature = float(temperature)
      if temperature < 1e-2:
          temperature = 1e-2
@@ -21,56 +39,33 @@ def generate_response(user_input, history, system_prompt, temperature=0.9, max_n
        seed=42,
    )

-     formatted_prompt = format_prompt(f"{system_prompt}, {user_input}", history)
-     response = client.text_generation(formatted_prompt, **generate_kwargs)
-     return response["generated_text"]
-
- def format_prompt(message, history):
-     prompt = "<s>"
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     prompt += f"[INST] {message} [/INST]"
-     return prompt
-
- def validate_input(user_input, system_prompt):
-     if not user_input:
-         st.warning("Por favor ingrese un mensaje.")
-         return False
-     if not system_prompt:
-         st.warning("Por favor ingrese un prompt del sistema.")
-         return False
-     return True
-
- def display_help():
-     st.write("También puede ingresar un prompt del sistema para guiar la generación de texto.")
-
- def display_conversation_history(history):
-     for role, message in history:
-         if role == "user":
-             st.text(f"Usuario: {message}")
-         elif role == "bot":
-             st.text(f"Chatbot: {message}")

- def main():
-     st.title("MAYA 3.0 CHATBOT IA")
-     user_input = st.text_input("Escribe tu mensaje")
-     system_prompt = st.text_input("Ingrese el prompt del sistema")
-     temperature = st.hidden_selectbox("Temperatura", options=list(range(1, 11)))
-     max_new_tokens = st.hidden_selectbox("Número máximo de nuevos tokens", options=list(range(1, 2049)))
-     top_p = st.hidden_selectbox("Top-p (nucleus sampling)", options=list(range(1, 11)))
-     repetition_penalty = st.hidden_selectbox("Penalización por repetición", options=list(range(1, 2)))

-     if st.button("Enviar"):
-         if validate_input(user_input, system_prompt):
-             conversation_history.append(("user", user_input))
-             bot_response = generate_response(user_input, conversation_history, system_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
-             conversation_history.append(("bot", bot_response))

-     if st.button("Ayuda"):
-         display_help()

-     display_conversation_history(conversation_history)

- if __name__ == "__main__":
-     main()
 
 
  from huggingface_hub import InferenceClient
+ import gradio as gr

  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

+ # Function to generate the system_prompt based on the option selected in the radio button
+ def get_system_prompt(selected_option):
+     prompts = {
+         "MAESTRO": "Soy un maestro que te guiará sabiamente.",
+         "MEDICO": "Como médico, te daré información de salud.",
+         "TERAPEUTA": "Ofrezco apoyo terapéutico para tu bienestar emocional.",
+         "NUTRIOLOGO": "Como nutriólogo, te proporcionaré consejos nutricionales.",
+         "FILOSOFO": "Reflexionemos sobre la filosofía de la vida.",
+     }
+     return prompts.get(selected_option, "")
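get_system_prompt maps the selected role to a canned system prompt and falls back to an empty string for anything else; for instance (illustrative calls, not part of the commit):

get_system_prompt("MEDICO")    # -> "Como médico, te daré información de salud."
get_system_prompt("PIANISTA")  # -> "" (roles not in the dict hit the empty-string default)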
+ def format_prompt(message, history):
+     prompt = "<s>"
+     for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
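format_prompt assembles the Mixtral-Instruct template: each past turn becomes an [INST] ... [/INST] block followed by the bot reply, and the new message is appended as the final instruction. A worked example with a made-up one-turn history (not part of the commit):

history = [("Hola", "Hola, ¿en qué puedo ayudarte?")]
format_prompt("Dame un consejo", history)
# -> '<s>[INST] Hola [/INST] Hola, ¿en qué puedo ayudarte?</s> [INST] Dame un consejo [/INST]'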
+ def generate(
+     prompt, history, system_prompt="", temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0,
+ ):
      temperature = float(temperature)
      if temperature < 1e-2:
          temperature = 1e-2

        seed=42,
    )

+     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
+     output = ""

+     for response in stream:
+         output += response.token.text
+         yield output
+     return output
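Since generate() yields the accumulated text after every streamed token, callers see progressively longer strings and the last value holds the complete answer. A minimal driver sketch, assuming the Inference API call succeeds (illustrative, not part of the commit):

answer = ""
for partial in generate("Dame un consejo breve", history=[], system_prompt=get_system_prompt("MAESTRO")):
    answer = partial  # each yield is everything generated so far
print(answer)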
 
 
 
 
+ # Interface configuration with radio buttons instead of a sidebar
+ roles_options = ["MAESTRO", "MEDICO", "TERAPEUTA", "NUTRIOLOGO", "FILOSOFO"]
+ roles_radio = gr.Radio(label="Selecciona un rol:", choices=roles_options)

+ # Function to update the system_prompt when the radio button option changes
+ def update_system_prompt(selected_option):
+     return get_system_prompt(selected_option)

+ # Chat interface configuration with radio buttons
+ chat_interface = gr.ChatInterface(
+     fn=generate,
+     inputs=["text", "text", "text", "number", "number", "number"],
+     outputs=["text"],
+     live=True,
+     theme="huggingface",
+     inputs=[roles_radio, "text", "text", "number", "number", "number"],
+     on_input_change=update_system_prompt,
+ )

+ # Launch the interface
+ chat_interface.launch()
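As committed, the gr.ChatInterface call repeats the inputs keyword (Python rejects a repeated keyword argument) and passes inputs, outputs, live and on_input_change, which ChatInterface does not define; the usual way to feed the radio selection into the chat function is additional_inputs. A minimal working sketch under that assumption, reusing generate, get_system_prompt and roles_options from above (the chat_fn wrapper is hypothetical, not part of the commit):

import gradio as gr

def chat_fn(message, history, selected_role):
    # Look up the system prompt for the chosen role, then stream generate()'s output
    yield from generate(message, history, get_system_prompt(selected_role))

demo = gr.ChatInterface(
    fn=chat_fn,
    additional_inputs=[gr.Radio(label="Selecciona un rol:", choices=roles_options)],
    title="MAYA 3.0 CHATBOT IA",
)
demo.launch()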