Juliofc commited on
Commit
4365403
verified
1 Parent(s): 9919ca1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -14
app.py CHANGED
@@ -42,30 +42,54 @@ CHAT_TEMPLATE= """{% for message in messages %}
42
  {% endfor %}""" # Asegúrate de usar tu CHAT_TEMPLATE aquí
43
  tokenizer.chat_template = CHAT_TEMPLATE
44
 
45
- chat_history = []
46
- # Función para generar respuestas del modelo
47
- def generate_response(user_input, chat_history):
48
- # Preparar el input agregando el historial de chat
 
49
  chat_history.append({"content": user_input, "role": "user"})
50
- user_input = tokenizer.apply_chat_template(chat_history, tokenize=False)
51
 
 
 
52
  input_tokens = tokenizer(user_input, return_tensors='pt', padding=True, truncation=True, max_length=1024).to(device)
53
 
54
- # Generar la respuesta
55
  output_tokens = model_with_adapter.generate(**input_tokens, max_length=1024, pad_token_id=tokenizer.eos_token_id, top_k=50, do_sample=True, top_p=0.95, temperature=0.7)
56
  generated_text = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
57
-
 
58
  last_us = generated_text.rfind("</user>") + len("</user>")
59
  last_as = generated_text.rfind("</assistant>")
60
- generated_text = generated_text[last_us:last_as].strip()
 
 
61
  chat_history.append({"content": generated_text, "role": "assistant"})
62
- return generated_text, chat_history
63
 
64
- def response(user_input, chat_history):
65
- response, chat_history = generate_response(user_input, chat_history)
 
66
  print(chat_history)
67
- return response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
- iface = gr.ChatInterface(fn=response)
70
 
71
- iface.launch()
 
42
  {% endfor %}""" # Asegúrate de usar tu CHAT_TEMPLATE aquí
43
  tokenizer.chat_template = CHAT_TEMPLATE
44
 
45
+ chat_history = [] # Historial de chat global
46
+
47
+ def generate_response(user_input):
48
+ global chat_history
49
+ # Agregar input del usuario al historial
50
  chat_history.append({"content": user_input, "role": "user"})
 
51
 
52
+ # Preparación del input para el modelo
53
+ user_input = tokenizer.apply_chat_template(chat_history, tokenize=False)
54
  input_tokens = tokenizer(user_input, return_tensors='pt', padding=True, truncation=True, max_length=1024).to(device)
55
 
56
+ # Generación de la respuesta del modelo
57
  output_tokens = model_with_adapter.generate(**input_tokens, max_length=1024, pad_token_id=tokenizer.eos_token_id, top_k=50, do_sample=True, top_p=0.95, temperature=0.7)
58
  generated_text = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
59
+
60
+ # Extracción de la respuesta generada
61
  last_us = generated_text.rfind("</user>") + len("</user>")
62
  last_as = generated_text.rfind("</assistant>")
63
+ generated_text = generated_text[last_us:last_as].strip()
64
+
65
+ # Agregar la respuesta del bot al historial
66
  chat_history.append({"content": generated_text, "role": "assistant"})
 
67
 
68
+ return generated_text
69
+
70
+ def respond(message):
71
  print(chat_history)
72
+ if message: # Verificar si el mensaje no está vacío
73
+ bot_response = generate_response(message)
74
+ return [("", bot_response)]
75
+ return [("", "")]
76
+
77
+ with gr.Blocks() as demo:
78
+ with gr.Row():
79
+ msg = gr.Textbox(label="Tu mensaje", placeholder="Escribe aquí...", lines=1)
80
+ send_btn = gr.Button("Enviar")
81
+ chatbot = gr.Chatbot()
82
+ clear_btn = gr.Button("Limpiar Chat")
83
+
84
+ # Acción al presionar el botón Enviar
85
+ send_btn.click(fn=respond, inputs=msg, outputs=chatbot, _js="() => [document.querySelector('gr-textbox').value='']")
86
+
87
+ # Función para limpiar el chat al presionar el botón Limpiar
88
+ def clear_chat():
89
+ global chat_history
90
+ chat_history.clear()
91
+ chatbot.clear() # Limpiar el componente Chatbot
92
 
93
+ clear_btn.click(fn=clear_chat, inputs=None, outputs=chatbot)
94
 
95
+ demo.launch()