Update app.py
app.py CHANGED
@@ -63,10 +63,7 @@ def chatbot_submit(message, chat_history, system_message, max_tokens_val, temper
     print("Updating chatbot...")
 
     # Update the chat history with the user's message
-    if system_message:
-        chat_history = user(message, chat_history, system_message)
-    else:
-        chat_history = user(message, chat_history)
+    chat_history.append({"role": "user", "content": message})
 
     # Call the NVIDIA API to generate a response
     chat_history = call_nvidia_api(chat_history, max_tokens_val, temperature_val, top_p_val)
@@ -77,20 +74,19 @@ def chatbot_submit(message, chat_history, system_message, max_tokens_val, temper
     else:
         assistant_message = "Desculpe, ocorreu um erro ao gerar a resposta."
 
-    return assistant_message
-
-
+    return assistant_message, chat_history
+
+chat_history_state = gr.State([])
+system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
+max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
+temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
+top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
 with gr.Blocks() as demo:
-    chat_history_state = gr.State([])
-    system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
-    max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
-    temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
-    top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
-
     chatbot = gr.ChatInterface(
         fn=chatbot_submit,
         additional_inputs=[system_msg, max_tokens, temperature, top_p],
         title="LLAMA 70B Free Demo",
+        state=chat_history_state,
         description="""
         <div style="text-align: center; font-size: 1.5em; margin-bottom: 20px;">
             <strong>Explore the Capabilities of LLAMA 2 70B</strong>
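For context, a minimal sketch of the revised chatbot_submit flow after this change, shown in isolation from the Gradio wiring. The call_nvidia_api stub, the default BASE_SYSTEM_MESSAGE value, and the check that extracts the assistant reply are placeholders and assumptions for illustration, not the real implementations in app.py; only the role/content append, the API call, the error fallback, and the (assistant_message, chat_history) return mirror the diff.

# Illustrative sketch only; the real call_nvidia_api and BASE_SYSTEM_MESSAGE live in app.py.
BASE_SYSTEM_MESSAGE = "You are a helpful assistant."  # assumed default value

def call_nvidia_api(chat_history, max_tokens, temperature, top_p):
    # Placeholder: echo the last user turn instead of calling the NVIDIA endpoint.
    last_user = next(m["content"] for m in reversed(chat_history) if m["role"] == "user")
    chat_history.append({"role": "assistant", "content": f"Echo: {last_user}"})
    return chat_history

def chatbot_submit(message, chat_history, system_message, max_tokens_val, temperature_val, top_p_val):
    print("Updating chatbot...")

    # Update the chat history with the user's message (role/content dicts).
    chat_history.append({"role": "user", "content": message})

    # Call the NVIDIA API to generate a response.
    chat_history = call_nvidia_api(chat_history, max_tokens_val, temperature_val, top_p_val)

    # Pull the assistant's reply out of the updated history, if present (assumed check).
    if chat_history and chat_history[-1]["role"] == "assistant":
        assistant_message = chat_history[-1]["content"]
    else:
        assistant_message = "Desculpe, ocorreu um erro ao gerar a resposta."

    return assistant_message, chat_history

# Example call:
history = [{"role": "system", "content": BASE_SYSTEM_MESSAGE}]
reply, history = chatbot_submit("Hello!", history, BASE_SYSTEM_MESSAGE, 1024, 0.2, 0.7)
print(reply)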