Update app.py
app.py CHANGED
@@ -16,28 +16,23 @@ headers = {
 # Base system message
 BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."
 
-
 def clear_chat():
     """Clears the chat history and message state."""
     print("Clearing chat...")
     chat_history_state.value = []
     chatbot.textbox.value = ""
 
-
-def user(message, history, system_message=None):
+def user(message, history):
     """Updates the chat history with the user message."""
     print(f"User message: {message}")
     history = history or []
-    if system_message:
-        history.append({"role": "system", "content": system_message})
     history.append({"role": "user", "content": message})
     return history
 
-
 def call_nvidia_api(history, system_message, max_tokens, temperature, top_p):
     """Calls the NVIDIA API to generate a response."""
     messages = [{"role": "system", "content": system_message}]
-    messages.extend([{"role":
+    messages.extend([{"role": role, "content": content} for role, content in history])
 
     payload = {
         "messages": messages,
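
With this hunk, user() now records each turn as a {"role", "content"} dict, while the new messages.extend(...) still unpacks each history entry as a (role, content) pair; iterating a dict that way yields its keys ("role", "content") rather than the stored values. A minimal standalone sketch of composing the outgoing messages list from the dict-based history, independent of Gradio and the NVIDIA endpoint (build_messages and the sample turns are illustrative only, not part of app.py):

# Hypothetical helper for illustration; app.py builds the same list inline
# inside call_nvidia_api.
BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."

def build_messages(history, system_message=BASE_SYSTEM_MESSAGE):
    """Prepend the system prompt, then pass the dict-based history through unchanged."""
    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)  # entries are already {"role": ..., "content": ...}
    return messages

# Sample turns shaped like the entries user() and call_nvidia_api now append.
history = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi! How can I help?"},
]
print(build_messages(history))
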
@@ -58,7 +53,7 @@ def call_nvidia_api(history, system_message, max_tokens, temperature, top_p):
     print(f"Payload recebido: {response_body}")
     if response_body.get("choices"):
         assistant_message = response_body["choices"][0]["message"]["content"]
-        history.append(
+        history.append({"role": "assistant", "content": assistant_message})
     return history
 
 def chatbot_submit(message, chat_history, system_message, max_tokens_val, temperature_val, top_p_val):
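
The assistant reply is now stored in the same dict shape as the user turns. A standalone sketch of the response handling this hunk relies on, with extract_assistant_message as a hypothetical helper and a sample response that only mirrors the fields app.py actually reads (choices[0]["message"]["content"]):

def extract_assistant_message(response_body, history):
    """Append the assistant reply to the history when the API returned any choices."""
    if response_body.get("choices"):
        assistant_message = response_body["choices"][0]["message"]["content"]
        history.append({"role": "assistant", "content": assistant_message})
    return history

# Illustrative response body; only the fields read above are assumed here.
sample_response = {
    "choices": [
        {"message": {"role": "assistant", "content": "Sure, here is an answer."}}
    ]
}
print(extract_assistant_message(sample_response, [{"role": "user", "content": "Hi"}]))
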
@@ -66,19 +61,19 @@ def chatbot_submit(message, chat_history, system_message, max_tokens_val, temper
     print("Updating chatbot...")
 
     # Adiciona a mensagem do usuário ao histórico
-    chat_history
+    chat_history = user(message, chat_history)
 
     # Chama a API da NVIDIA para gerar uma resposta
     chat_history = call_nvidia_api(chat_history, system_message, max_tokens_val, temperature_val, top_p_val)
 
     # Extrai apenas a mensagem do assistente da resposta
-    if chat_history and chat_history[-1][
-        assistant_message = chat_history[-1][
+    if chat_history and chat_history[-1]["role"] == "assistant":
+        assistant_message = chat_history[-1]["content"]
     else:
         assistant_message = "Desculpe, ocorreu um erro ao gerar a resposta."
 
     return assistant_message, chat_history
-
+
 chat_history_state = gr.State([])
 system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
 max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
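
The diff does not show how chatbot_submit is hooked up to the interface. A hypothetical Gradio Blocks wiring consistent with its six inputs and two return values; the component names, the echo stub, and the temperature/top_p sliders are assumptions for illustration, not code from app.py:

import gradio as gr

BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."

def chatbot_submit(message, chat_history, system_message, max_tokens_val, temperature_val, top_p_val):
    """Echo stub standing in for the real function, which calls the NVIDIA API."""
    chat_history = (chat_history or []) + [{"role": "user", "content": message}]
    assistant_message = f"(echo) {message}"
    chat_history.append({"role": "assistant", "content": assistant_message})
    return assistant_message, chat_history

with gr.Blocks() as demo:
    chat_history_state = gr.State([])
    system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
    max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
    temperature = gr.Slider(0.0, 1.0, label="Temperature", value=0.7)  # assumed control
    top_p = gr.Slider(0.0, 1.0, label="Top P", value=0.9)              # assumed control
    user_box = gr.Textbox(label="Your message")
    reply_box = gr.Textbox(label="Assistant reply")
    send_btn = gr.Button("Send")

    # Route the six inputs chatbot_submit expects and capture both return values.
    send_btn.click(
        chatbot_submit,
        inputs=[user_box, chat_history_state, system_msg, max_tokens, temperature, top_p],
        outputs=[reply_box, chat_history_state],
    )

if __name__ == "__main__":
    demo.launch()
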