artificialguybr committed · commit a71a29c · verified · 1 parent: 0355e55

Update app.py

Files changed (1): app.py (+8 -10)
app.py CHANGED
@@ -59,17 +59,22 @@ def call_nvidia_api(history, max_tokens, temperature, top_p):
     return history


-def update_chatbot(message, chat_history, system_message, max_tokens, temperature, top_p):
+def update_chatbot(message, chat_history):
     """Updates the chatbot with the user message and generates a response."""
     print("Updating chatbot...")
+    system_message = system_msg.value
+    max_tokens_val = max_tokens.value
+    temperature_val = temperature.value
+    top_p_val = top_p.value
     if not chat_history or (chat_history and chat_history[-1]["role"] != "user"):
         chat_history = user(message, chat_history, system_message)
     else:
         chat_history = user(message, chat_history)
-    chat_history = call_nvidia_api(chat_history, max_tokens, temperature, top_p)
+    chat_history = call_nvidia_api(chat_history, max_tokens_val, temperature_val, top_p_val)
     return chat_history


+
 # Gradio interface components
 system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
 max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
@@ -79,14 +84,7 @@ top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
 with gr.Blocks() as demo:
     chat_history_state = gr.State([])
     chatbot = gr.ChatInterface(
-        fn=lambda message, history: update_chatbot(
-            message,
-            history,
-            system_msg.value,
-            max_tokens.value,
-            temperature.value,
-            top_p.value
-        ),
+        fn=update_chatbot,
         additional_inputs=[system_msg, max_tokens, temperature, top_p],
         title="LLAMA 70B Free Demo",
         description="""
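
For context, a minimal standalone sketch of the gr.ChatInterface wiring this diff touches: Gradio passes the current values of additional_inputs to fn as extra positional arguments after (message, history). The handler name respond, the placeholder reply, and the default system prompt below are illustrative assumptions, not code from this repository.

import gradio as gr

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Placeholder reply; a real handler would call the model API here
    # with the forwarded slider/textbox values.
    return f"(system={system_message!r}, max_tokens={int(max_tokens)}) You said: {message}"

# Components handed to additional_inputs; their live values are forwarded to respond().
system_msg = gr.Textbox("You are a helpful assistant.", label="System Message", lines=5)
max_tokens = gr.Slider(20, 1024, step=20, value=1024, label="Max Tokens")
temperature = gr.Slider(0.0, 1.0, step=0.05, value=0.7, label="Temperature")
top_p = gr.Slider(0.0, 1.0, step=0.05, value=0.7, label="Top P")

demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[system_msg, max_tokens, temperature, top_p],
    title="LLAMA 70B Free Demo",
)

if __name__ == "__main__":
    demo.launch()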