Pinkstack committed on
Commit
d0018d0
·
verified ·
1 Parent(s): cdfe590

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -40,13 +40,13 @@ css = """
40
  .thoughts {
41
  border: 1px solid #ccc;
42
  padding: 10px;
43
- background-color: #f9f9f9;
44
  border-radius: 5px;
45
  }
46
  details summary {
47
  cursor: pointer;
48
  padding: 5px;
49
- background-color: #e0e0e0;
50
  border-radius: 5px;
51
  font-weight: bold;
52
  }
@@ -62,13 +62,13 @@ details[open] summary:after {
62
  """
63
 
64
  with gr.Blocks(css=css) as demo:
65
- gr.Markdown("## Chat with Superthoughts")
66
- gr.Markdown("**Warning:** The first output from the AI may take a few moments. After the first message, it should work quickly.")
67
 
68
  chatbot = gr.Chatbot()
69
  msg = gr.Textbox()
70
  system_message = gr.Textbox(value="You must always include <think> ... </think> <output> </output> tokens.", label="System message")
71
- max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
72
  temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
73
  top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
74
 
@@ -84,7 +84,7 @@ with gr.Blocks(css=css) as demo:
84
  history[-1][1] = formatted_response
85
  return history
86
 
87
- msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
88
  bot, [chatbot, system_message, max_tokens, temperature, top_p], chatbot
89
  )
90
 
 
40
  .thoughts {
41
  border: 1px solid #ccc;
42
  padding: 10px;
43
+ background-color: #000000;
44
  border-radius: 5px;
45
  }
46
  details summary {
47
  cursor: pointer;
48
  padding: 5px;
49
+ background-color: #000000;
50
  border-radius: 5px;
51
  font-weight: bold;
52
  }
 
62
  """
63
 
64
  with gr.Blocks(css=css) as demo:
65
+ gr.Markdown("## Chat with Superthoughts lite! (1.7B)")
66
+ gr.Markdown("**Warning:** The first output from the AI may take a few moments. After the first message, it should work at a decent speed.")
67
 
68
  chatbot = gr.Chatbot()
69
  msg = gr.Textbox()
70
  system_message = gr.Textbox(value="You must always include <think> ... </think> <output> </output> tokens.", label="System message")
71
+ max_tokens = gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max new tokens")
72
  temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
73
  top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
74
 
 
84
  history[-1][1] = formatted_response
85
  return history
86
 
87
+ msg.submit(user, [msg, chatbot], [msg, chatbot], queue=True).then(
88
  bot, [chatbot, system_message, max_tokens, temperature, top_p], chatbot
89
  )
90