Pinkstack committed on
Commit
0dc9ae7
·
verified ·
1 Parent(s): 00f746f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -22
app.py CHANGED
@@ -11,23 +11,22 @@ def respond(
11
  max_tokens: int,
12
  temperature: float,
13
  top_p: float,
14
- stop_event: gr.EventData,
15
  ) -> Iterator[str]:
16
  messages = [{"role": "system", "content": system_message}]
17
-
18
  # Add history to messages
19
  for user_msg, assistant_msg in history:
20
  if user_msg:
21
  messages.append({"role": "user", "content": user_msg})
22
  if assistant_msg:
23
  messages.append({"role": "assistant", "content": assistant_msg})
24
-
25
  # Add current message
26
  messages.append({"role": "user", "content": message})
27
-
28
  # Initialize response
29
  response = ""
30
-
31
  # Stream the response
32
  try:
33
  for chunk in client.chat_completion(
@@ -37,8 +36,6 @@ def respond(
37
  temperature=temperature,
38
  top_p=top_p,
39
  ):
40
- if stop_event.originator.get("clicked"):
41
- break
42
  if chunk.choices[0].delta.content is not None:
43
  token = chunk.choices[0].delta.content
44
  response += token
@@ -84,12 +81,12 @@ details[open] summary:after {
84
 
85
  # Create Gradio interface
86
  with gr.Blocks(css=css) as demo:
87
- gr.Markdown("## Chat with Superthoughts lite! (1.7B)")
88
- gr.Markdown("**Note:** First response may take a moment to initialize. Subsequent responses will be faster.")
89
-
90
  chatbot = gr.Chatbot(height=600)
91
  msg = gr.Textbox(label="Your message", placeholder="Type your message here...")
92
-
93
  with gr.Accordion("Advanced Settings", open=False):
94
  system_message = gr.Textbox(
95
  value="You must act in a conversational matter and always include <think> ... </think> <output> </output> tokens.",
@@ -121,12 +118,12 @@ with gr.Blocks(css=css) as demo:
121
  """Add user message to history"""
122
  return "", history + [[user_message, None]]
123
 
124
- def bot(history: list, system_message: str, max_tokens: int, temperature: float, top_p: float, stop_event: gr.EventData) -> Iterator[list]:
125
  """Generate and stream bot responses"""
126
  user_message, _ = history[-1]
127
  history[-1][1] = "" # Initialize bot's response
128
-
129
- for partial_response in respond(user_message, history[:-1], system_message, max_tokens, temperature, top_p, stop_event):
130
  history[-1][1] = partial_response
131
  yield history
132
 
@@ -138,23 +135,19 @@ with gr.Blocks(css=css) as demo:
138
  queue=False
139
  ).then(
140
  bot,
141
- [chatbot, system_message, max_tokens, temperature, top_p, gr.EventData()],
142
  chatbot
143
  )
144
 
145
  # Add a clear button
146
  clear = gr.Button("Clear Conversation")
147
  clear.click(lambda: None, None, chatbot, queue=False)
148
-
149
- # Add a stop button
150
- stop_button = gr.Button("Stop")
151
- stop_button.click(lambda: gr.EventData(clicked=True), outputs=None, queue=False)
152
-
153
  # Add disclaimer
154
  gr.Markdown(
155
  """
156
  ---
157
- ⚠️ **Disclaimer:** Superthoughts may make mistakes. Always verify important information.
158
  This chat interface is intended for testing and experimentation purposes only.
159
  """
160
  )
@@ -162,4 +155,4 @@ with gr.Blocks(css=css) as demo:
162
  # Launch the interface
163
  if __name__ == "__main__":
164
  demo.queue()
165
- demo.launch(share=True)
 
11
  max_tokens: int,
12
  temperature: float,
13
  top_p: float,
 
14
  ) -> Iterator[str]:
15
  messages = [{"role": "system", "content": system_message}]
16
+
17
  # Add history to messages
18
  for user_msg, assistant_msg in history:
19
  if user_msg:
20
  messages.append({"role": "user", "content": user_msg})
21
  if assistant_msg:
22
  messages.append({"role": "assistant", "content": assistant_msg})
23
+
24
  # Add current message
25
  messages.append({"role": "user", "content": message})
26
+
27
  # Initialize response
28
  response = ""
29
+
30
  # Stream the response
31
  try:
32
  for chunk in client.chat_completion(
 
36
  temperature=temperature,
37
  top_p=top_p,
38
  ):
 
 
39
  if chunk.choices[0].delta.content is not None:
40
  token = chunk.choices[0].delta.content
41
  response += token
 
81
 
82
  # Create Gradio interface
83
  with gr.Blocks(css=css) as demo:
84
+ gr.Markdown("# Chat with Superthoughts lite! (1.7B)")
85
+ gr.Markdown("**Warning:** The first output from the AI may take a few moments. After the first message, it should work at a decent speed, keep in mind that this chat is only meant for testing and experimenting.")
86
+
87
  chatbot = gr.Chatbot(height=600)
88
  msg = gr.Textbox(label="Your message", placeholder="Type your message here...")
89
+
90
  with gr.Accordion("Advanced Settings", open=False):
91
  system_message = gr.Textbox(
92
  value="You must act in a conversational matter and always include <think> ... </think> <output> </output> tokens.",
 
118
  """Add user message to history"""
119
  return "", history + [[user_message, None]]
120
 
121
+ def bot(history: list, system_message: str, max_tokens: int, temperature: float, top_p: float) -> Iterator[list]:
122
  """Generate and stream bot responses"""
123
  user_message, _ = history[-1]
124
  history[-1][1] = "" # Initialize bot's response
125
+
126
+ for partial_response in respond(user_message, history[:-1], system_message, max_tokens, temperature, top_p):
127
  history[-1][1] = partial_response
128
  yield history
129
 
 
135
  queue=False
136
  ).then(
137
  bot,
138
+ [chatbot, system_message, max_tokens, temperature, top_p],
139
  chatbot
140
  )
141
 
142
  # Add a clear button
143
  clear = gr.Button("Clear Conversation")
144
  clear.click(lambda: None, None, chatbot, queue=False)
145
+
 
 
 
 
146
  # Add disclaimer
147
  gr.Markdown(
148
  """
149
  ---
150
+ ⚠️ **Disclaimer:** Superthoughts may make mistakes. Always verify important information.
151
  This chat interface is intended for testing and experimentation purposes only.
152
  """
153
  )
 
155
  # Launch the interface
156
  if __name__ == "__main__":
157
  demo.queue()
158
+ demo.launch(share=True)