ruslanmv committed
Commit 507d35c · verified · 1 Parent(s): 1fa0961

Update app.py

Files changed (1): app.py (+49 -40)
app.py CHANGED
@@ -81,7 +81,14 @@ def count_tokens(text: str) -> int:
     return len(tokenizer.encode(text))
 
 def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
-    """Truncates the conversation history to fit within the maximum token limit."""
+    """Truncates the conversation history to fit within the maximum token limit.
+    Args:
+        history: The conversation history (list of user/assistant tuples).
+        system_message: The system message.
+        max_length: The maximum number of tokens allowed.
+    Returns:
+        The truncated history.
+    """
     truncated_history = []
     system_message_tokens = count_tokens(system_message)
     current_length = system_message_tokens
@@ -93,55 +100,53 @@ def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
         turn_tokens = user_tokens + assistant_tokens
 
         if current_length + turn_tokens <= max_length:
-            truncated_history.insert(0, (user_msg, assistant_msg))
+            truncated_history.insert(0, (user_msg, assistant_msg))  # Add to the beginning
             current_length += turn_tokens
         else:
             break  # Stop adding turns if we exceed the limit
 
     return truncated_history
 
-def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p, clear_memory):
-    """Responds to a user message, maintaining conversation history using a streaming generator."""
-    # Check for the clear memory command (or if the Clear Memory button was triggered)
-    if message.lower() == "clear memory" or clear_memory:
-        return "", []  # Reset the chat
-
-    formatted_system_message = system_message  # Use the provided system message
-    truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)
-
-    messages = [{"role": "system", "content": formatted_system_message}]
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message,  # System message is now an argument
+    max_tokens,
+    temperature,
+    top_p,
+):
+    """Responds to a user message, maintaining conversation history, using special tokens and message list."""
+
+    if message.lower() == "clear memory":  # Check for the clear memory command
+        return "", []  # Return empty message and empty history to reset the chat
+
+    formatted_system_message = system_message  # Use the system_message argument
+    truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)  # Reserve space for the new message and some generation
+
+    messages = [{"role": "system", "content": formatted_system_message}]  # Start with system message as before
     for user_msg, assistant_msg in truncated_history:
         if user_msg:
-            messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})
+            messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})  # Format history user message
         if assistant_msg:
-            messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})
-    messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})
+            messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})  # Format history assistant message
+
+    messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})  # Format current user message
 
     response = ""
-    yielded_any = False
     try:
-        for chunk in client.chat_completion(
-            messages,
-            max_tokens=max_tokens,
-            stream=True,
-            temperature=temperature,
-            top_p=top_p,
-        ):
-            token = chunk.choices[0].delta.content
-            response += token
-            yield response
-            yielded_any = True
-    except StopAsyncIteration:
-        # If the generator stops without yielding any tokens, yield the accumulated response
-        if not yielded_any:
-            yield response
-        return
+        for chunk in client.chat_completion(
+            messages,  # Send the messages list again, but with formatted content
+            max_tokens=max_tokens,
+            stream=True,
+            temperature=temperature,
+            top_p=top_p,
+        ):
+            token = chunk.choices[0].delta.content
+            response += token
+            yield response
     except Exception as e:
-        print(f"An error occurred: {e}")
-        yield "I'm sorry, I encountered an error. Please try again."
-        # Ensure at least one yield if no tokens were yielded inside the loop
-        if not yielded_any:
-            yield response
+        print(f"An error occurred: {e}")  # It's a good practice add a try-except block
+        yield "I'm sorry, I encountered an error. Please try again."
 
 # --- Gradio Interface ---
 demo = gr.ChatInterface(
@@ -151,15 +156,19 @@ demo = gr.ChatInterface(
         value=default_nvc_prompt_template,
         label="System message",
         visible=True,
-        lines=10  # Increased height for easier reading of the prompt
+        lines=10,  # Increased height for more space to read the prompt
     ),
     gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
     gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-    gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
-    gr.Button("Clear Memory"),
+    gr.Slider(
+        minimum=0.1,
+        maximum=1.0,
+        value=0.95,
+        step=0.05,
+        label="Top-p (nucleus sampling)",
+    ),
     ],
-    chatbot_kwargs={"format": "messages"}  # Use messages format to avoid deprecation warnings
 )
 
 if __name__ == "__main__":
-    demo.launch(share=True)  # Note: share=True is not supported on Hugging Face Spaces.
+    demo.launch(share=True)
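
Review note: the budgeting logic walks the history newest-first (the loop header sits outside this hunk, but insert(0, ...) implies reversed iteration, which this sketch assumes) and keeps whole turns until the token budget is exhausted. A minimal standalone sketch of the same idea, with a whitespace word count standing in for the real tokenizer:

def count_tokens_stub(text: str) -> int:
    # Stand-in for the app's tokenizer-based count_tokens.
    return len(text.split())

def truncate_history_sketch(history, system_message, max_length):
    truncated = []
    budget = max_length - count_tokens_stub(system_message)
    for user_msg, assistant_msg in reversed(history):  # newest turn first
        turn = count_tokens_stub(user_msg or "") + count_tokens_stub(assistant_msg or "")
        if turn <= budget:
            truncated.insert(0, (user_msg, assistant_msg))  # restore chronological order
            budget -= turn
        else:
            break  # everything older than the first over-budget turn is dropped
    return truncated

history = [("hi", "hello"), ("how are you", "fine thanks"), ("tell me a story", "once upon a time")]
print(truncate_history_sketch(history, "system", max_length=10))
# -> [('tell me a story', 'once upon a time')]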
 
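Review note: the content strings hand-roll Zephyr-style markers (<|user|>, <|assistant|>, </s>) inside an already role-structured message list. Chat-completion backends usually apply the model's chat template to `messages` themselves, so the markers may end up applied twice; that is an assumption about this serving stack and worth verifying. A plain-content variant, if the server does the templating:

# Roles only, no manual special tokens; assumes the endpoint applies the chat template.
messages = [{"role": "system", "content": formatted_system_message}]
for user_msg, assistant_msg in truncated_history:
    if user_msg:
        messages.append({"role": "user", "content": user_msg})
    if assistant_msg:
        messages.append({"role": "assistant", "content": assistant_msg})
messages.append({"role": "user", "content": message})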
 
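Review note: in streamed chat-completion responses, chunk.choices[0].delta.content can be None for some chunks (for example, a role-only or final chunk), in which case `response += token` raises a TypeError that the broad except then masks as the generic error reply. A guarded version of the loop, as a suggestion rather than part of this commit:

for chunk in client.chat_completion(
    messages,
    max_tokens=max_tokens,
    stream=True,
    temperature=temperature,
    top_p=top_p,
):
    token = chunk.choices[0].delta.content
    if token:  # skip chunks that carry no text
        response += token
        yield response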
 
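Review note: dropping gr.Button("Clear Memory") together with respond's removed clear_memory parameter keeps additional_inputs aligned with respond's signature, since gr.ChatInterface passes those inputs positionally after (message, history). The removed chatbot_kwargs argument does not appear in gr.ChatInterface's signature in current Gradio releases, so deleting it likely avoids a TypeError; the supported route to the messages history format would be the type="messages" argument, which would also require changing respond's tuple-based history handling. The assumed overall wiring (the gr.Textbox( and additional_inputs= lines sit outside the hunks):

import gradio as gr

# Assumed reconstruction of the interface setup, not shown in full by the diff.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value=default_nvc_prompt_template, label="System message", visible=True, lines=10),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)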