Update app.py
app.py (CHANGED)
@@ -81,14 +81,7 @@ def count_tokens(text: str) -> int:
     return len(tokenizer.encode(text))
 
 def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
-    """Truncates the conversation history to fit within the maximum token limit.
-    Args:
-        history: The conversation history (list of user/assistant tuples).
-        system_message: The system message.
-        max_length: The maximum number of tokens allowed.
-    Returns:
-        The truncated history.
-    """
+    """Truncates the conversation history to fit within the maximum token limit."""
     truncated_history = []
     system_message_tokens = count_tokens(system_message)
     current_length = system_message_tokens
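Note on the hunk above: the trimmed docstring belongs to the token-budget truncation whose body continues in the next hunk. As a standalone illustration, here is a minimal sketch of that strategy; a whitespace word count stands in for the app's tokenizer-based count_tokens, and the newest-first loop direction is inferred from the insert(0, ...) / break pattern visible in the diff.

# Standalone sketch (hypothetical names): greedy newest-first truncation
# under a token budget. A whitespace split stands in for the real
# count_tokens, which calls len(tokenizer.encode(text)).
def count_tokens(text: str) -> int:
    return len(text.split())  # placeholder token count

def truncate_history(history, system_message, max_length):
    truncated = []
    current = count_tokens(system_message)
    # Iterate newest-first: keep recent turns, stop at the first
    # turn that would overflow the budget.
    for user_msg, assistant_msg in reversed(history):
        turn = count_tokens(user_msg) + count_tokens(assistant_msg)
        if current + turn > max_length:
            break
        truncated.insert(0, (user_msg, assistant_msg))
        current += turn
    return truncated

history = [("tell me a story", "once upon a time " * 20), ("hi", "hello")]
print(truncate_history(history, "You are helpful.", 30))  # keeps only ("hi", "hello")

Because the loop breaks at the first overflow, one oversized recent turn can empty the history entirely; the reserve of max_tokens + 100 passed in from respond() (next hunk) budgets room for the new message and the reply.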
@@ -100,53 +93,55 @@ def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
         turn_tokens = user_tokens + assistant_tokens
 
         if current_length + turn_tokens <= max_length:
-            truncated_history.insert(0, (user_msg, assistant_msg))
+            truncated_history.insert(0, (user_msg, assistant_msg))
             current_length += turn_tokens
         else:
             break  # Stop adding turns if we exceed the limit
 
     return truncated_history
 
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    """Responds to a user message, maintaining conversation history, using special tokens and message list."""
+def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p, clear_memory):
+    """Responds to a user message, maintaining conversation history using a streaming generator."""
+    # Check for the clear memory command (or if the Clear Memory button was triggered)
+    if message.lower() == "clear memory" or clear_memory:
+        return "", []  # Reset the chat
 
-
-
+    formatted_system_message = system_message  # Use the provided system message
+    truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)
 
-
-    truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)  # Reserve space for the new message and some generation
-
-    messages = [{"role": "system", "content": formatted_system_message}]  # Start with system message as before
+    messages = [{"role": "system", "content": formatted_system_message}]
     for user_msg, assistant_msg in truncated_history:
         if user_msg:
-            messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})
+            messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})
         if assistant_msg:
-            messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})
-
-    messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})  # Format current user message
+            messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})
+    messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})
 
     response = ""
+    yielded_any = False
     try:
-        for chunk in client.chat_completion(
-            messages,
-            max_tokens=max_tokens,
-            stream=True,
-            temperature=temperature,
-            top_p=top_p,
-        ):
-            token = chunk.choices[0].delta.content
-            response += token
-            yield response
+        for chunk in client.chat_completion(
+            messages,
+            max_tokens=max_tokens,
+            stream=True,
+            temperature=temperature,
+            top_p=top_p,
+        ):
+            token = chunk.choices[0].delta.content
+            response += token
+            yield response
+            yielded_any = True
+    except StopAsyncIteration:
+        # If the generator stops without yielding any tokens, yield the accumulated response
+        if not yielded_any:
+            yield response
+        return
     except Exception as e:
-        print(f"An error occurred: {e}")
-        yield "I'm sorry, I encountered an error. Please try again."
+        print(f"An error occurred: {e}")
+        yield "I'm sorry, I encountered an error. Please try again."
+        # Ensure at least one yield if no tokens were yielded inside the loop
+        if not yielded_any:
+            yield response
 
 # --- Gradio Interface ---
 demo = gr.ChatInterface(
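Note on respond(): it is now a generator, so Gradio re-renders the chat bubble on every yield of the growing response string, which produces the streaming effect. Below is a minimal consumption sketch of the same pattern, assuming huggingface_hub's InferenceClient; the model id is illustrative, not taken from this commit, and the sketch guards delta.content, which may be None on some stream chunks.

# Minimal sketch of the streaming pattern used above, assuming
# huggingface_hub.InferenceClient; the model id is illustrative.
from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")  # hypothetical choice

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hello in one sentence."},
]

response = ""
for chunk in client.chat_completion(
    messages, max_tokens=64, stream=True, temperature=0.7, top_p=0.95
):
    token = chunk.choices[0].delta.content
    if token:  # delta.content may be None on some chunks, so guard it
        response += token
print(response)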
@@ -156,19 +151,15 @@ demo = gr.ChatInterface(
             value=default_nvc_prompt_template,
             label="System message",
             visible=True,
-            lines=10
+            lines=10  # Increased height for easier reading of the prompt
         ),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+        gr.Button("Clear Memory"),
     ],
+    chatbot_kwargs={"format": "messages"}  # Use messages format to avoid deprecation warnings
 )
 
 if __name__ == "__main__":
-    demo.launch(share=True)
+    demo.launch(share=True)  # Note: share=True is not supported on Hugging Face Spaces.
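Note on the interface block: here is a minimal sketch of comparable wiring, assuming a recent Gradio release where the messages history format is opted into with type="messages" on gr.ChatInterface; the chatbot_kwargs keyword added above is worth verifying against the installed Gradio version. The echo handler and default prompt are illustrative.

# Minimal sketch (illustrative names), assuming a recent Gradio release.
import gradio as gr

def echo(message, history, system_message, max_tokens, temperature, top_p):
    # Stand-in for respond(): a real handler would stream model tokens.
    yield f"(system: {system_message[:20]}...) you said: {message}"

demo = gr.ChatInterface(
    echo,
    type="messages",  # dict-based chat history format in current Gradio versions
    additional_inputs=[
        gr.Textbox(value="You are helpful.", label="System message", lines=10),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()  # per the commit's own note, share=True is unnecessary on Spaces

The additional_inputs components are passed positionally to the handler after (message, history), which is why the sliders' order must match the function signature.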