Update app.py
app.py CHANGED

@@ -105,13 +105,17 @@ def format_response_with_thinking(response):
     # If no thinking tags, return the original response
     return response
 
-def chat_submit(message, chat_history, conversation_state, model_name, max_length, temperature):
+def chat_submit(message, history, conversation_state, model_name, max_length, temperature):
     """Process a new message and update the chat history"""
     if not message.strip():
-        return "", chat_history, conversation_state
+        return "", history, conversation_state
 
     model_id = MODELS.get(model_name, MODELS["Athena-R3X 4B"])
     try:
+        # Print debug info to help diagnose issues
+        print(f"Processing message: {message}")
+        print(f"Selected model: {model_name} ({model_id})")
+
         response, load_time, generation_time = generate_response(
             model_id, conversation_state, message, max_length, temperature
         )
@@ -124,13 +128,17 @@ def chat_submit(message, chat_history, conversation_state, model_name, max_length, temperature):
         formatted_response = format_response_with_thinking(response)
 
         # Update the visible chat history
-        chat_history.append((message, formatted_response))
+        history.append((message, formatted_response))
+        print(f"Response added to history. Current length: {len(history)}")
 
-        return "", chat_history, conversation_state
+        return "", history, conversation_state
     except Exception as e:
+        import traceback
+        print(f"Error in chat_submit: {str(e)}")
+        print(traceback.format_exc())
         error_message = f"Error: {str(e)}"
-        chat_history.append((message, error_message))
-        return "", chat_history, conversation_state
+        history.append((message, error_message))
+        return "", history, conversation_state
 
 css = """
 .message {
@@ -171,27 +179,9 @@ css = """
 }
 """
 
-# Add JavaScript to handle the toggle functionality
-js = """
-function setupThinkingToggles() {
-    document.querySelectorAll('.thinking-toggle').forEach(button => {
-        button.addEventListener('click', function() {
-            const content = this.nextElementSibling;
-            content.classList.toggle('hidden');
-            this.textContent = content.classList.contains('hidden') ? 'Show reasoning' : 'Hide reasoning';
-        });
-    });
-}
-
-// Run after the page loads and when the chat updates
-document.addEventListener('DOMContentLoaded', setupThinkingToggles);
-const observer = new MutationObserver(setupThinkingToggles);
-observer.observe(document.body, { childList: true, subtree: true });
-"""
-
 theme = gr.themes.Monochrome()
 
-with gr.Blocks(title="Athena Playground Chat", css=css, theme=theme, js=js) as demo:
+with gr.Blocks(title="Athena Playground Chat", css=css, theme=theme) as demo:
     gr.Markdown("# 🚀 Athena Playground Chat")
     gr.Markdown("*Powered by HuggingFace ZeroGPU*")
 
@@ -204,6 +194,9 @@ with gr.Blocks(title="Athena Playground Chat", css=css, theme=theme, js=js) as demo:
         user_input = gr.Textbox(label="Your message", scale=8, autofocus=True, placeholder="Type your message here...")
         send_btn = gr.Button(value="Send", scale=1, variant="primary")
 
+    # Clear button for resetting the conversation
+    clear_btn = gr.Button("Clear Conversation")
+
     # Configuration controls
     gr.Markdown("### ⚙️ Model & Generation Settings")
     with gr.Row():
@@ -214,7 +207,7 @@ with gr.Blocks(title="Athena Playground Chat", css=css, theme=theme, js=js) as demo:
             info="Select which Athena model to use"
         )
         max_length = gr.Slider(
-            32,
+            32, 2048, value=512,
             label="📝 Max Tokens",
             info="Maximum number of tokens to generate"
         )
@@ -224,18 +217,26 @@ with gr.Blocks(title="Athena Playground Chat", css=css, theme=theme, js=js) as demo:
             info="Higher values = more creative responses"
         )
 
-    # Connect the interface components
-    user_input.submit(
+    # Function to clear the conversation
+    def clear_conversation():
+        return [], []
+
+    # Connect the interface components - note the specific ordering
+    user_input.submit(
         chat_submit,
         inputs=[user_input, chatbot, conversation_state, model_choice, max_length, temperature],
         outputs=[user_input, chatbot, conversation_state]
     )
 
+    # Make sure send button uses the exact same function with the same parameter ordering
    send_btn.click(
         chat_submit,
         inputs=[user_input, chatbot, conversation_state, model_choice, max_length, temperature],
         outputs=[user_input, chatbot, conversation_state]
     )
+
+    # Connect clear button
+    clear_btn.click(clear_conversation, outputs=[chatbot, conversation_state])
 
     # Add examples if desired
     gr.Examples(
@@ -255,4 +256,4 @@ with gr.Blocks(title="Athena Playground Chat", css=css, theme=theme, js=js) as demo:
     """)
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(debug=True)  # Enable debug mode for better error reporting
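
The pattern this commit settles on — one handler shared by the Enter key and the Send button, returning values in the same order as `outputs`, plus a clear handler that resets both the `gr.Chatbot` and the `gr.State` — can be exercised in isolation. Below is a minimal sketch of that wiring, assuming the tuple-style `(user, bot)` history that `gr.Chatbot` accepts in Gradio 3.x/4.x; `echo_bot` is a hypothetical stand-in for `chat_submit`, not the app's actual model call:

import gradio as gr

def echo_bot(message, history, state):
    # Hypothetical stand-in for chat_submit: append a (user, bot)
    # pair, pass the state through, and clear the textbox.
    if not message.strip():
        return "", history, state
    history.append((message, f"You said: {message}"))
    return "", history, state

def clear_conversation():
    # Reset both the visible chat and the conversation state together.
    return [], []

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    state = gr.State([])  # stands in for conversation_state
    with gr.Row():
        box = gr.Textbox(scale=8)
        send = gr.Button("Send", scale=1)
    clear = gr.Button("Clear Conversation")

    # Enter key and Send button share one handler with identical
    # input/output ordering, exactly as in the diff above.
    box.submit(echo_bot, inputs=[box, chatbot, state], outputs=[box, chatbot, state])
    send.click(echo_bot, inputs=[box, chatbot, state], outputs=[box, chatbot, state])
    clear.click(clear_conversation, outputs=[chatbot, state])

demo.launch()

Returning "" as the first output is what empties the textbox after each send, and clear_conversation returns two empty lists because the visible chat and the model-side state are separate components that must be reset together.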
|