Update app.py
app.py CHANGED
@@ -5,8 +5,11 @@ import random
 from typing import Generator, Dict, List, Tuple, Optional
 import logging  # Added logging for better debugging
 
-# Configure logging
-logging.basicConfig(
+# Configure logging with DEBUG level
+logging.basicConfig(
+    level=logging.DEBUG,
+    format='%(asctime)s - %(levelname)s - %(message)s'
+)
 
 # Get token from environment variable
 hf_token = os.environ.get("HF_TOKEN")
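As a rough standalone sketch (not part of app.py itself), the configuration added above makes every logging.debug call in the module emit a timestamped line:

import logging

# Same settings as in the hunk above; DEBUG level means logging.debug output is shown.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s'
)

logging.debug("Making API call...")
# Example output: 2025-01-01 12:00:00,000 - DEBUG - Making API call...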
@@ -135,48 +138,50 @@ def format_history_for_gradio(history_tuples):
 
 # 1. Add type hints for better code maintainability
 # 4. Add input validation
-def respond(
-    message: str,
-    chat_history: List[Tuple[str, str]],
-    genre: Optional[str] = None,
-    use_full_memory: bool = True
-) -> Tuple[str, List[Tuple[str, str]]]:
+def respond(message: str, chat_history: List[Tuple[str, str]], genre: Optional[str] = None, use_full_memory: bool = True) -> Tuple[str, List[Tuple[str, str]]]:
     """Generate a response based on the current message and conversation history."""
     if not message.strip():
         return "", chat_history
 
     try:
-        # Start with
+        # Start with system prompt
         api_messages = [{"role": "system", "content": get_enhanced_system_prompt(genre)}]
+        logging.debug(f"System Message: {api_messages[0]}")
 
-        # Add
+        # Add chat history
         if chat_history and use_full_memory:
             for user_msg, bot_msg in chat_history[-MEMORY_WINDOW:]:
-                api_messages.
-
+                api_messages.extend([
+                    {"role": "user", "content": str(user_msg)},
+                    {"role": "assistant", "content": str(bot_msg)}
+                ])
+        logging.debug(f"Chat History Messages: {api_messages[1:]}")
 
-        # Add
+        # Add current message
         api_messages.append({"role": "user", "content": str(message)})
+        logging.debug(f"Final Message List: {api_messages}")
 
-        # Make
+        # Make API call
+        logging.debug("Making API call...")
         response = client.chat_completion(
             messages=api_messages,
             max_tokens=MAX_TOKENS,
             temperature=TEMPERATURE,
             top_p=TOP_P
         )
+        logging.debug("API call completed")
 
-        # Extract response
+        # Extract response
         bot_message = response.choices[0].message.content
+        logging.debug(f"Bot Response: {bot_message[:100]}...")  # First 100 chars
 
-        # Update
+        # Update history
         updated_history = chat_history + [(message, bot_message)]
         return "", updated_history
 
     except Exception as e:
-        # Enhanced error handling
-        error_msg = f"Story magic temporarily interrupted. Please try again. (Error: {str(e)})"
         logging.error("Error in respond function", exc_info=True)
+        error_msg = f"Story magic temporarily interrupted. Please try again. (Error: {str(e)})"
         return "", chat_history + [(message, error_msg)]
 
 def save_story(chat_history):
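For reference, a minimal self-contained sketch of the message-assembly logic that the new debug lines log before the list is handed to client.chat_completion; build_api_messages, the sample history, and the MEMORY_WINDOW value here are illustrative stand-ins, not the Space's actual code:

import logging
from typing import Dict, List, Tuple

logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')

MEMORY_WINDOW = 2  # assumed value; the real constant is defined elsewhere in app.py

def build_api_messages(message: str,
                       chat_history: List[Tuple[str, str]],
                       system_prompt: str,
                       use_full_memory: bool = True) -> List[Dict[str, str]]:
    """Assemble the system prompt, trimmed history, and current message, as respond() does."""
    api_messages = [{"role": "system", "content": system_prompt}]
    if chat_history and use_full_memory:
        # Only the last MEMORY_WINDOW (user, assistant) turns are replayed to the model.
        for user_msg, bot_msg in chat_history[-MEMORY_WINDOW:]:
            api_messages.extend([
                {"role": "user", "content": str(user_msg)},
                {"role": "assistant", "content": str(bot_msg)},
            ])
    api_messages.append({"role": "user", "content": str(message)})
    logging.debug(f"Final Message List: {api_messages}")
    return api_messages

history = [("Tell me a story", "Once upon a time..."),
           ("Make it scarier", "The lights flickered out..."),
           ("Add a dragon", "A dragon circled overhead...")]
messages = build_api_messages("What happens next?", history, "You are a storyteller.")
assert len(messages) == 1 + 2 * MEMORY_WINDOW + 1  # system + trimmed history + new message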