Update app.py

app.py CHANGED
@@ -126,14 +126,8 @@ def create_story_summary(chat_history):
    return summary_instruction

def format_history_for_gradio(history_tuples):
-    """Convert
-
-    for user_msg, bot_msg in history_tuples:
-        messages.extend([
-            {"role": "user", "content": str(user_msg)},
-            {"role": "assistant", "content": str(bot_msg)}
-        ])
-    return messages
+    """Convert chat history to Gradio's message format."""
+    return [(str(user_msg), str(bot_msg)) for user_msg, bot_msg in history_tuples]

# 1. Add type hints for better code maintainability
# 4. Add input validation

@@ -142,52 +136,40 @@ def respond(
    chat_history: List[Tuple[str, str]],
    genre: Optional[str] = None,
    use_full_memory: bool = True
-) -> Generator[List[
+) -> Generator[List[Tuple[str, str]], None, None]:
    """Generate a response based on the current message and conversation history."""
    if not message.strip():
        return chat_history

-    system_message =
+    system_message = f"""You are an interactive storyteller creating an immersive {genre or 'fantasy'} story.
+When responding:
+1. First, write 2-3 paragraphs (150-300 words) continuing the story based on the user's choice
+2. Use vivid sensory details and second-person perspective ("you", "your")
+3. Include dialogue and character thoughts
+4. After the story segment, provide exactly three numbered choices for what to do next
+5. Format the choices as:
+
+1. You [action verb]...
+2. You [action verb]...
+3. You [action verb]...
+"""
+
+    # Format history properly for the API
+    formatted_messages = [{"role": "system", "content": system_message}]

-    # Format history properly
-    formatted_history = []
    if chat_history:
-        for user_msg, bot_msg in chat_history:
-            formatted_history.extend([
+        for user_msg, bot_msg in chat_history[-MEMORY_WINDOW:]:
+            formatted_messages.extend([
                {"role": "user", "content": str(user_msg)},
                {"role": "assistant", "content": str(bot_msg)}
            ])

-
-    api_messages = [
-        {"role": "system", "content": system_message}
-    ]
-
-    # Add memory management
-    if use_full_memory and formatted_history:
-        if len(formatted_history) > MAX_HISTORY_LENGTH:
-            summary_instruction = create_story_summary(chat_history[:len(chat_history)-5])
-            if summary_instruction:
-                api_messages.append(summary_instruction)
-            api_messages.extend(formatted_history[-MEMORY_WINDOW*2:])
-        else:
-            api_messages.extend(formatted_history)
-    else:
-        api_messages.extend(formatted_history[-MEMORY_WINDOW*2:])
-
-    # Add current user message
-    api_messages.append({"role": "user", "content": str(message)})
-
-    # Add choice enforcement
-    api_messages.append({
-        "role": "system",
-        "content": "Remember to end your response with exactly three numbered choices, each starting with 'You'"
-    })
+    formatted_messages.append({"role": "user", "content": str(message)})

-    bot_message = ""
    try:
+        bot_message = ""
        for response_chunk in client.chat_completion(
-            api_messages,
+            formatted_messages,
            max_tokens=MAX_TOKENS,
            stream=True,
            temperature=TEMPERATURE,

@@ -198,12 +180,15 @@ def respond(
            if delta:
                bot_message += delta
                if len(bot_message.strip()) >= MIN_RESPONSE_LENGTH:
-
-
+                    chat_history = list(chat_history)
+                    chat_history.append((message, bot_message))
+                    yield chat_history
+
    except Exception as e:
        error_message = f"Story magic temporarily interrupted. Please try again. (Error: {str(e)})"
-
-
+        chat_history = list(chat_history)
+        chat_history.append((message, error_message))
+        yield chat_history

def save_story(chat_history):
    """Convert chat history to markdown for download"""

@@ -343,4 +328,4 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:

# Run the app
if __name__ == "__main__":
-    demo.launch(server_name="0.0.0.0", server_port=7860)
+    demo.launch(server_name="0.0.0.0", server_port=7860)
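For context, a minimal sketch of how the updated streaming respond() generator could be wired into the gr.Blocks UI this commit touches at the bottom of the file. The component names (chatbot, msg, genre_dd) and the stub body are assumptions for illustration; only respond()'s signature, the Soft theme, and demo.launch(...) appear in the diff itself.

# Hypothetical wiring sketch -- component names and the stub body are assumptions;
# only respond()'s signature, gr.themes.Soft(), and demo.launch() come from this diff.
from typing import Generator, List, Optional, Tuple
import gradio as gr

def respond(
    message: str,
    chat_history: List[Tuple[str, str]],
    genre: Optional[str] = None,
    use_full_memory: bool = True,
) -> Generator[List[Tuple[str, str]], None, None]:
    # Stand-in for the streaming body above: every yield hands the full list of
    # (user, bot) tuples back to Gradio, which re-renders the Chatbot component.
    chat_history = list(chat_history)
    chat_history.append((message, f"[{genre or 'fantasy'}] The story continues..."))
    yield chat_history

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    chatbot = gr.Chatbot()                                   # displays the yielded tuples
    msg = gr.Textbox(label="Your choice")
    genre_dd = gr.Dropdown(["fantasy", "mystery", "sci-fi"], label="Genre")
    msg.submit(respond, inputs=[msg, chatbot, genre_dd], outputs=chatbot)

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)

Because respond() is a generator, Gradio streams each yielded history into the Chatbot, which is what makes the stream=True loop in the commit appear as live typing.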