Spaces:
Running
Running
Update app.py
Browse files
Added the type="messages" parameter to the Chatbot component to use the new format, which aligns with the OpenAI-style message format with "role" and "content" keys.
Updated the chat history handling throughout the code to use the new message format with dictionaries instead of tuples.
Modified the response function to properly create and update the chat history in the new format.
This update will:
Eliminate the deprecation warning
Make your code more future-proof as Gradio moves to this new format
Maintain all the functionality of your original chatbot
app.py
CHANGED
@@ -40,20 +40,19 @@ def respond(message, chat_history, genre=None):
|
|
40 |
system_message = get_enhanced_system_prompt(genre)
|
41 |
|
42 |
# Initialize messages with system prompt
|
43 |
-
|
44 |
|
45 |
# Add history (limited to last 5 exchanges to prevent token overflow)
|
46 |
memory_length = 5
|
47 |
-
for
|
48 |
-
|
49 |
-
messages.append({"role": "assistant", "content": bot_msg})
|
50 |
|
51 |
-
# Add current message
|
52 |
-
|
53 |
|
54 |
# Special handling for story initialization
|
55 |
if len(chat_history) == 0 or message.lower() in ["start", "begin", "begin my adventure"]:
|
56 |
-
|
57 |
"role": "system",
|
58 |
"content": f"Begin a new {genre or random.choice(STORY_GENRES)} adventure with an intriguing opening scene."
|
59 |
})
|
@@ -62,7 +61,7 @@ def respond(message, chat_history, genre=None):
|
|
62 |
response = ""
|
63 |
try:
|
64 |
for chunk in client.chat_completion(
|
65 |
-
|
66 |
max_tokens=512,
|
67 |
stream=True,
|
68 |
temperature=0.7,
|
@@ -71,10 +70,18 @@ def respond(message, chat_history, genre=None):
|
|
71 |
token = chunk.choices[0].delta.content
|
72 |
if token: # Check if token is not None
|
73 |
response += token
|
74 |
-
|
|
|
|
|
|
|
|
|
|
|
75 |
except Exception as e:
|
76 |
error_response = f"Story magic temporarily interrupted. Please try again. (Error: {str(e)})"
|
77 |
-
|
|
|
|
|
|
|
78 |
|
79 |
# Create interface with additional customization
|
80 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
@@ -88,12 +95,10 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
88 |
bubble_full_width=False,
|
89 |
show_copy_button=True,
|
90 |
avatar_images=(None, "🧙"),
|
91 |
-
label="Chatbot"
|
|
|
92 |
)
|
93 |
|
94 |
-
# Important: This properly initializes the chatbot with an empty list of message pairs
|
95 |
-
state = gr.State([])
|
96 |
-
|
97 |
msg = gr.Textbox(
|
98 |
placeholder="Describe what you want to do next in the story...",
|
99 |
container=False,
|
@@ -124,11 +129,11 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
124 |
inputs=msg
|
125 |
)
|
126 |
|
127 |
-
#
|
128 |
msg.submit(respond, [msg, chatbot, genre], [chatbot])
|
129 |
submit.click(respond, [msg, chatbot, genre], [chatbot])
|
130 |
|
131 |
-
#
|
132 |
clear.click(lambda: [], None, chatbot, queue=False)
|
133 |
clear.click(lambda: "", None, msg, queue=False)
|
134 |
|
|
|
40 |
system_message = get_enhanced_system_prompt(genre)
|
41 |
|
42 |
# Initialize messages with system prompt
|
43 |
+
api_messages = [{"role": "system", "content": system_message}]
|
44 |
|
45 |
# Add history (limited to last 5 exchanges to prevent token overflow)
|
46 |
memory_length = 5
|
47 |
+
for msg in chat_history[-memory_length*2:]:
|
48 |
+
api_messages.append({"role": msg["role"], "content": msg["content"]})
|
|
|
49 |
|
50 |
+
# Add current user message
|
51 |
+
api_messages.append({"role": "user", "content": message})
|
52 |
|
53 |
# Special handling for story initialization
|
54 |
if len(chat_history) == 0 or message.lower() in ["start", "begin", "begin my adventure"]:
|
55 |
+
api_messages.append({
|
56 |
"role": "system",
|
57 |
"content": f"Begin a new {genre or random.choice(STORY_GENRES)} adventure with an intriguing opening scene."
|
58 |
})
|
|
|
61 |
response = ""
|
62 |
try:
|
63 |
for chunk in client.chat_completion(
|
64 |
+
api_messages,
|
65 |
max_tokens=512,
|
66 |
stream=True,
|
67 |
temperature=0.7,
|
|
|
70 |
token = chunk.choices[0].delta.content
|
71 |
if token: # Check if token is not None
|
72 |
response += token
|
73 |
+
|
74 |
+
# Use new message format
|
75 |
+
history = chat_history.copy()
|
76 |
+
history.append({"role": "user", "content": message})
|
77 |
+
history.append({"role": "assistant", "content": response})
|
78 |
+
yield history
|
79 |
except Exception as e:
|
80 |
error_response = f"Story magic temporarily interrupted. Please try again. (Error: {str(e)})"
|
81 |
+
history = chat_history.copy()
|
82 |
+
history.append({"role": "user", "content": message})
|
83 |
+
history.append({"role": "assistant", "content": error_response})
|
84 |
+
yield history
|
85 |
|
86 |
# Create interface with additional customization
|
87 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
|
95 |
bubble_full_width=False,
|
96 |
show_copy_button=True,
|
97 |
avatar_images=(None, "🧙"),
|
98 |
+
label="Chatbot",
|
99 |
+
type="messages" # Use the new messages format
|
100 |
)
|
101 |
|
|
|
|
|
|
|
102 |
msg = gr.Textbox(
|
103 |
placeholder="Describe what you want to do next in the story...",
|
104 |
container=False,
|
|
|
129 |
inputs=msg
|
130 |
)
|
131 |
|
132 |
+
# Event handlers for the new message format
|
133 |
msg.submit(respond, [msg, chatbot, genre], [chatbot])
|
134 |
submit.click(respond, [msg, chatbot, genre], [chatbot])
|
135 |
|
136 |
+
# Reset the chatbot with an empty list for the new format
|
137 |
clear.click(lambda: [], None, chatbot, queue=False)
|
138 |
clear.click(lambda: "", None, msg, queue=False)
|
139 |
|