PeterPinetree committed on
Commit
f010e45
·
verified ·
1 Parent(s): d1b54ac

Update app.py

Browse files

Fixed the yield statement: The error happened because the yield statement wasn't properly formatting the chat history as a list of tuples. I changed it to yield the correct format: yield chat_history + [[message, response]]
Proper Chatbot initialization: Added explicit initialization of the chatbot with an empty list to ensure it starts with the correct format
Fixed the clear button functionality: Now it properly resets the chatbot with an empty list instead of None
Improved error handling: Added a check to make sure tokens are not None before concatenating them to the response
Streamlined respond function: Simplified the respond function to properly manage the chat history format throughout the streaming process

Files changed (1) hide show
  1. app.py +28 -25
app.py CHANGED
@@ -5,18 +5,12 @@ import random
5
  # Initialize the inference client
6
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
7
 
8
- # Story genres and themes for variety
9
  STORY_GENRES = [
10
  "fantasy", "sci-fi", "mystery", "horror", "historical",
11
  "cyberpunk", "western", "post-apocalyptic", "steampunk"
12
  ]
13
 
14
- # Define character archetypes for the assistant to use
15
- CHARACTER_ARCHETYPES = [
16
- "the reluctant hero", "the wise mentor", "the cunning rogue",
17
- "the loyal companion", "the mysterious stranger", "the noble warrior"
18
- ]
19
-
20
  def get_enhanced_system_prompt(genre=None):
21
  """Generate a detailed system prompt with optional genre specification"""
22
  selected_genre = genre or random.choice(STORY_GENRES)
@@ -40,16 +34,17 @@ Keep responses engaging but concise (200-300 words maximum). If the user's input
40
 
41
  return system_message
42
 
43
- def respond(message, history, genre=None, memory_length=5):
44
  """Generate a response based on the current message and conversation history"""
45
- # Use a more detailed system prompt
46
  system_message = get_enhanced_system_prompt(genre)
47
 
48
  # Initialize messages with system prompt
49
  messages = [{"role": "system", "content": system_message}]
50
 
51
- # Add limited history to prevent token overflow (only use recent exchanges)
52
- for user_msg, bot_msg in history[-memory_length:]:
 
53
  messages.append({"role": "user", "content": user_msg})
54
  messages.append({"role": "assistant", "content": bot_msg})
55
 
@@ -57,31 +52,31 @@ def respond(message, history, genre=None, memory_length=5):
57
  messages.append({"role": "user", "content": message})
58
 
59
  # Special handling for story initialization
60
- if len(history) == 0 or message.lower() in ["start", "begin", "begin my adventure"]:
61
- # Add a specific instruction for starting a new story
62
  messages.append({
63
  "role": "system",
64
- "content": f"Begin a new {genre or random.choice(STORY_GENRES)} adventure with an intriguing opening scene. Introduce the protagonist without assuming too much about them, allowing the user to shape the character."
65
  })
66
 
67
- # Generate and stream response
68
  response = ""
69
  try:
70
- for message in client.chat_completion(
71
  messages,
72
  max_tokens=512,
73
  stream=True,
74
  temperature=0.7,
75
  top_p=0.95,
76
  ):
77
- token = message.choices[0].delta.content
78
- response += token
79
- yield response
 
80
  except Exception as e:
81
- error_message = f"Story magic temporarily interrupted. Please try again. (Error: {str(e)})"
82
- yield error_message
83
 
84
- # Create interface with additional customization options
85
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
86
  gr.Markdown("# 🔮 Interactive Story Adventure")
87
  gr.Markdown("Immerse yourself in an interactive story where your choices shape the narrative.")
@@ -93,7 +88,12 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
93
  bubble_full_width=False,
94
  show_copy_button=True,
95
  avatar_images=(None, "🧙"),
 
96
  )
 
 
 
 
97
  msg = gr.Textbox(
98
  placeholder="Describe what you want to do next in the story...",
99
  container=False,
@@ -124,9 +124,12 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
124
  inputs=msg
125
  )
126
 
127
- msg.submit(respond, [msg, chatbot, genre], chatbot)
128
- submit.click(respond, [msg, chatbot, genre], chatbot)
129
- clear.click(lambda: None, None, chatbot, queue=False)
 
 
 
130
  clear.click(lambda: "", None, msg, queue=False)
131
 
132
  if __name__ == "__main__":
 
5
  # Initialize the inference client
6
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
7
 
8
+ # Story genres and themes
9
  STORY_GENRES = [
10
  "fantasy", "sci-fi", "mystery", "horror", "historical",
11
  "cyberpunk", "western", "post-apocalyptic", "steampunk"
12
  ]
13
 
 
 
 
 
 
 
14
  def get_enhanced_system_prompt(genre=None):
15
  """Generate a detailed system prompt with optional genre specification"""
16
  selected_genre = genre or random.choice(STORY_GENRES)
 
34
 
35
  return system_message
36
 
37
+ def respond(message, chat_history, genre=None):
38
  """Generate a response based on the current message and conversation history"""
39
+ # Use system prompt
40
  system_message = get_enhanced_system_prompt(genre)
41
 
42
  # Initialize messages with system prompt
43
  messages = [{"role": "system", "content": system_message}]
44
 
45
+ # Add history (limited to last 5 exchanges to prevent token overflow)
46
+ memory_length = 5
47
+ for user_msg, bot_msg in chat_history[-memory_length:]:
48
  messages.append({"role": "user", "content": user_msg})
49
  messages.append({"role": "assistant", "content": bot_msg})
50
 
 
52
  messages.append({"role": "user", "content": message})
53
 
54
  # Special handling for story initialization
55
+ if len(chat_history) == 0 or message.lower() in ["start", "begin", "begin my adventure"]:
 
56
  messages.append({
57
  "role": "system",
58
+ "content": f"Begin a new {genre or random.choice(STORY_GENRES)} adventure with an intriguing opening scene."
59
  })
60
 
61
+ # Generate response
62
  response = ""
63
  try:
64
+ for chunk in client.chat_completion(
65
  messages,
66
  max_tokens=512,
67
  stream=True,
68
  temperature=0.7,
69
  top_p=0.95,
70
  ):
71
+ token = chunk.choices[0].delta.content
72
+ if token: # Check if token is not None
73
+ response += token
74
+ yield chat_history + [[message, response]]
75
  except Exception as e:
76
+ error_response = f"Story magic temporarily interrupted. Please try again. (Error: {str(e)})"
77
+ yield chat_history + [[message, error_response]]
78
 
79
+ # Create interface with additional customization
80
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
81
  gr.Markdown("# 🔮 Interactive Story Adventure")
82
  gr.Markdown("Immerse yourself in an interactive story where your choices shape the narrative.")
 
88
  bubble_full_width=False,
89
  show_copy_button=True,
90
  avatar_images=(None, "🧙"),
91
+ label="Chatbot"
92
  )
93
+
94
+ # Important: This properly initializes the chatbot with an empty list of message pairs
95
+ state = gr.State([])
96
+
97
  msg = gr.Textbox(
98
  placeholder="Describe what you want to do next in the story...",
99
  container=False,
 
124
  inputs=msg
125
  )
126
 
127
+ # Fixed event handlers that maintain proper chat history format
128
+ msg.submit(respond, [msg, chatbot, genre], [chatbot])
129
+ submit.click(respond, [msg, chatbot, genre], [chatbot])
130
+
131
+ # Properly reset the chatbot with an empty list to fix the tuple format error
132
+ clear.click(lambda: [], None, chatbot, queue=False)
133
  clear.click(lambda: "", None, msg, queue=False)
134
 
135
  if __name__ == "__main__":