Spaces:
Running
Running
File size: 14,503 Bytes
92ddc2e 9f69398 92ddc2e d1b54ac f330958 92ddc2e 9f69398 92ddc2e c4ad27f 2f9cd55 43c5c70 c4ad27f a881bae 5828d2c cd693fd a881bae 5828d2c a881bae c4ad27f 2f9cd55 d1b54ac c4ad27f d1b54ac cd693fd d1b54ac cd693fd d1b54ac 4b95bd0 b377514 4b95bd0 3d6904e 2f9cd55 3d6904e a881bae f330958 a881bae f330958 2f9cd55 a881bae d1b54ac b377514 3d6904e c4ad27f 4b95bd0 c4ad27f e6a0ebd 3f0c27c 3d6904e 4b95bd0 a881bae 4b95bd0 a881bae 4b95bd0 a881bae 4b95bd0 3f0c27c 3d6904e e6a0ebd 3f0c27c 3d6904e 4b95bd0 e6a0ebd 2f9cd55 b377514 d1b54ac c4ad27f d1b54ac b377514 e6a0ebd a881bae d1b54ac a881bae d1b54ac b377514 c4ad27f 5828d2c d1b54ac c4ad27f 3d6904e d1b54ac 5616d3e c4ad27f d1b54ac 2f9cd55 d1b54ac b377514 d1b54ac 5828d2c d1b54ac 5828d2c d1b54ac c4ad27f d1b54ac 2f9cd55 d1b54ac 4b95bd0 b377514 4b95bd0 5616d3e 2f9cd55 b377514 2f9cd55 b377514 f330958 b377514 2f9cd55 b377514 f330958 b377514 f330958 2f9cd55 b377514 2f9cd55 b377514 5616d3e b377514 5616d3e 2f9cd55 b377514 2f9cd55 4b95bd0 f010e45 c4ad27f f010e45 d1b54ac 4b95bd0 2f9cd55 4b95bd0 b377514 4b95bd0 b377514 92ddc2e 2f9cd55 92ddc2e b377514 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 |
import gradio as gr
import os
from huggingface_hub import InferenceClient
import random
from typing import Generator, Dict, List, Tuple, Optional
# Authenticate against the HF Inference API with the token from the
# environment (a missing token falls back to anonymous, rate-limited access).
hf_token = os.getenv("HF_TOKEN")
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=hf_token)
# Story genres with genre-specific example prompts.
# BUG FIX: the fairy_tale entries were missing the commas between them, so
# Python's implicit string concatenation merged all five prompts into a
# single string (the genre appeared to have only one starter).
GENRE_EXAMPLES = {
    "fairy_tale": [
        "I follow the shimmer of fairy dust into a hidden forest",
        "I meet a talking rabbit who claims to know a secret about the king’s lost crown",
        "A tiny dragon appears at my window, asking for help to find its mother",
        "I step into a clearing where the trees whisper ancient riddles",
        "A friendly witch invites me into her cozy cottage, offering a warm cup of tea"
    ],
    "fantasy": [
        "I enter the ancient forest seeking the wizard's tower",
        "I approach the dragon cautiously with my shield raised",
        "I examine the mysterious runes carved into the stone altar",
        "I try to bargain with the elven council for safe passage"
    ],
    "sci-fi": [
        "I hack into the space station's mainframe",
        "I investigate the strange signal coming from the abandoned planet",
        "I negotiate with the alien ambassador about the peace treaty",
        "I try to repair my damaged spacecraft before oxygen runs out"
    ],
    "mystery": [
        "I examine the crime scene for overlooked evidence",
        "I question the nervous butler about the night of the murder",
        "I follow the suspicious figure through the foggy streets",
        "I check the victim's diary for hidden clues"
    ],
    "horror": [
        "I slowly open the creaking door to the basement",
        "I read the forbidden text while the candles flicker",
        "I hide under the bed as footsteps approach",
        "I investigate the strange noises coming from the attic"
    ],
    "western": [
        "I challenge the outlaw to a duel at high noon",
        "I track the bandits through the desert canyon",
        "I enter the saloon looking for information",
        "I defend the stagecoach from the approaching raiders"
    ],
    "cyberpunk": [
        "I jack into the corporate mainframe to steal data",
        "I negotiate with the street gang for cybernetic upgrades",
        "I hide in the neon-lit alleyway from corporate security",
        "I meet my mysterious client in the underground bar"
    ],
    "historical": [
        "I attend the royal ball hoping to meet the mysterious count",
        "I join the resistance against the occupying forces",
        "I navigate the dangerous politics of the royal court",
        "I set sail on a voyage to discover new lands"
    ],
    "post-apocalyptic": [
        "I scavenge the abandoned shopping mall for supplies",
        "I approach the fortified settlement seeking shelter",
        "I navigate through the radioactive zone using my old map",
        "I hide from the approaching group of raiders"
    ],
    "steampunk": [
        "I pilot my airship through the lightning storm",
        "I present my new invention to the Royal Academy",
        "I investigate the mysterious clockwork automaton",
        "I sneak aboard the emperor's armored train"
    ]
}
# 2. Add constants at the top for magic numbers
MAX_HISTORY_LENGTH = 20  # formatted messages (2 per exchange) before a summary is injected
MEMORY_WINDOW = 10  # recent formatted messages kept verbatim alongside the summary
MAX_TOKENS = 2048  # Doubled for longer responses
TEMPERATURE = 0.8  # Slightly increased for more creative responses
TOP_P = 0.95  # nucleus-sampling cutoff for chat_completion
MIN_RESPONSE_LENGTH = 200  # Minimum characters before yielding response
def get_examples_for_genre(genre):
    """Return the example prompts for *genre*; unknown genres fall back to fantasy."""
    try:
        return GENRE_EXAMPLES[genre]
    except KeyError:
        return GENRE_EXAMPLES["fantasy"]
def get_enhanced_system_prompt(genre=None):
    """Build the storyteller system prompt, optionally themed for *genre*.

    A falsy genre (None or "") defaults to "fantasy".
    """
    active_genre = genre or "fantasy"
    return f"""You are an interactive storyteller creating an immersive {active_genre} choose-your-own-adventure story.
For each response you MUST:
1. Write at least 100 words describing the scene, using vivid sensory details
2. Include character dialogue or thoughts that reveal personality and motivations
3. Create a strong sense of atmosphere appropriate for {active_genre}
4. End EVERY response with exactly three distinct choices, formatted as:
- Option 1: [Complete sentence describing a possible action]
- Option 2: [Complete sentence describing a possible action]
- Option 3: [Complete sentence describing a possible action]
Never respond with just one word. Always provide a detailed scene and three choices.
If the user's input is unclear, interpret their intent and continue the story naturally.
Keep the story cohesive by referencing previous events and choices."""
def create_story_summary(chat_history):
    """Return a system message asking the model to summarize the story so far.

    Returns None when the history is short (two exchanges or fewer) and no
    summary is needed.  The previous version also built a transcript string
    from the history that was never used; that dead code has been removed.
    """
    if len(chat_history) <= 2:
        return None
    return {
        "role": "system",
        "content": "The conversation history is getting long. Please create a brief summary of the key plot points and character development so far to help maintain context without exceeding token limits."
    }
def format_history_for_gradio(history_tuples):
    """Convert (user, bot) tuples into Gradio 'messages' format (role/content dicts)."""
    return [
        {"role": role, "content": text}
        for user_text, bot_text in history_tuples
        for role, text in (("user", user_text), ("assistant", bot_text))
    ]
# 1. Add type hints for better code maintainability
# 4. Add input validation
def respond(
    message: str,
    chat_history: List[Tuple[str, str]],
    genre: Optional[str] = None,
    use_full_memory: bool = True
) -> Generator[List[Dict[str, str]], None, None]:
    """Stream a story continuation for *message* given the conversation history.

    Yields the full chat history (messages format) each time the in-progress
    model reply is long enough and contains the final "Option 3:" choice.

    Fixes over the previous version:
    - ``return chat_history`` inside a generator silently discards the value;
      an empty message now yields the unchanged history instead.
    - The chatbot component is configured with ``type="messages"``, so the
      history may arrive as role/content dicts rather than (user, bot)
      tuples; both shapes are now accepted.
    - If the stream ended without ever satisfying the yield condition, the
      reply was silently dropped; a final fallback yield now emits it.
    """

    def _as_tuples(history):
        """Normalize history to (user, bot) tuples; accepts messages-format dicts."""
        if not history:
            return []
        if isinstance(history[0], dict):
            pairs = []
            pending_user = ""
            for item in history:
                if item.get("role") == "user":
                    pending_user = item.get("content", "")
                elif item.get("role") == "assistant":
                    pairs.append((pending_user, item.get("content", "")))
                    pending_user = ""
            return pairs
        return [tuple(pair) for pair in history]

    history_tuples = _as_tuples(chat_history)

    if not message.strip():
        # Nothing to do: re-emit the current history so the UI is unchanged.
        yield format_history_for_gradio(history_tuples)
        return

    if genre and genre not in GENRE_EXAMPLES:
        genre = "fantasy"  # fallback to default
    system_message = get_enhanced_system_prompt(genre)

    # Flatten (user, bot) tuples into the alternating message list the API expects.
    formatted_history = []
    for user_msg, bot_msg in history_tuples:
        formatted_history.append({"role": "user", "content": user_msg})
        formatted_history.append({"role": "assistant", "content": bot_msg})

    api_messages = [{"role": "system", "content": system_message}]

    # Full memory: send everything, summarizing the oldest part once the
    # history outgrows MAX_HISTORY_LENGTH.  Partial memory: send only the
    # last MEMORY_WINDOW exchanges (two messages per exchange).
    if use_full_memory and formatted_history:
        if len(formatted_history) > MAX_HISTORY_LENGTH:
            summary_instruction = create_story_summary(
                history_tuples[:len(history_tuples) - 5]
            )
            if summary_instruction:
                api_messages.append(summary_instruction)
            api_messages.extend(formatted_history[-MEMORY_WINDOW:])
        else:
            api_messages.extend(formatted_history)
    elif formatted_history:
        api_messages.extend(formatted_history[-MEMORY_WINDOW * 2:])

    # Add current user message
    api_messages.append({"role": "user", "content": message})

    # Special handling for story initialization
    if not history_tuples or message.lower() in ["start", "begin", "begin my adventure"]:
        api_messages.append({
            "role": "system",
            "content": f"Begin a new {genre or 'fantasy'} adventure with an intriguing opening scene. Introduce the protagonist without assuming too much about them."
        })

    bot_message = ""
    yielded = False
    try:
        for response_chunk in client.chat_completion(
            api_messages,
            max_tokens=MAX_TOKENS,
            stream=True,
            temperature=TEMPERATURE,
            top_p=TOP_P,
        ):
            delta = response_chunk.choices[0].delta.content
            if delta:
                bot_message += delta
                # Only surface the reply once it is substantial and contains
                # the final choice marker, to avoid flashing partial scenes.
                if len(bot_message.strip()) >= MIN_RESPONSE_LENGTH and "Option 3:" in bot_message:
                    yielded = True
                    yield format_history_for_gradio(history_tuples + [(message, bot_message)])
        if bot_message and not yielded:
            # The model finished without the expected "Option 3:" marker (or
            # the reply was short): emit what we got rather than dropping it.
            yield format_history_for_gradio(history_tuples + [(message, bot_message)])
    except Exception as e:
        error_message = f"Story magic temporarily interrupted. Please try again. (Error: {str(e)})"
        yield format_history_for_gradio(history_tuples + [(message, error_message)])
def save_story(chat_history):
    """Convert chat history to markdown for download.

    Accepts either (user, bot) tuples or Gradio messages-format dicts — the
    chatbot is configured with ``type="messages"``, so dicts are what it
    actually delivers; the old tuple-unpacking turned each dict into the
    literal strings "role"/"content".
    """
    if not chat_history:
        return "No story to save yet!"
    parts = ["# My Interactive Adventure\n\n"]
    for entry in chat_history:
        if isinstance(entry, dict):
            if entry.get("role") == "user":
                parts.append(f"**Player:** {entry.get('content', '')}\n\n")
            else:
                parts.append(f"**Story:** {entry.get('content', '')}\n\n---\n\n")
        else:
            user_msg, bot_msg = entry
            parts.append(f"**Player:** {user_msg}\n\n")
            parts.append(f"**Story:** {bot_msg}\n\n---\n\n")
    return "".join(parts)
# UI layout and event wiring: chat pane on the left, adventure settings and
# genre-specific story-starter buttons on the right.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🔮 Interactive Story Time")
    gr.Markdown("Create a completely unique literary world, one choice at a time. Dare to explore the unknown.")
    with gr.Row():
        with gr.Column(scale=3):
            # Chat window + user input.
            # NOTE(review): type="messages" means the chatbot delivers
            # role/content dicts to its handlers, while respond() and
            # save_story() iterate the history as (user, bot) tuples —
            # confirm the two shapes agree.
            chatbot = gr.Chatbot(
                height=700,  # Increased height
                bubble_full_width=True,  # Allow bubbles to use full width
                show_copy_button=True,
                avatar_images=(None, "🧙"),
                type="messages",
                container=True,
                scale=1,
                min_width=800  # Ensure minimum width
            )
            msg = gr.Textbox(
                placeholder="Describe what you want to do next in the story...",
                container=False,
                scale=4,
            )
            with gr.Row():
                submit = gr.Button("Continue Story", variant="primary")
                clear = gr.Button("Start New Adventure")
        with gr.Column(scale=1):
            gr.Markdown("## Adventure Settings")
            genre = gr.Dropdown(
                choices=list(GENRE_EXAMPLES.keys()),
                label="Story Genre",
                info="Choose the theme of your next adventure",
                value="fantasy"
            )
            full_memory = gr.Checkbox(
                label="Full Story Memory",
                value=True,
                info="When enabled, the AI tries to remember the entire story. If disabled, only the last few exchanges are used."
            )
            gr.Markdown("## Story Starters")
            # Four placeholder buttons whose labels are replaced with
            # genre-specific example prompts (see update_starter_buttons).
            starter_btn1 = gr.Button("Starter 1")
            starter_btn2 = gr.Button("Starter 2")
            starter_btn3 = gr.Button("Starter 3")
            starter_btn4 = gr.Button("Starter 4")
            starter_buttons = [starter_btn1, starter_btn2, starter_btn3, starter_btn4]

    # 1) Return one label string per starter button for the chosen genre.
    def update_starter_buttons(selected_genre):
        """Update starter buttons with examples for the selected genre."""
        examples = get_examples_for_genre(selected_genre)
        results = []
        for i in range(4):
            if i < len(examples):
                # Plain strings: Gradio treats a string output as the
                # component's new value (here, the button label).
                results.append(examples[i])
            else:
                results.append("")  # Empty label when the genre has < 4 examples
        return tuple(results)  # Tuple of 4 label strings

    # 2) Precompute labels for the default "fantasy" genre so the buttons
    # don't stay "Starter X" on page load; applied in the .load() event below.
    initial_button_data = update_starter_buttons("fantasy")  # tuple of 4 label strings

    # 3) Copy the clicked starter's label text into the message box.
    def pick_starter(starter_text, chat_history, selected_genre, memory_flag):
        # Only the starter text is used; the remaining inputs are ignored here.
        return starter_text

    # 4) Connect each starter button: fill the textbox, then immediately
    # continue the story with that text via respond().
    for starter_button in starter_buttons:
        starter_button.click(
            fn=pick_starter,
            inputs=[starter_button, chatbot, genre, full_memory],
            outputs=[msg],
            queue=False
        ).then(
            fn=respond,
            inputs=[msg, chatbot, genre, full_memory],
            outputs=[chatbot],
            queue=False
        )

    # 5) Dynamically update the 4 buttons if the user changes the genre
    genre.change(
        fn=update_starter_buttons,
        inputs=[genre],
        outputs=starter_buttons
    )

    # Handler for user input (Enter key in the textbox and the submit button).
    msg.submit(respond, [msg, chatbot, genre, full_memory], [chatbot])
    submit.click(respond, [msg, chatbot, genre, full_memory], [chatbot])

    # Clear the chatbot and the textbox for a new adventure.
    clear.click(lambda: [], None, chatbot, queue=False)
    clear.click(lambda: "", None, msg, queue=False)

    # "Download My Story" row
    with gr.Row():
        save_btn = gr.Button("Download My Story", variant="secondary")
        story_output = gr.Markdown(visible=False)
    save_btn.click(save_story, inputs=[chatbot], outputs=[story_output])
    # NOTE(review): this second handler writes the literal True into the
    # Markdown component, and the js snippet looks up an element id
    # ('story_output') that is never assigned via elem_id — confirm both
    # are intentional.
    save_btn.click(
        fn=lambda: True,
        inputs=None,
        outputs=story_output,
        js="() => {document.getElementById('story_output').scrollIntoView();}",
        queue=False
    )

    # 6) Apply initial_button_data to the 4 starter buttons on page load.
    def load_initial_buttons():
        # Return the precomputed tuple of 4 label strings.
        return initial_button_data

    demo.load(fn=load_initial_buttons, outputs=starter_buttons, queue=False)
# Run the app
if __name__ == "__main__":
    # Bind to all interfaces on port 7860 — the conventional HF Spaces port.
    demo.launch(server_name="0.0.0.0", server_port=7860)
|