import gradio as gr
from huggingface_hub import InferenceClient
import random

# Initialize the inference client
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# Story genres with genre-specific example prompts
GENRE_EXAMPLES = {
    "fantasy": [
        "I enter the ancient forest seeking the wizard's tower",
        "I approach the dragon cautiously with my shield raised",
        "I examine the mysterious runes carved into the stone altar",
        "I try to bargain with the elven council for safe passage"
    ],
    "sci-fi": [
        "I hack into the space station's mainframe",
        "I investigate the strange signal coming from the abandoned planet",
        "I negotiate with the alien ambassador about the peace treaty",
        "I try to repair my damaged spacecraft before oxygen runs out"
    ],
    "mystery": [
        "I examine the crime scene for overlooked evidence",
        "I question the nervous butler about the night of the murder",
        "I follow the suspicious figure through the foggy streets",
        "I check the victim's diary for hidden clues"
    ],
    "horror": [
        "I slowly open the creaking door to the basement",
        "I read the forbidden text while the candles flicker",
        "I hide under the bed as footsteps approach",
        "I investigate the strange noises coming from the attic"
    ],
    "western": [
        "I challenge the outlaw to a duel at high noon",
        "I track the bandits through the desert canyon",
        "I enter the saloon looking for information",
        "I defend the stagecoach from the approaching raiders"
    ],
    "cyberpunk": [
        "I jack into the corporate mainframe to steal data",
        "I negotiate with the street gang for cybernetic upgrades",
        "I hide in the neon-lit alleyway from corporate security",
        "I meet my mysterious client in the underground bar"
    ],
    "historical": [
        "I attend the royal ball hoping to meet the mysterious count",
        "I join the resistance against the occupying forces",
        "I navigate the dangerous politics of the royal court",
        "I set sail on a voyage to discover new lands"
    ],
    "post-apocalyptic": [
        "I scavenge the abandoned shopping mall for supplies",
        "I approach the fortified settlement seeking shelter",
        "I navigate through the radioactive zone using my old map",
        "I hide from the approaching group of raiders"
    ],
    "steampunk": [
        "I pilot my airship through the lightning storm",
        "I present my new invention to the Royal Academy",
        "I investigate the mysterious clockwork automaton",
        "I sneak aboard the emperor's armored train"
    ]
}
def get_examples_for_genre(genre):
    """Get example prompts specific to the selected genre"""
    if genre in GENRE_EXAMPLES:
        return GENRE_EXAMPLES[genre]
    else:
        return GENRE_EXAMPLES["fantasy"]  # Default to fantasy
def get_enhanced_system_prompt(genre=None):
    """Generate a detailed system prompt with optional genre specification"""
    selected_genre = genre or "fantasy"
    system_message = f"""You are an interactive storyteller creating an immersive {selected_genre} choose-your-own-adventure story.
For each response:
1. Provide vivid sensory descriptions of the scene, environment, and characters
2. Include meaningful dialogue or internal monologue that reveals character motivations
3. End with exactly 3 different possible actions or decisions, each offering a distinct path
4. Maintain consistent world-building and character development
5. Incorporate appropriate atmosphere and tone for a {selected_genre} setting
6. Remember previous choices to create a coherent narrative arc
Format your three options as:
- Option 1: [Complete sentence describing a possible action]
- Option 2: [Complete sentence describing a possible action]
- Option 3: [Complete sentence describing a possible action]
Keep responses engaging but concise (200-300 words maximum). If the user's input doesn't clearly indicate a choice, interpret their intent and move the story forward in the most logical direction."""
    return system_message
def create_story_summary(chat_history):
    """Create a concise summary of the story so far if the history gets too long"""
    if len(chat_history) <= 2:
        return None
    story_text = ""
    for user_msg, bot_msg in chat_history:
        story_text += f"User: {user_msg}\nStory: {bot_msg}\n\n"
    summary_instruction = {
        "role": "system",
        "content": "The conversation history is getting long. Please create a brief summary of the key plot points and character development so far to help maintain context without exceeding token limits."
    }
    return summary_instruction
# Helper: convert (user, bot) tuples into the "role"/"content" dictionaries Gradio needs
def format_history_for_gradio(history_tuples):
    """
    Convert a list of (user_msg, bot_msg) tuples
    into a list of dicts for 'type="messages"':
    [
        {"role": "user", "content": "foo"},
        {"role": "assistant", "content": "bar"},
        ...
    ]
    """
    messages = []
    for user_msg, bot_msg in history_tuples:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    return messages
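
# Because the Chatbot below is created with type="messages", the history it hands
# back to callbacks arrives as a list of {"role": ..., "content": ...} entries,
# not as (user, bot) tuples. This inverse helper is an addition (it assumes each
# user turn is followed by the assistant reply that answers it) so that respond()
# and save_story() can keep working with (user, bot) pairs internally.
def format_history_from_gradio(history_messages):
    """Convert messages-format history (role/content dicts) back into (user, bot) tuples."""
    pairs = []
    pending_user = None
    for m in history_messages or []:
        # Entries may be plain dicts or message objects; read both defensively.
        role = m.get("role") if isinstance(m, dict) else getattr(m, "role", None)
        content = m.get("content") if isinstance(m, dict) else getattr(m, "content", None)
        if role == "user":
            pending_user = content
        elif role == "assistant" and pending_user is not None:
            pairs.append((pending_user, content))
            pending_user = None
    return pairs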
def respond(message, chat_history, genre=None, use_full_memory=True):
    """Generate a response based on the current message and conversation history"""
    # The Chatbot delivers its history in messages format; convert it back to
    # (user, bot) tuples before building the API request.
    chat_history = format_history_from_gradio(chat_history)
    system_message = get_enhanced_system_prompt(genre)

    # Convert the (user, bot) history into the format the API request expects
    formatted_history = []
    for user_msg, bot_msg in chat_history:
        formatted_history.append({"role": "user", "content": user_msg})
        formatted_history.append({"role": "assistant", "content": bot_msg})

    api_messages = [{"role": "system", "content": system_message}]

    # Use full memory or only the most recent exchanges
    if use_full_memory and formatted_history:
        if len(formatted_history) > 20:
            summary_instruction = create_story_summary(chat_history[:len(chat_history) - 5])
            if summary_instruction:
                api_messages.append(summary_instruction)
            for msg in formatted_history[-10:]:
                api_messages.append(msg)
        else:
            for msg in formatted_history:
                api_messages.append(msg)
    else:
        memory_length = 10
        if formatted_history:
            for msg in formatted_history[-memory_length * 2:]:
                api_messages.append(msg)

    # Add current user message
    api_messages.append({"role": "user", "content": message})

    # Special handling for story initialization
    if not chat_history or message.lower() in ["start", "begin", "begin my adventure"]:
        api_messages.append({
            "role": "system",
            "content": f"Begin a new {genre or 'fantasy'} adventure with an intriguing opening scene. Introduce the protagonist without assuming too much about them."
        })

    bot_message = ""
    try:
        for response_chunk in client.chat_completion(
            api_messages,
            max_tokens=512,
            stream=True,
            temperature=0.7,
            top_p=0.95,
        ):
            delta = response_chunk.choices[0].delta.content
            if delta:
                bot_message += delta
                # Keep storing the conversation as (user, bot) tuples
                new_history = chat_history.copy()
                new_history.append((message, bot_message))
                # Yield in the dictionary format the Chatbot expects
                yield format_history_for_gradio(new_history)
    except Exception as e:
        error_message = f"Story magic temporarily interrupted. Please try again. (Error: {str(e)})"
        broken_history = chat_history + [(message, error_message)]
        yield format_history_for_gradio(broken_history)
def save_story(chat_history):
    """Convert chat history to markdown for download"""
    # The history arrives in messages format from the Chatbot; convert it first.
    chat_history = format_history_from_gradio(chat_history)
    if not chat_history:
        return "No story to save yet!"
    story_text = "# My Interactive Adventure\n\n"
    for user_msg, bot_msg in chat_history:
        story_text += f"**Player:** {user_msg}\n\n"
        story_text += f"**Story:** {bot_msg}\n\n---\n\n"
    return story_text
# Create interface with additional customization options
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🔮 Interactive Story Adventure")
    gr.Markdown("Immerse yourself in an interactive story where your choices shape the narrative.")

    with gr.Row():
        with gr.Column(scale=3):
            # Chat window + user input
            chatbot = gr.Chatbot(
                height=500,
                bubble_full_width=False,
                show_copy_button=True,
                avatar_images=(None, "🧙"),
                type="messages"  # Use OpenAI-style messages
            )
            msg = gr.Textbox(
                placeholder="Describe what you want to do next in the story...",
                container=False,
                scale=4,
            )
            with gr.Row():
                submit = gr.Button("Continue Story", variant="primary")
                clear = gr.Button("Start New Adventure")

        with gr.Column(scale=1):
            gr.Markdown("## Adventure Settings")
            genre = gr.Dropdown(
                choices=list(GENRE_EXAMPLES.keys()),
                label="Story Genre",
                info="Select a genre for your next adventure",
                value="fantasy"
            )
            full_memory = gr.Checkbox(
                label="Full Story Memory",
                value=True,
                info="When enabled, the AI tries to remember the entire story. If disabled, only the last few exchanges are used."
            )

            gr.Markdown("## Story Starters")
            # -- Create four placeholder buttons for story starters --
            starter_btn1 = gr.Button("Starter 1")
            starter_btn2 = gr.Button("Starter 2")
            starter_btn3 = gr.Button("Starter 3")
            starter_btn4 = gr.Button("Starter 4")
            starter_buttons = [starter_btn1, starter_btn2, starter_btn3, starter_btn4]
    # Function to update the labels of the 4 starter buttons
    def update_starter_buttons(selected_genre):
        # Grab up to 4 examples from the chosen genre
        examples = get_examples_for_genre(selected_genre)
        button_updates = []
        for i in range(4):
            if i < len(examples):
                # gr.update() changes a component's properties (value/visibility) from a callback
                button_updates.append(gr.update(value=examples[i], visible=True))
            else:
                button_updates.append(gr.update(value="(no starter)", visible=False))
        return button_updates
    # Function that populates msg with the chosen starter text before respond is called
    def pick_starter(starter_text, chat_history, selected_genre, memory_flag):
        return starter_text  # This goes into 'msg'

    # Hook each starter button:
    # 1) Put the chosen text into 'msg'
    # 2) Then call 'respond' to update the chatbot
    for starter_button in starter_buttons:
        starter_button.click(
            fn=pick_starter,
            inputs=[starter_button, chatbot, genre, full_memory],
            outputs=[msg],
            queue=False
        ).then(
            fn=respond,
            inputs=[msg, chatbot, genre, full_memory],
            outputs=[chatbot]  # respond is a generator, so this step must run through the queue
        )
    # Connect the genre dropdown to update these 4 starter buttons
    genre.change(
        fn=update_starter_buttons,
        inputs=[genre],
        outputs=starter_buttons
    )
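
    # Without an initial update, the starter buttons keep their "Starter 1"-"Starter 4"
    # placeholder labels until the genre dropdown changes. This load handler is an
    # addition: demo.load fires when the app opens in the browser, so the buttons are
    # filled with the default genre's examples right away.
    demo.load(
        fn=update_starter_buttons,
        inputs=[genre],
        outputs=starter_buttons
    )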
    # -- Chat submission + button events --
    msg.submit(respond, [msg, chatbot, genre, full_memory], [chatbot])
    submit.click(respond, [msg, chatbot, genre, full_memory], [chatbot])

    # Clear the chatbot for a new adventure
    clear.click(lambda: [], None, chatbot, queue=False)
    clear.click(lambda: "", None, msg, queue=False)
| # -- "Download My Story" row -- | |
| with gr.Row(): | |
| save_btn = gr.Button("Download My Story", variant="secondary") | |
| story_output = gr.Markdown(visible=False) | |
| save_btn.click(save_story, inputs=[chatbot], outputs=[story_output]) | |
| save_btn.click( | |
| fn=lambda: True, | |
| inputs=None, | |
| outputs=story_output, | |
| js="() => {document.getElementById('story_output').scrollIntoView();}", | |
| queue=False | |
| ) | |
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
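
# To run this on a Hugging Face Space, the accompanying requirements.txt would at
# minimum list the two imported libraries (exact versions are an assumption; a
# recent Gradio release that supports Chatbot(type="messages") is needed):
#
#   gradio
#   huggingface_hub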