# Hugging Face Space: Interactive Story Adventure (Gradio choose-your-own-adventure app)
import gradio as gr
from huggingface_hub import InferenceClient
import random

# Hosted Zephyr-7B chat model used for all story generation below.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# Story genres mapped to four genre-specific example prompts each.
GENRE_EXAMPLES = {
    "fantasy": [
        "I enter the ancient forest seeking the wizard's tower",
        "I approach the dragon cautiously with my shield raised",
        "I examine the mysterious runes carved into the stone altar",
        "I try to bargain with the elven council for safe passage"
    ],
    "sci-fi": [
        "I hack into the space station's mainframe",
        "I investigate the strange signal coming from the abandoned planet",
        "I negotiate with the alien ambassador about the peace treaty",
        "I try to repair my damaged spacecraft before oxygen runs out"
    ],
    "mystery": [
        "I examine the crime scene for overlooked evidence",
        "I question the nervous butler about the night of the murder",
        "I follow the suspicious figure through the foggy streets",
        "I check the victim's diary for hidden clues"
    ],
    "horror": [
        "I slowly open the creaking door to the basement",
        "I read the forbidden text while the candles flicker",
        "I hide under the bed as footsteps approach",
        "I investigate the strange noises coming from the attic"
    ],
    "western": [
        "I challenge the outlaw to a duel at high noon",
        "I track the bandits through the desert canyon",
        "I enter the saloon looking for information",
        "I defend the stagecoach from the approaching raiders"
    ],
    "cyberpunk": [
        "I jack into the corporate mainframe to steal data",
        "I negotiate with the street gang for cybernetic upgrades",
        "I hide in the neon-lit alleyway from corporate security",
        "I meet my mysterious client in the underground bar"
    ],
    "historical": [
        "I attend the royal ball hoping to meet the mysterious count",
        "I join the resistance against the occupying forces",
        "I navigate the dangerous politics of the royal court",
        "I set sail on a voyage to discover new lands"
    ],
    "post-apocalyptic": [
        "I scavenge the abandoned shopping mall for supplies",
        "I approach the fortified settlement seeking shelter",
        "I navigate through the radioactive zone using my old map",
        "I hide from the approaching group of raiders"
    ],
    "steampunk": [
        "I pilot my airship through the lightning storm",
        "I present my new invention to the Royal Academy",
        "I investigate the mysterious clockwork automaton",
        "I sneak aboard the emperor's armored train"
    ]
}


def get_examples_for_genre(genre):
    """Return a fresh list of example prompts for the selected genre.

    Falls back to the fantasy examples for unknown genres.  A copy is
    returned so callers may mutate the result (e.g. insert a default
    starter) without corrupting the shared GENRE_EXAMPLES table.
    """
    return list(GENRE_EXAMPLES.get(genre, GENRE_EXAMPLES["fantasy"]))
def get_enhanced_system_prompt(genre=None):
    """Generate a detailed storyteller system prompt.

    Args:
        genre: Genre key such as "fantasy" or "sci-fi"; falls back to
            "fantasy" when None or empty.

    Returns:
        A system prompt string instructing the model to write immersive
        scenes and end every reply with exactly three formatted options.
    """
    selected_genre = genre or "fantasy"
    system_message = f"""You are an interactive storyteller creating an immersive {selected_genre} choose-your-own-adventure story.
For each response:
1. Provide vivid sensory descriptions of the scene, environment, and characters
2. Include meaningful dialogue or internal monologue that reveals character motivations
3. End with exactly 3 different possible actions or decisions, each offering a distinct path
4. Maintain consistent world-building and character development
5. Incorporate appropriate atmosphere and tone for a {selected_genre} setting
6. Remember previous choices to create a coherent narrative arc
Format your three options as:
- Option 1: [Complete sentence describing a possible action]
- Option 2: [Complete sentence describing a possible action]
- Option 3: [Complete sentence describing a possible action]
Keep responses engaging but concise (200-300 words maximum). If the user's input doesn't clearly indicate a choice, interpret their intent and move the story forward in the most logical direction."""
    return system_message
def create_story_summary(chat_history):
    """Return a system message asking the model to summarize a long story.

    Args:
        chat_history: List of (user_msg, bot_msg) tuples.

    Returns:
        A {"role": "system", ...} dict the caller can insert before the
        recent-message tail, or None when there is not yet enough history
        (two exchanges or fewer) to be worth summarizing.
    """
    if len(chat_history) <= 2:  # Not enough history to summarize
        return None
    # NOTE: the original also flattened the history into a transcript
    # string here, but never used it — the instruction below is constant.
    return {
        "role": "system",
        "content": "The conversation history is getting long. Please create a brief summary of the key plot points and character development so far to help maintain context without exceeding token limits."
    }
def respond(message, chat_history, genre=None, use_full_memory=True):
    """Stream a story continuation for the player's latest input.

    Args:
        message: The player's latest action/choice text.
        chat_history: List of (user_msg, bot_msg) tuples so far.
        genre: Optional genre key; downstream default is "fantasy".
        use_full_memory: When True, send as much history as fits
            (inserting a summary instruction once it grows long);
            otherwise send only a bounded tail.

    Yields:
        Progressively longer chat-history lists so the Gradio Chatbot
        renders the reply as it streams.  On failure, yields the history
        with a friendly error message appended instead of raising.
    """
    system_message = get_enhanced_system_prompt(genre)
    # Convert (user, bot) tuples into chat-completion message dicts.
    formatted_history = []
    for user_msg, bot_msg in chat_history:
        formatted_history.append({"role": "user", "content": user_msg})
        formatted_history.append({"role": "assistant", "content": bot_msg})
    api_messages = [{"role": "system", "content": system_message}]
    # Token management strategy.
    # Option 1: use the full history, summarizing once it gets long.
    if use_full_memory and formatted_history:
        if len(formatted_history) > 20:  # arbitrary threshold, adjust as needed
            summary_instruction = create_story_summary(chat_history[:len(chat_history) - 5])
            if summary_instruction:
                api_messages.append(summary_instruction)
            # After the summary request, keep only the most recent exchanges.
            for msg in formatted_history[-10:]:
                api_messages.append(msg)
        else:
            # History is short enough to send verbatim.
            for msg in formatted_history:
                api_messages.append(msg)
    # Option 2: bounded memory fallback when full memory is disabled.
    else:
        memory_length = 10  # number of (user, bot) exchanges to keep
        if formatted_history:
            for msg in formatted_history[-memory_length * 2:]:
                api_messages.append(msg)
    # Add the current player message.
    api_messages.append({"role": "user", "content": message})
    # Special handling for story initialization.
    if not chat_history or message.lower() in ["start", "begin", "begin my adventure"]:
        api_messages.append({
            "role": "system",
            "content": f"Begin a new {genre or 'fantasy'} adventure with an intriguing opening scene. Introduce the protagonist without assuming too much about them, allowing the user to shape the character."
        })
    # Generate and stream the response token-by-token.
    bot_message = ""
    try:
        for response in client.chat_completion(
            api_messages,
            max_tokens=512,
            stream=True,
            temperature=0.7,
            top_p=0.95,
        ):
            delta = response.choices[0].delta.content
            if delta:
                bot_message += delta
                # Yield a fresh list each time so Gradio sees a new state.
                new_history = chat_history.copy()
                new_history.append((message, bot_message))
                yield new_history
    except Exception as e:
        error_message = f"Story magic temporarily interrupted. Please try again. (Error: {str(e)})"
        yield chat_history + [(message, error_message)]
def update_examples(genre):
    """Build a refreshed gr.Examples component for the selected genre.

    NOTE(review): this helper is not wired to any event in the UI below
    (genre changes update a read-only textbox instead) — confirm whether
    it is still needed.

    Args:
        genre: Genre key used to look up example prompts.

    Returns:
        A gr.Examples component targeting the global ``msg`` textbox.
    """
    # Prepend the default starter without mutating the shared genre list.
    examples = ["Begin my adventure"] + get_examples_for_genre(genre)
    return gr.Examples(
        examples=[[ex] for ex in examples],
        inputs=msg
    )
# Create interface with additional customization options
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🔮 Interactive Story Adventure")
    gr.Markdown("Immerse yourself in an interactive story where your choices shape the narrative.")
    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                height=500,
                bubble_full_width=False,
                show_copy_button=True,
                avatar_images=(None, "🧙"),
            )
            msg = gr.Textbox(
                placeholder="Describe what you want to do next in the story...",
                container=False,
                scale=4,
            )
            with gr.Row():
                submit = gr.Button("Continue Story", variant="primary")
                clear = gr.Button("Start New Adventure")
        with gr.Column(scale=1):
            gr.Markdown("## Adventure Settings")
            genre = gr.Dropdown(
                choices=list(GENRE_EXAMPLES.keys()),
                label="Story Genre",
                info="Select a genre for your next adventure",
                value="fantasy"
            )
            # Toggle between full-history and bounded-history prompting.
            full_memory = gr.Checkbox(
                label="Full Story Memory",
                value=True,
                info="When enabled, the AI will try to remember the entire story"
            )

    # Initialize with fantasy examples.
    examples_box = gr.Examples(
        examples=[[ex] for ex in get_examples_for_genre("fantasy")],
        inputs=msg,
        label="Examples"
    )

    def update_example_text(genre):
        """Render the selected genre's starter prompts as newline-joined text."""
        return "\n".join(get_examples_for_genre(genre))

    examples_textbox = gr.Textbox(
        value=update_example_text("fantasy"),
        label="Example Story Starters",
        interactive=False
    )
    # Refresh the starter-prompt textbox whenever the genre changes.
    genre.change(
        fn=update_example_text,
        inputs=[genre],
        outputs=[examples_textbox]
    )

    # Wire both Enter-in-textbox and the button to the streaming responder.
    msg.submit(respond, [msg, chatbot, genre, full_memory], [chatbot])
    submit.click(respond, [msg, chatbot, genre, full_memory], [chatbot])
    # Clear the chatbot and input box for a new adventure.
    clear.click(lambda: [], None, chatbot, queue=False)
    clear.click(lambda: "", None, msg, queue=False)

    def save_story(chat_history):
        """Format the chat history as a downloadable Markdown transcript."""
        if not chat_history:
            return "No story to save yet!"
        story_text = "# My Interactive Adventure\n\n"
        for user_msg, bot_msg in chat_history:
            story_text += f"**Player:** {user_msg}\n\n"
            story_text += f"**Story:** {bot_msg}\n\n---\n\n"
        return story_text

    with gr.Row():
        save_btn = gr.Button("Save Story as Markdown", variant="secondary")
        # visible=True and elem_id fix the original, where the Markdown was
        # permanently hidden and the scroll JS targeted a nonexistent id.
        story_output = gr.Markdown(visible=True, elem_id="story_output")
    save_btn.click(save_story, inputs=[chatbot], outputs=[story_output])
    # JS-only handler: scroll the rendered story into view. The original
    # wrote `True` into story_output here, clobbering the saved text.
    save_btn.click(None, None, None, js="() => {document.getElementById('story_output').scrollIntoView();}", queue=False)

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)