import gradio as gr
import os
from huggingface_hub import InferenceClient
import random
from typing import List, Tuple
# Get token from environment variable
hf_token = os.environ.get("HF_TOKEN")
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=hf_token)
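# Note: requests go to the Hugging Face Inference API; without a valid HF_TOKEN
# (e.g. set as a Space secret) they may be rate-limited or rejected.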
# Story genres and starter prompts
GENRE_EXAMPLES = {
    "Fantasy": [
        "You enter the ancient forest seeking the wizard's tower.",
        "Approaching the dragon cautiously, you raise your shield."
    ],
    "Sci-Fi": [
        "Hacking into the space station's mainframe, you uncover a secret.",
        "Investigating the strange signal from the abandoned planet, you find more than you expected."
    ],
    "Mystery": [
        "Examining the crime scene, you notice an overlooked clue.",
        "Following the suspicious figure through foggy streets leads you to a discovery."
    ],
    "Horror": [
        "You slowly open the creaking door to the basement.",
        "Reading the forbidden text, the candles flicker ominously."
    ]
}

# System prompt for generating stories
def get_story_prompt(genre: str) -> str:
    return f"""You are an AI storyteller crafting an interactive {genre} adventure.
Write engaging scenes with vivid details and always end each response with three numbered choices.
Follow the format:
1. [Complete action-based choice]
2. [Complete action-based choice]
3. [Complete action-based choice]"""

# Generate initial story
def generate_story_intro(genre: str) -> List[Tuple[str, str]]:
    prompt = get_story_prompt(genre)
    example = random.choice(GENRE_EXAMPLES[genre])
    # text_generation() returns the generated text as a plain string
    response = client.text_generation(prompt=f"{prompt}\n\nUser: {example}\nAssistant:", max_new_tokens=300)
    # Return a single (user, assistant) turn so the Chatbot component can render it
    return [(example, response.strip())]

# Continue the story
def continue_story(user_choice: str, history: List[Tuple[str, str]], genre: str) -> List[Tuple[str, str]]:
    prompt = get_story_prompt(genre)
    # Keep only the last five turns to stay within the model's context window
    conversation = "\n".join(f"User: {turn[0]}\nAssistant: {turn[1]}" for turn in history[-5:])
    full_prompt = f"{prompt}\n\n{conversation}\nUser: {user_choice}\nAssistant:"
    response = client.text_generation(prompt=full_prompt, max_new_tokens=300)
    # Append the new turn so the Chatbot shows the full conversation
    return history + [(user_choice, response.strip())]

# Reset the conversation
def reset_story() -> Tuple[List[Tuple[str, str]], str]:
    # Two return values match the two outputs wired to the Reset button
    return [], ""

# Gradio UI Setup
with gr.Blocks() as demo:
    gr.Markdown(
        """
# 🌟 AI Story Studio
**Create Your Own Adventure with AI!**

Choose a genre, start your journey, and guide the story with your choices.

## 🕹️ How to Play:
1. **Pick a genre** from the dropdown.
2. **Start with a story prompt** or enter your own beginning.
3. **Choose from the AI's options** or type your own response to shape the adventure!

✨ *Tip: Your choices affect the story's outcome!*
"""
    )

    with gr.Row():
        genre_dropdown = gr.Dropdown(
            choices=list(GENRE_EXAMPLES.keys()), label="Select Genre", value="Fantasy"
        )
        start_story_btn = gr.Button("Start New Story")

    chat_history = gr.Chatbot(height=400)
    user_input = gr.Textbox(placeholder="Type your next move...")

    with gr.Row():
        submit_btn = gr.Button("Continue Story", variant="primary")
        clear_btn = gr.Button("Reset")

    # Function connections: the Chatbot value (a list of (user, assistant) tuples)
    # is passed back into continue_story as the running history.
    start_story_btn.click(fn=generate_story_intro, inputs=[genre_dropdown], outputs=chat_history)
    submit_btn.click(fn=continue_story, inputs=[user_input, chat_history, genre_dropdown], outputs=chat_history)
    clear_btn.click(fn=reset_story, inputs=[], outputs=[chat_history, user_input])
# Launch the app
if __name__ == "__main__":
    demo.launch()
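
# To run locally (assumes gradio and huggingface_hub are installed):
#   export HF_TOKEN=your_token_here   # optional, but avoids rate limits
#   python app.py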