import os

import gradio as gr
import torch
from transformers import pipeline

# --- App Configuration ---
TITLE = "✍️ AI Story Outliner"
DESCRIPTION = """
Enter a prompt and get 10 unique story outlines from a powerful AI model.
The app uses **Mistral-7B-v0.1**, a popular and capable open-source model, to generate creative outlines.
**How it works:**
1. Enter your story idea.
2. The AI will generate 10 different story outlines.
3. Each outline has a dramatic beginning and is concise, like a song.
"""
# --- Example Prompts for Storytelling ---
examples = [
    ["The old lighthouse keeper stared into the storm. He'd seen many tempests, but this one was different. This one had eyes..."],
    ["In a city powered by dreams, a young inventor creates a machine that can record them. His first recording reveals a nightmare that doesn't belong to him."],
    ["The knight adjusted his helmet, the dragon's roar echoing in the valley. He was ready for the fight, but not for what the dragon said when it finally spoke."],
    ["She found the old leather-bound journal in her grandfather's attic. The first entry read: 'To relieve stress, I walk in the woods. But today, the woods walked with me.'"],
    ["The meditation app promised to help her 'delete unhelpful thoughts.' She tapped the button, and to her horror, the memory of her own name began to fade..."]
]
# --- Model Initialization ---
# This section loads the Mistral-7B model, which requires authentication.
# It will automatically use the HF_TOKEN secret when deployed on Hugging Face Spaces.
generator = None
model_error = None
try:
    print("Initializing model... This may take a moment.")
    # Explicitly load the token from environment variables (for HF Spaces secrets).
    hf_token = os.environ.get("HF_TOKEN")
    if hf_token:
        print("✅ HF_TOKEN secret found.")
    else:
        # If no token is found, raise an error to prevent the app from crashing later.
        raise ValueError("Hugging Face token not found. Please set the HF_TOKEN secret in your Space settings.")

    # 'mistralai/Mistral-7B-v0.1' is a gated repo, so the token is required.
    # Note: device_map="auto" needs the `accelerate` package installed.
    generator = pipeline(
        "text-generation",
        model="mistralai/Mistral-7B-v0.1",
        token=hf_token,
        torch_dtype=torch.bfloat16,  # Halves memory use compared to float32
        device_map="auto"            # Uses the GPU if available, otherwise the CPU
    )
    print("✅ mistralai/Mistral-7B-v0.1 model loaded successfully!")
except Exception as e:
    model_error = e
    print("--- 🚨 Error loading model ---")
    print(f"Error: {model_error}")
# --- App Logic ---
def generate_stories(prompt: str) -> list[str]:
    """
    Generates 10 story outlines from the loaded model based on the user's prompt.
    """
    print("--- Button clicked. Attempting to generate stories... ---")
    # If the model failed to load during startup, display that error.
    if model_error:
        error_message = f"**Model failed to load during startup.**\n\nPlease check the console logs for details.\n\n**Error:**\n`{str(model_error)}`"
        print(f"Returning startup error: {error_message}")
        return [error_message] * 10

    if not prompt:
        # Return a list of 10 empty strings to clear the outputs.
        return [""] * 10
    try:
        # The [INST] ... [/INST] wrapper follows Mistral's instruct-model format.
        # Mistral-7B-v0.1 is a base (non-instruct) model, so here the tags act
        # only as a plain-text cue rather than trained special tokens.
        story_prompt = f"""[INST] Create a short story outline based on this idea: "{prompt}"
The outline must have three parts: a dramatic hook, a concise ballad, and a satisfying finale. Use emojis for each section header. [/INST]
### 🎬 The Hook
"""
        # Sampling parameters for the pipeline, tuned to produce 10 diverse results.
        params = {
            "max_new_tokens": 250,       # Cap on the length of each outline
            "num_return_sequences": 10,  # One sequence per output slot
            "do_sample": True,           # Sample instead of greedy decoding
            "temperature": 0.8,          # Moderate randomness
            "top_p": 0.95,               # Nucleus sampling
            "pad_token_id": generator.tokenizer.eos_token_id
        }
print("Generating text with the model...")
# Generate 10 different story variations
outputs = generator(story_prompt, **params)
print("✅ Text generation complete.")
# Extract the generated text.
stories = []
for out in outputs:
# The model will generate the prompt plus the continuation. We extract just the new part.
full_text = out['generated_text']
# Add back the part of the prompt we want to see in the output
story_start = "### 🎬 The Hook\n"
# Split by the instruction closing tag to get only the model's response
generated_part = full_text.split("[/INST]")[-1].strip()
stories.append(generated_part)
# Ensure we return exactly 10 stories, padding if necessary.
while len(stories) < 10:
stories.append("Failed to generate a story for this slot.")
return stories
    except Exception as e:
        # Catch any errors that happen DURING generation and display them in the UI.
        print("--- 🚨 Error during story generation ---")
        print(f"Error: {e}")
        runtime_error_message = f"**An error occurred during story generation.**\n\nPlease check the console logs for details.\n\n**Error:**\n`{str(e)}`"
        return [runtime_error_message] * 10
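# Worked example of the extraction step above (illustrative values only):
#
#   full_text = '[INST] Create a short story outline ... [/INST]\n### 🎬 The Hook\nThe storm had eyes...'
#   full_text.split("[/INST]")[-1].strip()
#   -> '### 🎬 The Hook\nThe storm had eyes...'
#
# The seeded header is kept, so each returned outline renders with its first
# section title already in place.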
# --- Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 95% !important;}") as demo:
    gr.Markdown(f"<h1 style='text-align: center;'>{TITLE}</h1>")
    gr.Markdown(DESCRIPTION)

    with gr.Row():
        with gr.Column(scale=1):
            input_area = gr.TextArea(
                lines=5,
                label="Your Story Prompt 👇",
                placeholder="e.g., 'The last dragon on Earth lived not in a cave, but in a library...'"
            )
            generate_button = gr.Button("Generate 10 Outlines ✨", variant="primary")

    gr.Markdown("---")
    gr.Markdown("## 📖 Your 10 Story Outlines")

    # Create 10 Markdown components to display the stories in two columns.
    story_outputs = []
    with gr.Row():
        with gr.Column():
            for i in range(5):
                md = gr.Markdown(label=f"Story Outline {i + 1}")
                story_outputs.append(md)
        with gr.Column():
            for i in range(5, 10):
                md = gr.Markdown(label=f"Story Outline {i + 1}")
                story_outputs.append(md)

    gr.Examples(
        examples=examples,
        inputs=input_area,
        label="Example Story Starters (Click to use)"
    )

    generate_button.click(
        fn=generate_stories,
        inputs=input_area,
        outputs=story_outputs,
        api_name="generate"
    )
if __name__ == "__main__":
    demo.launch()
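# Because the click handler sets api_name="generate", the running app also
# exposes a programmatic endpoint. A minimal client sketch (the Space id
# "user/ai-story-outliner" is a placeholder, and `gradio_client` must be
# installed separately):
#
#   from gradio_client import Client
#
#   client = Client("user/ai-story-outliner")
#   outlines = client.predict("A lighthouse with eyes...", api_name="/generate")
#   print(outlines[0])  # First of the 10 generated outlines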