Update app.py

app.py CHANGED
@@ -6,8 +6,8 @@ import os
 # --- App Configuration ---
 TITLE = "✍️ AI Story Outliner"
 DESCRIPTION = """
-Enter a prompt and get 10 unique story outlines from a
-The app uses **
+Enter a prompt and get 10 unique story outlines from a powerful AI model.
+The app uses **Mistral-7B-v0.1**, a popular and capable open-source model, to generate creative outlines.

 **How it works:**
 1. Enter your story idea.
@@ -25,7 +25,7 @@ examples = [
 ]

 # --- Model Initialization ---
-# This section loads the
+# This section loads the Mistral-7B model, which requires authentication.
 # It will automatically use the HF_TOKEN secret when deployed on Hugging Face Spaces.
 generator = None
 model_error = None
@@ -42,16 +42,15 @@ try:
         # If no token is found, raise an error to prevent the app from crashing later.
         raise ValueError("Hugging Face token not found. Please set the HF_TOKEN secret in your Space settings.")

-    # Using '
+    # Using 'mistralai/Mistral-7B-v0.1'. This model is gated and requires a token.
     generator = pipeline(
         "text-generation",
-        model="microsoft/phi-2",
+        model="mistralai/Mistral-7B-v0.1",
         token=hf_token,
         torch_dtype=torch.bfloat16, # More performant data type
-        device_map="auto",
-        trust_remote_code=True # Required for Phi-2 model
+        device_map="auto" # Will use GPU if available, otherwise CPU
     )
-    print("✅
+    print("✅ mistralai/Mistral-7B-v0.1 model loaded successfully!")

 except Exception as e:
     model_error = e
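Loading a 7B model in bfloat16 needs roughly 15 GB of memory, which can exceed smaller Space GPUs. A minimal sketch of an alternative (not part of this commit): the same pipeline loaded with 4-bit quantization, assuming a CUDA GPU plus the `bitsandbytes` and `accelerate` packages, with `hf_token` read the same way the app does.

```python
# Sketch only: 4-bit loading as a memory-saving alternative to plain bfloat16.
# Assumption: CUDA GPU available, bitsandbytes + accelerate installed.
import os

import torch
from transformers import BitsAndBytesConfig, pipeline

hf_token = os.environ.get("HF_TOKEN")  # same secret the app checks for

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,  # compute in bf16, store weights in 4-bit
)

generator = pipeline(
    "text-generation",
    model="mistralai/Mistral-7B-v0.1",
    token=hf_token,
    device_map="auto",
    model_kwargs={"quantization_config": bnb_config},  # forwarded to from_pretrained
)
```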
@@ -77,10 +76,9 @@ def generate_stories(prompt: str) -> list[str]:
         return [""] * 10

     try:
-        #
-        story_prompt = f"""
-The outline
-Output:
+        # This prompt format is optimized for Mistral instruct models.
+        story_prompt = f"""[INST] Create a short story outline based on this idea: "{prompt}"
+The outline must have three parts: a dramatic hook, a concise ballad, and a satisfying finale. Use emojis for each section header. [/INST]
 ### 🎬 The Hook
 """

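A note on the tags: `[INST] ... [/INST]` is the chat format of Mistral's *Instruct* checkpoints; the base `mistralai/Mistral-7B-v0.1` is not instruction-tuned, so for it the tags are only a convention. A sketch of building the same prompt through the tokenizer's chat template, assuming the Instruct variant were used instead (an assumption, not what this commit does):

```python
# Sketch, assuming 'mistralai/Mistral-7B-Instruct-v0.1'. apply_chat_template
# emits the [INST]...[/INST] wrapping so it never has to be hand-written.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
messages = [{"role": "user", "content": "Create a short story outline based on this idea: ..."}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # -> '<s>[INST] Create a short story outline based on this idea: ... [/INST]'
```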
@@ -89,8 +87,7 @@ Output:
             "max_new_tokens": 250,
             "num_return_sequences": 10,
             "do_sample": True,
-            "temperature": 0.
-            "top_k": 50,
+            "temperature": 0.8,
             "top_p": 0.95,
             "pad_token_id": generator.tokenizer.eos_token_id
         }
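These kwargs go straight into the pipeline call, so one call returns all ten candidates. A minimal sketch, assuming `generator` and `story_prompt` as defined above:

```python
# Minimal sketch, assuming `generator` and `story_prompt` from the code above.
generation_kwargs = {
    "max_new_tokens": 250,       # cap the outline length
    "num_return_sequences": 10,  # ten candidates from a single call
    "do_sample": True,           # sampling must be on for temperature/top_p to apply
    "temperature": 0.8,          # below 1.0: varied but still coherent
    "top_p": 0.95,               # nucleus sampling over the top 95% of probability mass
    "pad_token_id": generator.tokenizer.eos_token_id,  # silences the missing-pad warning
}

outputs = generator(story_prompt, **generation_kwargs)
# `outputs` is a list of 10 dicts, each with a 'generated_text' key.
```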
@@ -107,8 +104,9 @@ Output:
             full_text = out['generated_text']
             # Add back the part of the prompt we want to see in the output
             story_start = "### 🎬 The Hook\n"
-
-
+            # Split by the instruction closing tag to get only the model's response
+            generated_part = full_text.split("[/INST]")[-1].strip()
+            stories.append(generated_part)

         # Ensure we return exactly 10 stories, padding if necessary.
         while len(stories) < 10:
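The hunk defines `story_start` but appends only the split text; presumably the header is re-attached so each outline begins with the hook heading. A sketch of the full loop under that assumption:

```python
# Sketch of the post-processing loop, assuming `outputs` from the pipeline call.
stories = []
for out in outputs:
    full_text = out["generated_text"]
    story_start = "### 🎬 The Hook\n"
    # Everything after the closing [/INST] tag is the model's response.
    generated_part = full_text.split("[/INST]")[-1].strip()
    # Re-attach the seeded header (assumption: this is what story_start is for;
    # the hunk defines it but does not show the use).
    stories.append(story_start + generated_part)

# Pad to exactly ten entries so every output widget receives a value.
while len(stories) < 10:
    stories.append("")
```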
@@ -133,7 +131,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 95% !i
         with gr.Column(scale=1):
             input_area = gr.TextArea(
                 lines=5,
-                label="Your Story Prompt
+                label="Your Story Prompt 👇",
                 placeholder="e.g., 'The last dragon on Earth lived not in a cave, but in a library...'"
             )
             generate_button = gr.Button("Generate 10 Outlines ✨", variant="primary")
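The rest of the layout sits outside this hunk, but a sketch of how the input, button, and ten output widgets likely wire together (the ten Markdown outputs and the stub function body are assumptions):

```python
# Sketch of the likely wiring. `generate_stories` stands in for the app's real
# function from the diff above; ten Markdown outputs are assumed.
import gradio as gr


def generate_stories(prompt: str) -> list[str]:
    # Stand-in for the real function so this sketch runs on its own.
    return [f"Outline {i + 1} for: {prompt}" for i in range(10)]


with gr.Blocks(theme=gr.themes.Soft()) as demo:
    input_area = gr.TextArea(
        lines=5,
        label="Your Story Prompt 👇",
        placeholder="e.g., 'The last dragon on Earth lived not in a cave, but in a library...'",
    )
    generate_button = gr.Button("Generate 10 Outlines ✨", variant="primary")
    story_outputs = [gr.Markdown() for _ in range(10)]
    generate_button.click(fn=generate_stories, inputs=input_area, outputs=story_outputs)
```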
@@ -167,4 +165,4 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 95% !i
     )

 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()