Spaces: Running on Zero
v2
demo_app.py CHANGED (+50 -18)
@@ -47,8 +47,8 @@ pipe.vae = pipe.vae.to("cuda")
 pipe = pipe.to("cuda")

 pipe.load_lora_weights(
-    "
-    weight_name="
+    "sergidev/IllustrationTTV",
+    weight_name="hunyuan_flat_color_v2.safetensors",
     adapter_name="hyvid_lora_adapter"
 )
 pipe.set_adapters("hyvid_lora_adapter", 1.2)
@@ -59,7 +59,7 @@ torch.cuda.empty_cache()
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024

-@spaces.GPU(duration=120)
+@spaces.GPU(duration=120)
 def generate(
     prompt,
     height,
@@ -86,13 +86,18 @@ def generate(
     ).frames[0]

     output_path = "output.mp4"
-    export_to_video(output, output_path, fps=fps)
+    export_to_video(output, output_path, fps=fps)
     torch.cuda.empty_cache()
     gc.collect()
     return output_path

-
-# Gradio Interface
+def apply_preset(preset_name, *current_values):
+    if preset_name == "Higher Resolution":
+        return [608, 448, 24, 29, 12]
+    elif preset_name == "More Frames":
+        return [512, 320, 42, 30, 14]
+    return current_values
+
 css = """
 #col-container {
     margin: 0 auto;
@@ -131,6 +136,18 @@ css = """
     padding: 15px;
     border-radius: 8px;
     margin-bottom: 1em;
+    width: 100%;
+}
+
+.prompt-textbox {
+    min-height: 80px !important;
+}
+
+.preset-buttons {
+    display: flex;
+    gap: 10px;
+    justify-content: center;
+    margin-bottom: 1em;
 }

 .support-text {
@@ -157,17 +174,25 @@ with gr.Blocks(css=css, theme="dark") as demo:
         """Transform your text descriptions into anime-style videos using state-of-the-art AI technology.
         This space uses the HunyuanVideo model to generate high-quality animated sequences.

-        If you find this useful, please consider
+        If you find this useful, please consider giving the space a ❤️ and supporting me on [Ko-Fi](https://ko-fi.com/sergidev)!""",
         elem_classes=["description"]
     )

-    with gr.
-        prompt = gr.
+    with gr.Column(elem_classes=["prompt-container"]):
+        prompt = gr.Textbox(
             label="Prompt",
             placeholder="Enter your prompt here (e.g., 'a cute anime girl walking in a garden')",
             show_label=False,
+            elem_classes=["prompt-textbox"],
+            lines=3
         )
-
+
+    with gr.Row():
+        run_button = gr.Button("🎨 Generate", variant="primary", size="lg")
+
+    with gr.Row(elem_classes=["preset-buttons"]):
+        preset_high_res = gr.Button("📺 Higher Resolution Preset")
+        preset_more_frames = gr.Button("🎞️ More Frames Preset")

     with gr.Row():
         result = gr.Video(label="Generated Video")
@@ -181,19 +206,19 @@ with gr.Blocks(css=css, theme="dark") as demo:
             value=-1,
         )
     with gr.Row():
-        height = gr.Slider(
+        height = gr.Slider(
             label="Height",
             minimum=256,
             maximum=MAX_IMAGE_SIZE,
-            step=16,
-            value=
+            step=16,
+            value=608,
         )
         width = gr.Slider(
             label="Width",
             minimum=256,
             maximum=MAX_IMAGE_SIZE,
             step=16,
-            value=
+            value=448,
         )
     with gr.Row():
         num_frames = gr.Slider(
@@ -201,30 +226,37 @@ with gr.Blocks(css=css, theme="dark") as demo:
             minimum=1.0,
             maximum=257.0,
             step=1,
-            value=
+            value=24,
         )
         num_inference_steps = gr.Slider(
             label="Number of inference steps",
             minimum=1,
             maximum=50,
             step=1,
-            value=
+            value=29,
        )
         fps = gr.Slider(
             label="Frames per second",
             minimum=1,
             maximum=60,
             step=1,
-            value=
+            value=12,
         )

     # Event handling
     run_button.click(
         fn=generate,
         inputs=[prompt, height, width, num_frames, num_inference_steps, seed, fps],
-        # Added fps to inputs, fixed height/width order
         outputs=[result],
     )

+    # Preset button handlers
+    preset_high_res.click(
+        fn=lambda: apply_preset("Higher Resolution"),
+        outputs=[height, width, num_frames, num_inference_steps, fps]
+    )

-
+    preset_more_frames.click(
+        fn=lambda: apply_preset("More Frames"),
+        outputs=[height, width, num_frames, num_inference_steps, fps]
+    )
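A note on the LoRA calls in the first hunk: `load_lora_weights` pulls a LoRA from the Hub by repo id and file name, and `set_adapters` activates it at a given scale. A minimal sketch of that wiring follows; the base checkpoint id and dtype are assumptions (the diff never shows how `pipe` is built), and only the two LoRA calls mirror the commit.

```python
import torch
from diffusers import HunyuanVideoPipeline

# Assumed base checkpoint and dtype; not shown in this diff.
pipe = HunyuanVideoPipeline.from_pretrained(
    "hunyuanvideo-community/HunyuanVideo",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

# As in the commit: fetch the LoRA from the sergidev/IllustrationTTV repo,
# name the adapter, then activate it.
pipe.load_lora_weights(
    "sergidev/IllustrationTTV",
    weight_name="hunyuan_flat_color_v2.safetensors",
    adapter_name="hyvid_lora_adapter",
)
pipe.set_adapters("hyvid_lora_adapter", 1.2)
```

A scale above 1.0, as used here, strengthens the adapter's influence relative to its trained strength.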
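The second and third hunks sit around the Space's `generate` function: `@spaces.GPU(duration=120)` asks ZeroGPU for a GPU slice of up to 120 seconds per call, and `export_to_video(..., fps=fps)` writes the frames out at the frame rate chosen in the UI. A hedged sketch of that path follows; the pipeline call arguments and the seed handling are placeholders rather than code taken from the diff.

```python
import gc

import spaces
import torch
from diffusers import HunyuanVideoPipeline
from diffusers.utils import export_to_video

# Assumed base checkpoint (not shown in the diff); LoRA setup as in the previous sketch.
pipe = HunyuanVideoPipeline.from_pretrained(
    "hunyuanvideo-community/HunyuanVideo", torch_dtype=torch.float16
).to("cuda")


@spaces.GPU(duration=120)  # ZeroGPU: hold a GPU for at most 120 s per call
def generate(prompt, height, width, num_frames, num_inference_steps, seed, fps):
    # Placeholder seeding; the diff does not show how `seed` is consumed.
    generator = None if seed == -1 else torch.Generator("cuda").manual_seed(int(seed))

    output = pipe(
        prompt=prompt,
        height=int(height),
        width=int(width),
        num_frames=int(num_frames),
        num_inference_steps=int(num_inference_steps),
        generator=generator,
    ).frames[0]

    output_path = "output.mp4"
    export_to_video(output, output_path, fps=int(fps))  # fps comes from the UI slider
    torch.cuda.empty_cache()  # free GPU memory between requests
    gc.collect()
    return output_path
```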
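Finally, the preset wiring added at the bottom relies on a Gradio convention worth spelling out: a `.click` handler with no `inputs` calls its `fn` with no arguments, and the returned list is spread across the components in `outputs`, in order. The lists returned by `apply_preset` therefore have to line up with `[height, width, num_frames, num_inference_steps, fps]`. A self-contained sketch of just that pattern (slider ranges and preset values copied from the diff; labels and layout kept minimal):

```python
import gradio as gr

MAX_IMAGE_SIZE = 1024

def apply_preset(preset_name, *current_values):
    # Each preset is ordered as [height, width, num_frames, num_inference_steps, fps].
    if preset_name == "Higher Resolution":
        return [608, 448, 24, 29, 12]
    elif preset_name == "More Frames":
        return [512, 320, 42, 30, 14]
    return current_values

with gr.Blocks() as demo:
    height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=16, value=608)
    width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=16, value=448)
    num_frames = gr.Slider(label="Number of frames", minimum=1, maximum=257, step=1, value=24)
    num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=50, step=1, value=29)
    fps = gr.Slider(label="Frames per second", minimum=1, maximum=60, step=1, value=12)

    preset_high_res = gr.Button("📺 Higher Resolution Preset")
    preset_more_frames = gr.Button("🎞️ More Frames Preset")

    # No `inputs`: the lambda takes nothing, and the returned list is mapped onto
    # the five output sliders in order.
    preset_high_res.click(
        fn=lambda: apply_preset("Higher Resolution"),
        outputs=[height, width, num_frames, num_inference_steps, fps],
    )
    preset_more_frames.click(
        fn=lambda: apply_preset("More Frames"),
        outputs=[height, width, num_frames, num_inference_steps, fps],
    )

if __name__ == "__main__":
    demo.launch()
```

Because these handlers pass no `inputs`, `apply_preset` only ever receives the preset name here; the `*current_values` fallback would only matter if a handler were wired with `inputs=[...]`.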