Update app.py
app.py CHANGED
@@ -60,7 +60,7 @@ optimize_pipeline_(pipe,
 )
 
 
-
+default_prompt_t2v = "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage."
 default_negative_prompt = "色调艳丽, 过曝, 静态, 细节模糊不清, 字幕, 风格, 作品, 画作, 画面, 静止, 整体发灰, 最差质量, 低质量, JPEG压缩残留, 丑陋的, 残缺的, 多余的手指, 画得不好的手部, 画得不好的脸部, 畸形的, 毁容的, 形态畸形的肢体, 手指融合, 静止不动的画面, 杂乱的背景, 三条腿, 背景人很多, 倒着走"
 
 

The default negative prompt is the standard Chinese one shipped with Wan models; in English it reads roughly: "gaudy colors, overexposed, static, blurry details, subtitles, style, artwork, painting, frame, still, overall grayish, worst quality, low quality, JPEG compression artifacts, ugly, mutilated, extra fingers, poorly drawn hands, poorly drawn face, deformed, disfigured, malformed limbs, fused fingers, motionless frame, cluttered background, three legs, many people in the background, walking backwards."
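These two defaults become the prompt and negative_prompt of the generation call. As a rough sketch of how they are plausibly consumed (the call shape is an assumption; `pipe` is the pipeline object visible in the `optimize_pipeline_(pipe, ...)` context above, and the numeric values mirror the UI defaults shown later in this diff):

```python
# Sketch only: how generate_video plausibly forwards these defaults to the
# Wan 2.2 pipeline. Parameter names follow the diffusers Wan pipeline; the
# exact call in app.py is not shown in this diff.
frames = pipe(
    prompt=default_prompt_t2v,
    negative_prompt=default_negative_prompt,
    num_frames=81,           # assumed example, derived from the duration slider
    num_inference_steps=4,   # Lightning LoRA default (steps slider)
    guidance_scale=1.0,      # "high noise stage" slider default
    guidance_scale_2=3.0,    # "low noise stage" slider default (Wan 2.2 runs two experts)
).frames[0]
```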
@@ -90,11 +90,11 @@ def generate_video(
     progress=gr.Progress(track_tqdm=True),
 ):
     """
-    Generate a video from
+    Generate a video from a text prompt using the Wan 2.2 14B T2V model with Lightning LoRA.
 
-    This function takes an input
-    prompt and parameters. It uses an FP8 qunatized Wan 2.2 14B
-    for fast generation in
+    This function takes an input prompt and generates a video animation based on the provided
+    prompt and parameters. It uses an FP8 quantized Wan 2.2 14B Text-to-Video model with Lightning LoRA
+    for fast generation in 4-8 steps.
 
     Args:
         prompt (str): Text prompt describing the desired animation or motion.
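The rewritten docstring names the key ingredients: an FP8 quantized Wan 2.2 14B T2V checkpoint plus a Lightning LoRA that cuts sampling to 4-8 steps. A minimal, hedged sketch of that setup with 🧨 diffusers follows; the repo ids and LoRA filename are assumptions, and the Space's real loading and FP8 quantization code sits outside this hunk:

```python
import torch
from diffusers import WanPipeline

# Base Wan 2.2 T2V pipeline (assumed repo id; per the docstring, the Space
# uses an FP8-quantized variant rather than plain bfloat16 weights).
pipe = WanPipeline.from_pretrained(
    "Wan-AI/Wan2.2-T2V-A14B-Diffusers", torch_dtype=torch.bfloat16
).to("cuda")

# Attach the Lightning LoRA so that 4-8 inference steps are enough. The
# exact filename inside the linked repo is deliberately left a placeholder;
# pick the matching high/low-noise files from the repo.
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",
    weight_name="Wan22-Lightning/<lightning-lora>.safetensors",  # placeholder
)
```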
@@ -153,18 +153,18 @@ def generate_video(
     return video_path, current_seed
 
 with gr.Blocks() as demo:
-    gr.Markdown("# Fast
-    gr.Markdown("run Wan 2.2 in just
+    gr.Markdown("# Fast 4 steps Wan 2.2 T2V (14B) with Lightning LoRA")
+    gr.Markdown("run Wan 2.2 in just 4-8 steps, with [Wan 2.2 Lightning LoRA](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Wan22-Lightning), compatible with 🧨 diffusers")
     with gr.Row():
         with gr.Column():
-            prompt_input = gr.Textbox(label="Prompt", value=
+            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_t2v)
             duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=MAX_DURATION, label="Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
 
             with gr.Accordion("Advanced Settings", open=False):
                 negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                 seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
                 randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
-                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=
+                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
                 guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale - high noise stage")
                 guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=3, label="Guidance Scale 2 - low noise stage")
 
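The duration slider's info string says the requested duration is clamped to the model's supported frame range at a fixed FPS. A small self-contained sketch of that mapping, with assumed example values for the constants (the real `FIXED_FPS`, `MIN_FRAMES_MODEL`, and `MAX_FRAMES_MODEL` are defined earlier in app.py and are not shown in this diff):

```python
# Assumed example values; the actual constants live elsewhere in app.py.
FIXED_FPS = 16
MIN_FRAMES_MODEL = 25
MAX_FRAMES_MODEL = 81

def duration_to_frames(duration_seconds: float) -> int:
    """Clamp a requested duration to the model's supported frame count."""
    frames = int(round(duration_seconds * FIXED_FPS))
    return max(MIN_FRAMES_MODEL, min(MAX_FRAMES_MODEL, frames))

print(duration_to_frames(3.5))  # -> 56 at the assumed 16 fps
```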
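The hunk ends before any event wiring. Purely for context, a typical Gradio hookup for these components might look like the following; this is hypothetical, not part of this commit, and the input order is a guess:

```python
# Hypothetical wiring: generate_video returns (video_path, current_seed), so
# the outputs are a video component (assumed name) plus the seed slider.
video_output = gr.Video(label="Generated Video")  # assumed component
generate_btn = gr.Button("Generate")
generate_btn.click(
    fn=generate_video,
    inputs=[prompt_input, negative_prompt_input, duration_seconds_input,
            seed_input, randomize_seed_checkbox, steps_slider,
            guidance_scale_input, guidance_scale_2_input],
    outputs=[video_output, seed_input],
)
```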