Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -113,11 +113,11 @@ def generate_video(
|
|
113 |
progress=gr.Progress(track_tqdm=True),
|
114 |
):
|
115 |
"""
|
116 |
-
Generate a video from an input image using the Wan 2.2 14B I2V model with
|
117 |
|
118 |
This function takes an input image and generates a video animation based on the provided
|
119 |
-
prompt and parameters. It uses an FP8 quantized Wan 2.2 14B Image-to-Video model with
|
120 |
-
for fast generation in
|
121 |
|
122 |
Args:
|
123 |
input_image (PIL.Image): The input image to animate. Will be resized to target dimensions.
|
@@ -181,8 +181,8 @@ def generate_video(
|
|
181 |
return video_path, current_seed
|
182 |
|
183 |
with gr.Blocks() as demo:
|
184 |
-
gr.Markdown("# Fast 4 steps Wan 2.2 I2V (14B) with
|
185 |
-
gr.Markdown("run Wan 2.2 in just 4-8 steps, with [
|
186 |
with gr.Row():
|
187 |
with gr.Column():
|
188 |
input_image_component = gr.Image(type="pil", label="Input Image (auto-resized to target H/W)")
|
|
|
113 |
progress=gr.Progress(track_tqdm=True),
|
114 |
):
|
115 |
"""
|
116 |
+
Generate a video from an input image using the Wan 2.2 14B I2V model with Lightning LoRA.
|
117 |
|
118 |
This function takes an input image and generates a video animation based on the provided
|
119 |
+
prompt and parameters. It uses an FP8 quantized Wan 2.2 14B Image-to-Video model with Lightning LoRA
|
120 |
+
for fast generation in 4-8 steps.
|
121 |
|
122 |
Args:
|
123 |
input_image (PIL.Image): The input image to animate. Will be resized to target dimensions.
|
|
|
181 |
return video_path, current_seed
|
182 |
|
183 |
with gr.Blocks() as demo:
|
184 |
+
gr.Markdown("# Fast 4 steps Wan 2.2 I2V (14B) with Lightning LoRA")
|
185 |
+
gr.Markdown("run Wan 2.2 in just 4-8 steps, with [Lightning LoRA](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Wan22-Lightning), fp8 quantization & AoT compilation - compatible with 🧨 diffusers and ZeroGPU⚡️")
|
186 |
with gr.Row():
|
187 |
with gr.Column():
|
188 |
input_image_component = gr.Image(type="pil", label="Input Image (auto-resized to target H/W)")
|