rahul7star committed on
Commit 6328f6d · verified · 1 Parent(s): 23b71a7

Create app_14bt2v.py

Files changed (1)
  1. app_14bt2v.py +66 -0
app_14bt2v.py ADDED
@@ -0,0 +1,66 @@
+ import spaces
+ import os
+ import gradio as gr
+ import torch
+ from diffusers import WanPipeline, AutoencoderKLWan
+ from diffusers.utils import export_to_video
+
+ # Model setup
+ dtype = torch.bfloat16
+ device = "cuda"
+ model_id = "FastDM/Wan2.2-T2V-A14B-Merge-Lightning-V1.0-Diffusers"
+
+ print("Loading model... this may take a while.")
+ vae = AutoencoderKLWan.from_pretrained(  # Wan VAE is kept in float32 for numerical stability
+     model_id, subfolder="vae", torch_dtype=torch.float32
+ )
+ pipe = WanPipeline.from_pretrained(
+     model_id, vae=vae, torch_dtype=dtype
+ ).to(device)
+
+ # Default values
+ DEFAULT_PROMPT = "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage."
+ DEFAULT_NEGATIVE = "bad quality, blurry, distorted, extra limbs, watermark, text"
+
+ @spaces.GPU(duration=60)
+ def generate_video(prompt, negative_prompt, height, width, num_frames, steps, guidance):
+     video = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         height=int(height),  # Gradio sliders may deliver floats; the pipeline expects ints
+         width=int(width),
+         num_frames=int(num_frames),
+         guidance_scale=guidance,
+         num_inference_steps=int(steps),
+     ).frames[0]
+
+     output_path = "t2v_out.mp4"
+     export_to_video(video, output_path, fps=16)
+     return output_path
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# 🎬 Wan2.2 Text-to-Video Demo")
+     gr.Markdown("Generate short AI videos from text prompts.")
+
+     with gr.Row():
+         with gr.Column():
+             prompt = gr.Textbox(label="Prompt", value=DEFAULT_PROMPT, lines=3)
+             negative_prompt = gr.Textbox(label="Negative Prompt", value=DEFAULT_NEGATIVE, lines=2)
+             height = gr.Slider(256, 1280, value=720, step=64, label="Height")
+             width = gr.Slider(256, 1280, value=1280, step=64, label="Width")
+             num_frames = gr.Slider(16, 128, value=81, step=1, label="Number of Frames")
+             steps = gr.Slider(1, 20, value=4, step=1, label="Inference Steps")
+             guidance = gr.Slider(0.1, 10.0, value=1.0, step=0.1, label="Guidance Scale")
+             generate_btn = gr.Button("🚀 Generate Video")
+
+         with gr.Column():
+             video_output = gr.Video(label="Generated Video")
+
+     generate_btn.click(
+         fn=generate_video,
+         inputs=[prompt, negative_prompt, height, width, num_frames, steps, guidance],
+         outputs=[video_output],
+     )
+
+ demo.launch()
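
For a quick smoke test without launching the Gradio UI, the generation function can be called directly with the app's own defaults. A minimal sketch, assuming the script above is imported or pasted into a session; the smaller resolution, frame count, and print statement are arbitrary test choices, not values from the commit:

# Sketch: headless smoke test of generate_video() using the app's defaults.
# Assumes generate_video, DEFAULT_PROMPT, and DEFAULT_NEGATIVE from the script above.
path = generate_video(
    prompt=DEFAULT_PROMPT,
    negative_prompt=DEFAULT_NEGATIVE,
    height=480,     # smaller than the UI default (720) for a faster check
    width=832,
    num_frames=33,  # shorter clip than the UI default (81)
    steps=4,
    guidance=1.0,
)
print(f"Video written to {path}")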
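The merged A14B checkpoint is heavy, so the eager .to(device) move can fail on smaller GPUs. One standard diffusers alternative is model CPU offload, which keeps idle submodules in host RAM and trades some speed for VRAM. A sketch under that assumption (enable_model_cpu_offload is the stock DiffusionPipeline hook, not something this commit uses):

# Sketch: load as in the app, but offload idle submodules to CPU instead of
# pinning the whole pipeline on the GPU. Standard diffusers API, not from the commit.
pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=dtype)
pipe.enable_model_cpu_offload()  # replaces `.to(device)`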