Spaces: Running on Zero
Commit · 1f35d50
Parent(s): 16d02f1
Fixed Loading
app.py
CHANGED
@@ -1,16 +1,8 @@
 import spaces
 import torch
-from diffusers import AutoencoderKLWan, WanImageToVideoPipeline, UniPCMultistepScheduler, WanTransformer3DModel
+from diffusers import AutoencoderKLWan, WanImageToVideoPipeline, UniPCMultistepScheduler, WanTransformer3DModel, AutoModel, DiffusionPipeline
 from diffusers.utils import export_to_video
-
-try:
-    from diffusers import WanTextToVideoPipeline
-    IS_T2V_AVAILABLE = True
-except ImportError:
-    WanTextToVideoPipeline = None  # Define as None so later code doesn't raise NameError
-    IS_T2V_AVAILABLE = False
-    print("⚠️ Warning: 'WanTextToVideoPipeline' could not be imported. Your 'diffusers' version might be outdated (requires >= 0.25.0).")
-from transformers import CLIPVisionModel, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPVisionModel, UMT5EncoderModel
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline  # noqa
 import tempfile
 import re
@@ -24,24 +16,27 @@ import gradio as gr
 import random

 # --- I2V (Image-to-Video) Configuration ---
+I2V_BASE_MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"  # Used for VAE/encoder components
+I2V_FUSIONX_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
+I2V_FUSIONX_FILENAME = "Wan14Bi2vFusioniX.safetensors"

 # --- T2V (Text-to-Video) Configuration ---
+T2V_BASE_MODEL_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
+T2V_LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
+T2V_LORA_FILENAME = "FusionX_LoRa/Wan2.1_T2V_14B_FusionX_LoRA.safetensors"

 # --- Load Pipelines ---
 print("🚀 Loading I2V pipeline from single file...")
 i2v_pipe = None
 try:
     # Load components needed for the pipeline from the base model repo
-    i2v_image_encoder = CLIPVisionModel.from_pretrained(
-    i2v_vae = AutoencoderKLWan.from_pretrained(
+    i2v_image_encoder = CLIPVisionModel.from_pretrained(I2V_BASE_MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float32)
+    i2v_vae = AutoencoderKLWan.from_pretrained(I2V_BASE_MODEL_ID, subfolder="vae", torch_dtype=torch.float32)

-    # Load the main transformer from the
+    # Load the main transformer from the repo and filename
     i2v_transformer = WanTransformer3DModel.from_single_file(
+        I2V_FUSIONX_REPO_ID,
+        filename=I2V_FUSIONX_FILENAME,
         torch_dtype=torch.bfloat16
     )

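Note on the single-file load above: from_single_file is usually given a concrete checkpoint path or URL, so if the installed diffusers version does not accept the filename= keyword used in this hunk, one alternative is to resolve the file first with huggingface_hub. This is a sketch only, reusing the constants defined above; the hf_hub_download step is not part of this commit:

from huggingface_hub import hf_hub_download

# Sketch: download the FusionX checkpoint to a local path, then load it.
ckpt_path = hf_hub_download(repo_id=I2V_FUSIONX_REPO_ID, filename=I2V_FUSIONX_FILENAME)
i2v_transformer = WanTransformer3DModel.from_single_file(ckpt_path, torch_dtype=torch.bfloat16)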
@@ -58,34 +53,37 @@ except Exception as e:
     print(f"❌ Critical Error: Failed to load I2V pipeline from single file.")
     traceback.print_exc()

-print("\n🚀 Loading T2V pipeline
+print("\n🚀 Loading T2V pipeline with LoRA...")
 t2v_pipe = None
+try:
+    # Load components needed for the T2V pipeline
+    text_encoder = UMT5EncoderModel.from_pretrained(T2V_BASE_MODEL_ID, subfolder="text_encoder", torch_dtype=torch.bfloat16)
+    vae = AutoModel.from_pretrained(T2V_BASE_MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
+    transformer = AutoModel.from_pretrained(T2V_BASE_MODEL_ID, subfolder="transformer", torch_dtype=torch.bfloat16)
+
+    # Assemble the final pipeline
+    t2v_pipe = DiffusionPipeline.from_pretrained(
+        "Wan-AI/Wan2.1-T2V-14B-Diffusers",
+        vae=vae,
+        transformer=transformer,
+        text_encoder=text_encoder,
+        torch_dtype=torch.bfloat16
+    )
+    t2v_pipe.to("cuda")

+    t2v_pipe.load_lora_weights(
+        T2V_LORA_REPO_ID,
+        weight_name=T2V_LORA_FILENAME,
+        adapter_name="fusionx_t2v"
+    )
+    t2v_pipe.set_adapters(["fusionx_t2v"], adapter_weights=[0.75])
+
+    print("✅ T2V pipeline and LoRA loaded and fused successfully.")
-    traceback.print_exc()
+except Exception as e:
+    print(f"❌ Critical Error: Failed to load T2V pipeline.")
+    traceback.print_exc()

 # --- LLM Prompt Enhancer Setup ---
 print("\n🤖 Loading LLM for Prompt Enhancement (Qwen/Qwen3-8B)...")
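For orientation, the assembled t2v_pipe is driven like other diffusers video pipelines. A minimal usage sketch, assuming the load above succeeded; the prompt and parameter values here are illustrative and not taken from the app:

# Sketch: run the LoRA-fused T2V pipeline and write the clip to disk.
frames = t2v_pipe(
    prompt="a red fox running through snow, cinematic lighting",
    negative_prompt="blurry, low quality",
    height=480,
    width=832,
    num_frames=33,
    guidance_scale=5.0,
    num_inference_steps=15,
).frames[0]
export_to_video(frames, "t2v_sample.mp4", fps=16)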
@@ -149,6 +147,7 @@ SLIDER_MIN_W, SLIDER_MAX_W = 128, 1024
 MAX_SEED = np.iinfo(np.int32).max

 FIXED_FPS = 24
+T2V_FIXED_FPS = 16
 MIN_FRAMES_MODEL = 8
 MAX_FRAMES_MODEL = 81

@@ -462,7 +461,7 @@ def generate_t2v_video(prompt, height, width,

     target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
     target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
-    num_frames = np.clip(int(round(duration_seconds *
+    num_frames = np.clip(int(round(duration_seconds * T2V_FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
     enhanced_prompt = f"{prompt}, cinematic, high detail, professional lighting"

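The clip above ties the requested duration to the model's frame budget. With T2V_FIXED_FPS = 16 and the 8-81 frame limits defined earlier, the slider's 2-second default works out as follows (worked example only, not code from the app):

# 2.0 s * 16 fps = 32 frames, already inside [8, 81], so no clipping occurs.
num_frames = np.clip(int(round(2.0 * 16)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)  # -> 32
# At 16 fps the usable range is therefore roughly 0.5 s (8 frames) to ~5.1 s (81 frames).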
@@ -482,7 +481,7 @@ def generate_t2v_video(prompt, height, width,
     filename = f"t2v_{sanitized_prompt}_{current_seed}.mp4"
     temp_dir = tempfile.mkdtemp()
     video_path = os.path.join(temp_dir, filename)
-    export_to_video(output_frames_list, video_path, fps=
+    export_to_video(output_frames_list, video_path, fps=T2V_FIXED_FPS)

     return video_path, current_seed, gr.File(value=video_path, visible=True, label=f"📥 Download: {filename}")

@@ -530,7 +529,7 @@ with gr.Blocks(css=custom_css) as demo:

         # --- Text-to-Video Tab ---
         with gr.TabItem("✍️ Text-to-Video", id="t2v_tab", interactive=t2v_pipe is not None):
-            if
+            if t2v_pipe is None:
                 gr.Markdown("<h3 style='color: #ff9999; text-align: center;'>⚠️ Text-to-Video Pipeline Failed to Load. This tab is disabled.</h3>")
             else:
                 with gr.Row():
@@ -548,7 +547,7 @@ with gr.Blocks(css=custom_css) as demo:
                     minimum=round(MIN_FRAMES_MODEL/FIXED_FPS,1),
                     maximum=round(MAX_FRAMES_MODEL/FIXED_FPS,1),
                     step=0.1, value=2, label="⏱️ Duration (seconds)",
-                    info=f"Generates {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {
+                    info=f"Generates {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {T2V_FIXED_FPS}fps."
                 )
                 with gr.Accordion("⚙️ Advanced Settings", open=False):
                     t2v_neg_prompt = gr.Textbox(label="❌ Negative Prompt", value=default_negative_prompt, lines=4)
@@ -558,7 +557,7 @@ with gr.Blocks(css=custom_css) as demo:
                     t2v_height = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label=f"📐 Height ({MOD_VALUE}px steps)")
                     t2v_width = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label=f"📏 Width ({MOD_VALUE}px steps)")
                     t2v_steps = gr.Slider(minimum=1, maximum=25, step=1, value=15, label="🔄 Inference Steps", info="15-20 recommended for quality.")
-                    t2v_guidance = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=
+                    t2v_guidance = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=5.0, label="🎯 Guidance Scale")

                 t2v_generate_btn = gr.Button("🎬 Generate T2V", variant="primary", elem_classes=["generate-btn"])

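The click handler for t2v_generate_btn sits outside this diff. A plausible hookup sketch, assuming generate_t2v_video keeps the signature shown in the hunk headers and returns the three values visible in the return statement above; every component name not shown in the hunks (t2v_prompt, t2v_duration, t2v_seed, t2v_randomize_seed, t2v_video_output, t2v_download) is a hypothetical placeholder:

# Sketch only: wire the button to the generation function.
# Input/output component names are hypothetical placeholders, not from this commit.
t2v_generate_btn.click(
    fn=generate_t2v_video,
    inputs=[t2v_prompt, t2v_height, t2v_width, t2v_duration, t2v_steps,
            t2v_guidance, t2v_neg_prompt, t2v_seed, t2v_randomize_seed],
    outputs=[t2v_video_output, t2v_seed, t2v_download],
)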