import torch
from diffusers import AutoencoderKLWan, WanPipeline, UniPCMultistepScheduler
from diffusers.utils import export_to_video
from diffusers.loaders.lora_conversion_utils import _convert_non_diffusers_wan_lora_to_diffusers  # Keep this if it's the base for standard LoRA parts
import gradio as gr
import tempfile
import os
import spaces
from huggingface_hub import hf_hub_download
import logging  # For better logging
import re  # For key manipulation

# --- Global Model Loading & LoRA Handling ---
MODEL_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
LORA_REPO_ID = "Kijai/WanVideo_comfy"
LORA_FILENAME = "Wan21_CausVid_14B_T2V_lora_rank32.safetensors"

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

MANUAL_PATCHES_STORE = {"diff": {}, "diff_b": {}}
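# "diff" entries hold full-rank weight deltas and "diff_b" bias deltas that
# ship alongside the low-rank matrices in this LoRA file. PEFT has no concept
# of them, so the converter below collects them here and they are added to
# the base weights manually after load_lora_weights().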
def _custom_convert_non_diffusers_wan_lora_to_diffusers(state_dict):
    global MANUAL_PATCHES_STORE
    MANUAL_PATCHES_STORE = {"diff": {}, "diff_b": {}}  # Reset for each conversion
    peft_compatible_state_dict = {}
    unhandled_keys = []
    original_keys_map_to_diffusers = {}

    # Mapping based on ComfyUI's WanModel structure and PeftAdapterMixin logic.
    # The original LoRA key naming must be mapped to Diffusers' expected PEFT keys, e.g.:
    #   diffusion_model.blocks.0.self_attn.q.lora_down.weight -> transformer.blocks.0.attn1.to_q.lora_A.weight
    #   diffusion_model.blocks.0.ffn.0.lora_down.weight       -> transformer.blocks.0.ffn.net.0.proj.lora_A.weight
    #   diffusion_model.text_embedding.0.lora_down.weight     -> transformer.condition_embedder.text_embedder.linear_1.lora_A.weight
    for k, v in state_dict.items():
        original_k = k  # Keep for logging/debugging

        # Strip the "diffusion_model." prefix
        if k.startswith("diffusion_model."):
            k_stripped = k[len("diffusion_model."):]
        elif k.startswith("difusion_model."):  # Handle a potential typo in the source file
            k_stripped = k[len("difusion_model."):]
            logger.warning(f"Key '{original_k}' starts with 'difusion_model.' (potential typo), processing as 'diffusion_model.'.")
        else:
            unhandled_keys.append(original_k)
            continue

        # Store .diff (weight delta) and .diff_b (bias delta) keys separately
        if k_stripped.endswith(".diff"):
            target_model_key = k_stripped[:-len(".diff")] + ".weight"
            MANUAL_PATCHES_STORE["diff"][target_model_key] = v
            continue
        elif k_stripped.endswith(".diff_b"):
            target_model_key = k_stripped[:-len(".diff_b")] + ".bias"
            MANUAL_PATCHES_STORE["diff_b"][target_model_key] = v
            continue

        # Handle standard LoRA A/B matrices. The transformations mirror
        # _convert_non_diffusers_wan_lora_to_diffusers from diffusers, adapted
        # to the PEFT naming convention (lora_A/lora_B). The mapping below must
        # be comprehensive for every layer of WanTransformer3DModel.
        if ".lora_down.weight" in k_stripped:
            diffusers_key_base = k_stripped.replace(".lora_down.weight", "")
            if diffusers_key_base.startswith("blocks."):
                parts = diffusers_key_base.split(".")
                block_idx = parts[1]
                attn_type = parts[2]  # self_attn, cross_attn, or ffn
                proj_type = parts[3]  # q, k, v, o (or the ffn sublayer index)
                if attn_type == "self_attn":
                    diffusers_peft_key = f"transformer.blocks.{block_idx}.attn1.to_{proj_type}.lora_A.weight"
                elif attn_type == "cross_attn":
                    # WanTransformer3DModel uses attn2 for cross-attention-like features
                    diffusers_peft_key = f"transformer.blocks.{block_idx}.attn2.to_{proj_type}.lora_A.weight"
                else:  # ffn
                    ffn_idx = proj_type  # "0" or "2"
                    diffusers_peft_key = f"transformer.blocks.{block_idx}.ffn.net.{ffn_idx}.proj.lora_A.weight"
            elif diffusers_key_base.startswith("text_embedding."):
                idx_map = {"0": "linear_1", "2": "linear_2"}
                idx = diffusers_key_base.split(".")[1]
                diffusers_peft_key = f"transformer.condition_embedder.text_embedder.{idx_map[idx]}.lora_A.weight"
            elif diffusers_key_base.startswith("time_embedding."):
                idx_map = {"0": "linear_1", "2": "linear_2"}
                idx = diffusers_key_base.split(".")[1]
                diffusers_peft_key = f"transformer.condition_embedder.time_embedder.{idx_map[idx]}.lora_A.weight"
            elif diffusers_key_base.startswith("time_projection."):  # Assuming index '1' as in the sample keys
                diffusers_peft_key = "transformer.condition_embedder.time_proj.lora_A.weight"
            elif diffusers_key_base.startswith("patch_embedding"):
                # WanTransformer3DModel has 'patch_embedding' at the top level;
                # this key must match how PEFT would name it
                diffusers_peft_key = "transformer.patch_embedding.lora_A.weight"
            elif diffusers_key_base.startswith("head.head"):
                diffusers_peft_key = "transformer.proj_out.lora_A.weight"
            else:
                unhandled_keys.append(original_k)
                continue
            peft_compatible_state_dict[diffusers_peft_key] = v
            original_keys_map_to_diffusers[k_stripped] = diffusers_peft_key
        elif ".lora_up.weight" in k_stripped:
            # Find the corresponding lora_down key to determine the base name.
            # This assumes the matching lora_down key was already seen, which
            # holds when the safetensors keys iterate in sorted order.
            down_key_stripped = k_stripped.replace(".lora_up.weight", ".lora_down.weight")
            if down_key_stripped in original_keys_map_to_diffusers:
                diffusers_peft_key_A = original_keys_map_to_diffusers[down_key_stripped]
                diffusers_peft_key_B = diffusers_peft_key_A.replace(".lora_A.weight", ".lora_B.weight")
                peft_compatible_state_dict[diffusers_peft_key_B] = v
            else:
                unhandled_keys.append(original_k)
        elif not (k_stripped.endswith(".alpha") or k_stripped.endswith(".dora_scale")):
            # Alphas are handled by PEFT when lora_A/B are present
            unhandled_keys.append(original_k)

    if unhandled_keys:
        logger.warning(f"Custom Wan LoRA Converter: Unhandled keys: {unhandled_keys}")
    return peft_compatible_state_dict
def apply_manual_diff_patches(pipe_model, patches_store, lora_strength=1.0):
    if not hasattr(pipe_model, "transformer"):
        logger.error("Pipeline model does not have a 'transformer' attribute to patch.")
        return
    transformer = pipe_model.transformer
    changed_params_count = 0

    for key_base, diff_tensor in patches_store.get("diff", {}).items():
        # key_base is like "blocks.0.self_attn.q.weight"; prepend "transformer."
        # to match diffusers' internal naming
        target_key_full = f"transformer.{key_base}"
        try:
            module_path_parts = target_key_full.split(".")
            param_name = module_path_parts[-1]
            module_path = ".".join(module_path_parts[:-1])
            module = transformer
            for part in module_path.split(".")[1:]:  # Skip the leading 'transformer'
                module = getattr(module, part)
            original_param = getattr(module, param_name)
            if original_param.shape != diff_tensor.shape:
                logger.warning(f"Shape mismatch for diff patch on {target_key_full}: model {original_param.shape}, lora {diff_tensor.shape}. Skipping.")
                continue
            with torch.no_grad():
                scaled_diff = lora_strength * diff_tensor.to(original_param.device, original_param.dtype)
                original_param.data.add_(scaled_diff)
            changed_params_count += 1
        except AttributeError:
            logger.warning(f"Could not find parameter {target_key_full} in transformer to apply diff patch.")
        except Exception as e:
            logger.error(f"Error applying diff patch to {target_key_full}: {e}")

    for key_base, diff_b_tensor in patches_store.get("diff_b", {}).items():
        # key_base is like "blocks.0.self_attn.q.bias"
        target_key_full = f"transformer.{key_base}"
        try:
            module_path_parts = target_key_full.split(".")
            param_name = module_path_parts[-1]
            module_path = ".".join(module_path_parts[:-1])
            module = transformer
            for part in module_path.split(".")[1:]:
                module = getattr(module, part)
            original_param = getattr(module, param_name)
            if original_param is None:
                logger.warning(f"Bias parameter {target_key_full} is None in model. Skipping diff_b patch.")
                continue
            if original_param.shape != diff_b_tensor.shape:
                logger.warning(f"Shape mismatch for diff_b patch on {target_key_full}: model {original_param.shape}, lora {diff_b_tensor.shape}. Skipping.")
                continue
            with torch.no_grad():
                scaled_diff_b = lora_strength * diff_b_tensor.to(original_param.device, original_param.dtype)
                original_param.data.add_(scaled_diff_b)
            changed_params_count += 1
        except AttributeError:
            logger.warning(f"Could not find parameter {target_key_full} in transformer to apply diff_b patch.")
        except Exception as e:
            logger.error(f"Error applying diff_b patch to {target_key_full}: {e}")

    if changed_params_count > 0:
        logger.info(f"Applied {changed_params_count} manual diff/diff_b patches.")
    else:
        logger.info("No manual diff/diff_b patches were applied.")
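# Note: the getattr() walk above could equivalently use torch.nn.Module's
# built-in lookup, which raises AttributeError on a missing path in the same
# way (a minimal sketch, not used by this app):
#   param = transformer.get_parameter("blocks.0.attn1.to_q.weight")
#   param.data.add_(lora_strength * diff_tensor.to(param.device, param.dtype))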
# --- Model Loading ---
logger.info(f"Loading VAE for {MODEL_ID}...")
vae = AutoencoderKLWan.from_pretrained(
    MODEL_ID,
    subfolder="vae",
    torch_dtype=torch.float32,  # float32 for VAE stability
)
logger.info(f"Loading Pipeline {MODEL_ID}...")
pipe = WanPipeline.from_pretrained(
    MODEL_ID,
    vae=vae,
    torch_dtype=torch.bfloat16,  # bfloat16 for the pipeline
)
flow_shift = 8.0
pipe.scheduler = UniPCMultistepScheduler.from_config(
    pipe.scheduler.config, flow_shift=flow_shift
)
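# flow_shift sets the timestep shift of the flow-matching schedule: higher
# values concentrate sampling at high-noise timesteps. Smaller shifts (e.g.
# ~3.0) are commonly suggested for lower resolutions; 8.0 is this app's choice.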
logger.info("Moving pipeline to CUDA...")
pipe.to("cuda")
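# If VRAM is tight, diffusers' offloading helper can stand in for the hard
# pipe.to("cuda") above (a sketch; trades throughput for memory):
#   pipe.enable_model_cpu_offload()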
# --- LoRA Loading ---
logger.info(f"Downloading LoRA {LORA_FILENAME} from {LORA_REPO_ID}...")
causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)

logger.info("Loading LoRA weights with custom converter...")
from safetensors.torch import load_file as load_safetensors

raw_lora_state_dict = load_safetensors(causvid_path)

# Call the custom converter, which also populates MANUAL_PATCHES_STORE
peft_state_dict = _custom_convert_non_diffusers_wan_lora_to_diffusers(raw_lora_state_dict)
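# Optional sanity check (a sketch, not part of the original flow): PEFT
# expects every lora_A matrix to have a matching lora_B counterpart, so flag
# any stragglers before handing the state dict to load_lora_weights().
_missing_b = [
    k for k in peft_state_dict
    if k.endswith(".lora_A.weight")
    and k.replace(".lora_A.weight", ".lora_B.weight") not in peft_state_dict
]
if _missing_b:
    logger.warning(f"LoRA keys missing a lora_B counterpart: {_missing_b}")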
# Load the LoRA A/B matrices using PEFT
if peft_state_dict:
    pipe.load_lora_weights(
        peft_state_dict,
        adapter_name="causvid_lora",
    )
    logger.info("PEFT LoRA A/B weights loaded.")
else:
    logger.warning("No PEFT-compatible LoRA weights found after conversion.")

# Apply the manual diff and diff_b patches on top of the base weights
apply_manual_diff_patches(pipe, MANUAL_PATCHES_STORE, lora_strength=1.0)  # Default strength 1.0
logger.info("Manual diff_b/diff patches applied.")
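# A note on strength (assumes diffusers' PEFT integration): set_adapters()
# only rescales the lora_A/lora_B matrices loaded via load_lora_weights();
# the diff/diff_b patches above are baked into the base weights at the given
# lora_strength and would need re-applying to change their scale, e.g.:
#   pipe.set_adapters(["causvid_lora"], adapter_weights=[0.8])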
# --- Gradio Interface Function ---
@spaces.GPU  # ZeroGPU: request a GPU slot for the duration of each call (this Space runs on Zero, hence the `spaces` import)
def generate_video(prompt, negative_prompt, height, width, num_frames, guidance_scale, steps, fps, progress=gr.Progress(track_tqdm=True)):
    logger.info("Starting video generation...")
    logger.info(f"  Prompt: {prompt}")
    logger.info(f"  Negative Prompt: {negative_prompt if negative_prompt else 'None'}")
    logger.info(f"  Height: {height}, Width: {width}")
    logger.info(f"  Num Frames: {num_frames}, FPS: {fps}")
    logger.info(f"  Guidance Scale: {guidance_scale}")

    # Snap spatial dims to multiples of 8 and coerce numeric inputs to int
    height = (int(height) // 8) * 8
    width = (int(width) // 8) * 8
    num_frames = int(num_frames)
    fps = int(fps)

    with torch.inference_mode():
        output_frames_list = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_frames=num_frames,
            guidance_scale=float(guidance_scale),
            num_inference_steps=int(steps),  # Gradio sliders can deliver floats
        ).frames[0]

    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name
    export_to_video(output_frames_list, video_path, fps=fps)
    logger.info(f"Video successfully generated and saved to {video_path}")
    return video_path
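# For reproducible outputs, a seeded generator could be threaded through the
# pipe call above (a sketch; the UI does not currently expose a seed):
#   generator = torch.Generator(device="cuda").manual_seed(42)
#   pipe(..., generator=generator)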
# --- Gradio UI Definition ---
default_prompt = "A cat walks on the grass, realistic"
default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"

with gr.Blocks() as demo:
    gr.Markdown(f"""
    # Text-to-Video with Wan 2.1 (14B) + CausVid LoRA
    Powered by `diffusers` and `{MODEL_ID}`.
    The model is loaded into memory when the app starts; this can take a few minutes.
    Ensure you have a GPU with sufficient VRAM (e.g., ~24GB+ for the default settings).
    """)
    with gr.Row():
        with gr.Column(scale=2):
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt, lines=3)
            negative_prompt_input = gr.Textbox(
                label="Negative Prompt (Optional)",
                value=default_negative_prompt,
                lines=3,
            )
            with gr.Row():
                height_input = gr.Slider(minimum=256, maximum=768, step=64, value=480, label="Height (multiple of 8)")
                width_input = gr.Slider(minimum=256, maximum=1024, step=64, value=832, label="Width (multiple of 8)")
            with gr.Row():
                num_frames_input = gr.Slider(minimum=16, maximum=100, step=1, value=25, label="Number of Frames")
                fps_input = gr.Slider(minimum=5, maximum=30, step=1, value=15, label="Output FPS")
            steps = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Steps")
            guidance_scale_input = gr.Slider(minimum=1.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale")
            generate_button = gr.Button("Generate Video", variant="primary")
        with gr.Column(scale=3):
            video_output = gr.Video(label="Generated Video")
    generate_button.click(
        fn=generate_video,
        inputs=[
            prompt_input,
            negative_prompt_input,
            height_input,
            width_input,
            num_frames_input,
            guidance_scale_input,
            steps,
            fps_input,
        ],
        outputs=video_output,
    )

    gr.Examples(
        examples=[
            ["A panda eating bamboo in a lush forest, cinematic lighting", default_negative_prompt, 480, 832, 25, 5.0, 4, 15],
            ["A majestic eagle soaring over snowy mountains", default_negative_prompt, 512, 768, 30, 7.0, 4, 12],
            ["Timelapse of a flower blooming, vibrant colors", "static, ugly", 384, 640, 40, 6.0, 4, 20],
            ["Astronaut walking on the moon, Earth in the background, highly detailed", default_negative_prompt, 480, 832, 20, 5.5, 4, 10],
        ],
        inputs=[prompt_input, negative_prompt_input, height_input, width_input, num_frames_input, guidance_scale_input, steps, fps_input],
        outputs=video_output,
        fn=generate_video,
        cache_examples=False,
    )

if __name__ == "__main__":
    demo.queue().launch(share=True, debug=True)