# Hugging Face Space (running on ZeroGPU): Wan 2.1 Image-to-Video (14B) + CausVid LoRA demo
import torch
from diffusers import AutoencoderKLWan, WanImageToVideoPipeline, UniPCMultistepScheduler
from diffusers.utils import export_to_video, load_image
from transformers import CLIPVisionModel
import gradio as gr
import tempfile
import os
import spaces  # Hugging Face Spaces ZeroGPU support (provides the @spaces.GPU decorator)
from huggingface_hub import hf_hub_download
import logging
import numpy as np
from PIL import Image
# --- Global Model Loading & LoRA Handling ---
MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
LORA_REPO_ID = "Kijai/WanVideo_comfy"
LORA_FILENAME = "Wan21_CausVid_14B_T2V_lora_rank32.safetensors"

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# --- Model Loading ---
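# Note: following the reference Wan 2.1 diffusers example, the CLIP image
# encoder and the VAE are kept in float32 for numerical stability, while the
# diffusion transformer itself runs in bfloat16.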
logger.info(f"Loading Image Encoder for {MODEL_ID}...") | |
image_encoder = CLIPVisionModel.from_pretrained( | |
MODEL_ID, | |
subfolder="image_encoder", | |
torch_dtype=torch.float32 | |
) | |
logger.info(f"Loading VAE for {MODEL_ID}...") | |
vae = AutoencoderKLWan.from_pretrained( | |
MODEL_ID, | |
subfolder="vae", | |
torch_dtype=torch.float32 | |
) | |
logger.info(f"Loading Pipeline {MODEL_ID}...") | |
pipe = WanImageToVideoPipeline.from_pretrained( | |
MODEL_ID, | |
vae=vae, | |
image_encoder=image_encoder, | |
torch_dtype=torch.bfloat16 | |
) | |
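# flow_shift controls the timestep shift of the flow-matching noise schedule;
# larger values bias sampling toward higher noise levels. 8.0 is this demo's
# choice; the diffusers docs suggest tuning it per resolution.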
flow_shift = 8.0
pipe.scheduler = UniPCMultistepScheduler.from_config(
    pipe.scheduler.config, flow_shift=flow_shift
)

logger.info("Moving pipeline to CUDA...")
pipe.to("cuda")
# --- LoRA Loading ---
logger.info(f"Downloading LoRA {LORA_FILENAME} from {LORA_REPO_ID}...")
causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
logger.info("Loading LoRA weights...")
pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")
logger.info("Setting LoRA adapter...")
pipe.set_adapters(["causvid_lora"], adapter_weights=[1.0])
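# The CausVid LoRA is a distillation adapter that makes few-step sampling
# viable; it is why the UI defaults below use only 4 inference steps with
# guidance_scale=1.0 (i.e. classifier-free guidance effectively disabled).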
MOD_VALUE = 128
MOD_VALUE_H = MOD_VALUE_W = MOD_VALUE

DEFAULT_H_SLIDER_VALUE = 384  # 3 * 128
DEFAULT_W_SLIDER_VALUE = 640  # 5 * 128
DEFAULT_TARGET_AREA = float(DEFAULT_H_SLIDER_VALUE * DEFAULT_W_SLIDER_VALUE)

SLIDER_MIN_H = 128
SLIDER_MAX_H = 512
SLIDER_MIN_W = 128
SLIDER_MAX_W = 854  # not a multiple of MOD_VALUE; generate_video() snaps such values down
def _calculate_new_dimensions_wan(pil_image: Image.Image, mod_val: int, target_area: float,
                                  min_h: int, max_h: int, min_w: int, max_w: int,
                                  default_h: int, default_w: int) -> tuple[int, int]:
    orig_w, orig_h = pil_image.size
    if orig_w == 0 or orig_h == 0:
        logger.warning("Uploaded image has zero width or height. Using default slider dimensions.")
        return default_h, default_w

    aspect_ratio = orig_h / orig_w
    ideal_h = np.sqrt(target_area * aspect_ratio)
    ideal_w = np.sqrt(target_area / aspect_ratio)

    calc_h = round(ideal_h / mod_val) * mod_val
    calc_w = round(ideal_w / mod_val) * mod_val
    calc_h = max(mod_val, calc_h)  # ensure at least one mod_val unit
    calc_w = max(mod_val, calc_w)  # ensure at least one mod_val unit

    new_h = int(np.clip(calc_h, min_h, max_h))
    new_w = int(np.clip(calc_w, min_w, max_w))

    logger.info(f"Auto-dim: Original {orig_w}x{orig_h} (AR: {aspect_ratio:.2f}). Target Area: {target_area}.")
    logger.info(f"Auto-dim: Ideal HxW: {ideal_h:.0f}x{ideal_w:.0f}. Rounded (step {mod_val}): {calc_h}x{calc_w}.")
    logger.info(f"Auto-dim: Clamped HxW: {new_h}x{new_w} (H_range:[{min_h}-{max_h}], W_range:[{min_w}-{max_w}]).")
    return new_h, new_w
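# Worked example (illustrative): a 1024x768 upload has aspect ratio 0.75, so
# with the default target area of 384*640 = 245760 the ideal size is about
# 429x572; rounding each side to the nearest multiple of 128 gives 384x512,
# which preserves the 0.75 aspect ratio exactly.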
def handle_image_upload_for_dims_wan(uploaded_pil_image: Image.Image | None, current_h_val: int, current_w_val: int):
    if uploaded_pil_image is None:
        logger.info("Image cleared. Resetting dimensions to default slider values.")
        return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
    try:
        new_h, new_w = _calculate_new_dimensions_wan(
            uploaded_pil_image,
            MOD_VALUE,  # use the globally determined MOD_VALUE
            DEFAULT_TARGET_AREA,
            SLIDER_MIN_H, SLIDER_MAX_H,
            SLIDER_MIN_W, SLIDER_MAX_W,
            DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE
        )
        return gr.update(value=new_h), gr.update(value=new_w)
    except Exception as e:
        logger.error(f"Error auto-adjusting H/W from image: {e}", exc_info=True)
        return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
# --- Gradio Interface Function ---
@spaces.GPU  # request a ZeroGPU slot for the duration of this call
def generate_video(input_image: Image.Image, prompt: str, negative_prompt: str,
                   height: int, width: int, num_frames: int,
                   guidance_scale: float, steps: int, fps_for_conditioning_and_export: int,
                   progress=gr.Progress(track_tqdm=True)):
    if input_image is None:
        raise gr.Error("Please upload an input image.")

    logger.info("Starting video generation...")
    logger.info(f"  Input Image: Uploaded (Original size: {input_image.size})")
    logger.info(f"  Prompt: {prompt}")
    logger.info(f"  Negative Prompt: {negative_prompt if negative_prompt else 'None'}")
    logger.info(f"  Target Output Height: {height}, Target Output Width: {width}")
    logger.info(f"  Num Frames: {num_frames}, FPS for conditioning & export: {fps_for_conditioning_and_export}")
    logger.info(f"  Guidance Scale: {guidance_scale}, Steps: {steps}")

    target_height = int(height)
    target_width = int(width)
    num_frames = int(num_frames)
    fps_val = int(fps_for_conditioning_and_export)
    guidance_scale_val = float(guidance_scale)
    steps_val = int(steps)

    # Snap dimensions to multiples of MOD_VALUE. The sliders and the upload
    # handler should already guarantee this; this is a defensive fallback.
    if target_height % MOD_VALUE_H != 0:
        logger.warning(f"Height {target_height} is not a multiple of {MOD_VALUE_H}. Adjusting...")
        target_height = (target_height // MOD_VALUE_H) * MOD_VALUE_H
    if target_width % MOD_VALUE_W != 0:
        logger.warning(f"Width {target_width} is not a multiple of {MOD_VALUE_W}. Adjusting...")
        target_width = (target_width // MOD_VALUE_W) * MOD_VALUE_W
    target_height = max(MOD_VALUE_H, target_height)  # ensure minimum size
    target_width = max(MOD_VALUE_W, target_width)  # ensure minimum size
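    # Note: resize() below stretches the image to exactly (W, H) without
    # preserving aspect ratio; the upload handler suggests slider values that
    # match the source aspect ratio, so distortion stays small unless the
    # sliders are changed afterwards.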
    resized_image = input_image.resize((target_width, target_height))
    logger.info(f"  Input image resized to: {resized_image.size} for pipeline input.")

    with torch.inference_mode():
        output_frames_list = pipe(
            image=resized_image,
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=target_height,
            width=target_width,
            num_frames=num_frames,
            guidance_scale=guidance_scale_val,
            num_inference_steps=steps_val,
            generator=torch.Generator(device="cuda").manual_seed(0)  # fixed seed for reproducible outputs
        ).frames[0]  # the pipeline returns a batch of videos; take the first
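    # Write the frames to a temporary .mp4; delete=False keeps the file on
    # disk so Gradio can serve it after this function returns.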
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name
    export_to_video(output_frames_list, video_path, fps=fps_val)
    logger.info(f"Video successfully generated and saved to {video_path}")
    return video_path
# --- Gradio UI Definition ---
default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, watermark, text, signature"
penguin_image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/penguin.png"
with gr.Blocks() as demo:
    gr.Markdown(f"""
    # Image-to-Video with Wan 2.1 I2V (14B) + CausVid LoRA
    Powered by `diffusers` and `{MODEL_ID}`.
    The model is loaded into memory when the app starts, which can take a few minutes.
    Ensure you have a GPU with sufficient VRAM (e.g., ~24GB+ for these default settings).
    Output height and width must be multiples of **{MOD_VALUE}**. Uploading an image suggests dimensions based on its aspect ratio and a target area.
    """)
    with gr.Row():
        with gr.Column(scale=2):
            input_image_component = gr.Image(type="pil", label="Input Image (will be resized to target H/W)")
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v, lines=3)
            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt_input = gr.Textbox(
                    label="Negative Prompt (Optional)",
                    value=default_negative_prompt,
                    lines=3
                )
                with gr.Row():
                    height_input = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label=f"Output Height (multiple of {MOD_VALUE})")
                    width_input = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label=f"Output Width (multiple of {MOD_VALUE})")
                with gr.Row():
                    num_frames_input = gr.Slider(minimum=8, maximum=81, step=1, value=41, label="Number of Frames")
                    fps_input = gr.Slider(minimum=5, maximum=30, step=1, value=24, label="FPS (for conditioning & export)")
                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale")
            generate_button = gr.Button("Generate Video", variant="primary")
        with gr.Column(scale=3):
            video_output = gr.Video(label="Generated Video", interactive=False)
    input_image_component.upload(
        fn=handle_image_upload_for_dims_wan,
        inputs=[input_image_component, height_input, width_input],
        outputs=[height_input, width_input]
    )
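    # .upload() fires only on new uploads, so the None branch of the handler
    # (which resets the sliders to their defaults) would otherwise be
    # unreachable; wiring .clear() as well covers image removal.
    input_image_component.clear(
        fn=handle_image_upload_for_dims_wan,
        inputs=[input_image_component, height_input, width_input],
        outputs=[height_input, width_input]
    )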
    inputs_for_click_and_examples = [
        input_image_component,
        prompt_input,
        negative_prompt_input,
        height_input,
        width_input,
        num_frames_input,
        guidance_scale_input,
        steps_slider,
        fps_input
    ]

    generate_button.click(
        fn=generate_video,
        inputs=inputs_for_click_and_examples,
        outputs=video_output
    )
    gr.Examples(
        examples=[
            [penguin_image_url, "a penguin playfully dancing in the snow, Antarctica", default_negative_prompt, DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE, 25, 1.0, 4, 16],
        ],
        inputs=inputs_for_click_and_examples,
        outputs=video_output,
        fn=generate_video,
        cache_examples=False  # avoid running a full generation pass at startup
    )
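# queue() serializes generation requests so concurrent users do not contend
# for the GPU; share=True additionally exposes a temporary public link when
# running outside Hugging Face Spaces.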
if __name__ == "__main__":
    demo.queue().launch(share=True, debug=True)