import spaces
import torch
from diffusers import AutoencoderKLWan, WanImageToVideoPipeline, UniPCMultistepScheduler
from diffusers.utils import export_to_video
from transformers import CLIPVisionModel
import gradio as gr
import tempfile
import re
import os
from huggingface_hub import hf_hub_download
import numpy as np
from PIL import Image
import random
# Base model: the original Wan2.1 I2V 14B 720p checkpoint in diffusers format
MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-720P-Diffusers"
# Merged FusionX enhancement LoRA
LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
LORA_FILENAME = "Wan2.1_I2V_14B_FusionX_LoRA.safetensors"
LORA_SUBFOLDER = "FusionX_LoRa"
# Load enhanced model components
print("πŸš€ Loading FusionX Enhanced Wan2.1 I2V Model...")
image_encoder = CLIPVisionModel.from_pretrained(MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float32)
vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
pipe = WanImageToVideoPipeline.from_pretrained(
MODEL_ID, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16
)
# FusionX optimized scheduler settings
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
pipe.to("cuda")
lora_adapters = []
lora_weights = []
# Load the single merged FusionX LoRA (left unfused so its strength can be scaled at inference time)
try:
    # Download the merged FusionX LoRA from the Hub (applied at strength 1.0, per the FusionX recipe)
lora_path = hf_hub_download(
repo_id=LORA_REPO_ID,
filename=LORA_FILENAME,
subfolder=LORA_SUBFOLDER
)
pipe.load_lora_weights(lora_path, adapter_name="fusionx")
lora_adapters.append("fusionx")
lora_weights.append(1.0) # FusionX uses 1.0
print("βœ… FusionX LoRA loaded (strength: 1.0)")
except Exception as e:
print(f"⚠️ FusionX LoRA not loaded: {e}")
MOD_VALUE = 32
DEFAULT_H_SLIDER_VALUE = 576 # FusionX optimized default
DEFAULT_W_SLIDER_VALUE = 1024 # FusionX optimized default
NEW_FORMULA_MAX_AREA = 576.0 * 1024.0 # Updated for FusionX
SLIDER_MIN_H, SLIDER_MAX_H = 128, 1080
SLIDER_MIN_W, SLIDER_MAX_W = 128, 1920
MAX_SEED = np.iinfo(np.int32).max
FIXED_FPS = 24
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 121 # FusionX supports up to 121 frames
# Enhanced prompts for FusionX-style output
default_prompt_i2v = "Cinematic motion, smooth animation, detailed textures, dynamic lighting, professional cinematography"
default_negative_prompt = "Static image, no motion, blurred details, overexposed, underexposed, low quality, worst quality, JPEG artifacts, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, watermark, text, signature, three legs, many people in the background, walking backwards"
# Enhanced CSS for FusionX theme
custom_css = """
/* Enhanced FusionX theme with cinematic styling */
.gradio-container {
font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
background: linear-gradient(135deg, #1a1a2e 0%, #16213e 25%, #0f3460 50%, #533a7d 75%, #6a4c93 100%) !important;
background-size: 400% 400% !important;
animation: cinematicShift 20s ease infinite !important;
}
@keyframes cinematicShift {
0% { background-position: 0% 50%; }
25% { background-position: 100% 50%; }
50% { background-position: 100% 100%; }
75% { background-position: 0% 100%; }
100% { background-position: 0% 50%; }
}
/* Main container with cinematic glass effect */
.main-container {
backdrop-filter: blur(15px);
background: rgba(255, 255, 255, 0.08) !important;
border-radius: 25px !important;
padding: 35px !important;
box-shadow: 0 12px 40px 0 rgba(31, 38, 135, 0.4) !important;
border: 1px solid rgba(255, 255, 255, 0.15) !important;
position: relative;
overflow: hidden;
}
.main-container::before {
content: '';
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: linear-gradient(45deg, rgba(255,255,255,0.1) 0%, transparent 50%, rgba(255,255,255,0.05) 100%);
pointer-events: none;
}
/* Enhanced header with FusionX branding */
h1 {
background: linear-gradient(45deg, #ffffff, #f0f8ff, #e6e6fa) !important;
-webkit-background-clip: text !important;
-webkit-text-fill-color: transparent !important;
background-clip: text !important;
font-weight: 900 !important;
font-size: 2.8rem !important;
text-align: center !important;
margin-bottom: 2.5rem !important;
text-shadow: 2px 2px 8px rgba(0,0,0,0.3) !important;
position: relative;
}
h1::after {
content: '🎬 FusionX Enhanced';
display: block;
font-size: 1rem;
color: #6a4c93;
margin-top: 0.5rem;
font-weight: 500;
}
/* Enhanced component containers */
.input-container, .output-container {
background: rgba(255, 255, 255, 0.06) !important;
border-radius: 20px !important;
padding: 25px !important;
margin: 15px 0 !important;
backdrop-filter: blur(10px) !important;
border: 1px solid rgba(255, 255, 255, 0.12) !important;
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1) !important;
}
/* Cinematic input styling */
input, textarea, .gr-box {
background: rgba(255, 255, 255, 0.95) !important;
border: 1px solid rgba(106, 76, 147, 0.3) !important;
border-radius: 12px !important;
color: #1a1a2e !important;
transition: all 0.4s ease !important;
box-shadow: 0 2px 8px rgba(106, 76, 147, 0.1) !important;
}
input:focus, textarea:focus {
background: rgba(255, 255, 255, 1) !important;
border-color: #6a4c93 !important;
box-shadow: 0 0 0 3px rgba(106, 76, 147, 0.15) !important;
transform: translateY(-1px) !important;
}
/* Enhanced FusionX button */
.generate-btn {
background: linear-gradient(135deg, #6a4c93 0%, #533a7d 50%, #0f3460 100%) !important;
color: white !important;
font-weight: 700 !important;
font-size: 1.2rem !important;
padding: 15px 40px !important;
border-radius: 60px !important;
border: none !important;
cursor: pointer !important;
transition: all 0.4s ease !important;
box-shadow: 0 6px 20px rgba(106, 76, 147, 0.4) !important;
position: relative;
overflow: hidden;
}
.generate-btn::before {
content: '';
position: absolute;
top: 0;
left: -100%;
width: 100%;
height: 100%;
background: linear-gradient(90deg, transparent, rgba(255,255,255,0.3), transparent);
transition: left 0.5s ease;
}
.generate-btn:hover::before {
left: 100%;
}
.generate-btn:hover {
transform: translateY(-3px) scale(1.02) !important;
box-shadow: 0 8px 25px rgba(106, 76, 147, 0.6) !important;
}
/* Enhanced slider styling */
input[type="range"] {
background: transparent !important;
}
input[type="range"]::-webkit-slider-track {
background: linear-gradient(90deg, rgba(106, 76, 147, 0.3), rgba(83, 58, 125, 0.5)) !important;
border-radius: 8px !important;
height: 8px !important;
}
input[type="range"]::-webkit-slider-thumb {
background: linear-gradient(135deg, #6a4c93, #533a7d) !important;
border: 3px solid white !important;
border-radius: 50% !important;
cursor: pointer !important;
width: 22px !important;
height: 22px !important;
-webkit-appearance: none !important;
box-shadow: 0 2px 8px rgba(106, 76, 147, 0.3) !important;
}
/* Enhanced accordion */
.gr-accordion {
background: rgba(255, 255, 255, 0.04) !important;
border-radius: 15px !important;
border: 1px solid rgba(255, 255, 255, 0.08) !important;
margin: 20px 0 !important;
backdrop-filter: blur(5px) !important;
}
/* Enhanced labels */
label {
color: #ffffff !important;
font-weight: 600 !important;
font-size: 1rem !important;
margin-bottom: 8px !important;
text-shadow: 1px 1px 2px rgba(0,0,0,0.5) !important;
}
/* Enhanced image upload */
.image-upload {
border: 3px dashed rgba(106, 76, 147, 0.4) !important;
border-radius: 20px !important;
background: rgba(255, 255, 255, 0.03) !important;
transition: all 0.4s ease !important;
position: relative;
}
.image-upload:hover {
border-color: rgba(106, 76, 147, 0.7) !important;
background: rgba(255, 255, 255, 0.08) !important;
transform: scale(1.01) !important;
}
/* Enhanced video output */
video {
border-radius: 20px !important;
box-shadow: 0 8px 30px rgba(0, 0, 0, 0.4) !important;
border: 2px solid rgba(106, 76, 147, 0.3) !important;
}
/* Enhanced examples section */
.gr-examples {
background: rgba(255, 255, 255, 0.04) !important;
border-radius: 20px !important;
padding: 25px !important;
margin-top: 25px !important;
border: 1px solid rgba(255, 255, 255, 0.1) !important;
}
/* Enhanced checkbox */
input[type="checkbox"] {
accent-color: #6a4c93 !important;
transform: scale(1.2) !important;
}
/* Responsive enhancements */
@media (max-width: 768px) {
h1 { font-size: 2.2rem !important; }
.main-container { padding: 25px !important; }
.generate-btn { padding: 12px 30px !important; font-size: 1.1rem !important; }
}
/* Badge container styling */
.badge-container {
display: flex;
justify-content: center;
gap: 15px;
margin: 20px 0;
flex-wrap: wrap;
}
.badge-container img {
border-radius: 8px;
transition: transform 0.3s ease;
}
.badge-container img:hover {
transform: scale(1.05);
}
"""
def sanitize_prompt_for_filename(prompt: str, max_len: int = 60) -> str:
"""Sanitizes a prompt string to be used as a valid filename."""
if not prompt:
prompt = "video"
# Remove non-alphanumeric characters (except spaces, hyphens, underscores)
sanitized = re.sub(r'[^\w\s_-]', '', prompt).strip()
# Replace spaces and multiple hyphens/underscores with a single underscore
sanitized = re.sub(r'[\s_-]+', '_', sanitized)
# Truncate to max_len
    return sanitized[:max_len] or "video"  # fall back if sanitization stripped everything
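# Example: sanitize_prompt_for_filename("A cat, running!") -> "A_cat_running"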
def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
min_slider_h, max_slider_h,
min_slider_w, max_slider_w,
default_h, default_w):
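    """Fit the image into ~calculation_max_area pixels while preserving its aspect
    ratio, snapping each side down to a multiple of mod_val and clamping the
    result to the slider ranges."""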
orig_w, orig_h = pil_image.size
if orig_w <= 0 or orig_h <= 0:
return default_h, default_w
aspect_ratio = orig_h / orig_w
calc_h = round(np.sqrt(calculation_max_area * aspect_ratio))
calc_w = round(np.sqrt(calculation_max_area / aspect_ratio))
calc_h = max(mod_val, (calc_h // mod_val) * mod_val)
calc_w = max(mod_val, (calc_w // mod_val) * mod_val)
new_h = int(np.clip(calc_h, min_slider_h, (max_slider_h // mod_val) * mod_val))
new_w = int(np.clip(calc_w, min_slider_w, (max_slider_w // mod_val) * mod_val))
return new_h, new_w
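# Example: a 1920x1080 (16:9) upload with max_area = 576*1024 gives
# calc_h = round(sqrt(589824 * 0.5625)) = 576 and calc_w = round(sqrt(589824 / 0.5625)) = 1024,
# both already multiples of 32, so the sliders land on 576x1024.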
def handle_image_upload_for_dims_wan(uploaded_pil_image, current_h_val, current_w_val):
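    """Sync the height/width sliders to the aspect ratio of a newly uploaded image."""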
if uploaded_pil_image is None:
return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
try:
new_h, new_w = _calculate_new_dimensions_wan(
uploaded_pil_image, MOD_VALUE, NEW_FORMULA_MAX_AREA,
SLIDER_MIN_H, SLIDER_MAX_H, SLIDER_MIN_W, SLIDER_MAX_W,
DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE
)
return gr.update(value=new_h), gr.update(value=new_w)
except Exception as e:
gr.Warning("Error attempting to calculate new dimensions")
return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
def get_duration(input_image, prompt, height, width,
negative_prompt, duration_seconds,
guidance_scale, steps, lora_scale,
seed, randomize_seed,
progress):
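    """Return the ZeroGPU time budget (in seconds) to reserve for this request."""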
    # Scale the GPU reservation with the workload: more steps and longer clips need more time
if steps > 8 and duration_seconds > 3:
return 600
elif steps > 8 or duration_seconds > 3:
return 300
else:
return 150
@spaces.GPU(duration=get_duration)
def generate_video(input_image, prompt, height, width,
negative_prompt=default_negative_prompt, duration_seconds=3,
guidance_scale=1, steps=8, lora_scale=1.0,
seed=42, randomize_seed=False,
progress=gr.Progress(track_tqdm=True)):
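    """Generate a video from an input image with the FusionX-enhanced Wan2.1 I2V pipeline.

    Returns the video path, the seed actually used, and a download-file update.
    """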
if input_image is None:
raise gr.Error("Please upload an input image.")
target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
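    # Convert the requested duration to a frame count, clamped to the model's supported range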
num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
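    # Resolve the seed now so the UI can report the value actually used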
current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
resized_image = input_image.resize((target_w, target_h))
# Enhanced prompt for FusionX-style output
enhanced_prompt = f"{prompt}, cinematic quality, smooth motion, detailed animation, dynamic lighting"
with torch.inference_mode():
output_frames_list = pipe(
image=resized_image,
prompt=enhanced_prompt,
negative_prompt=negative_prompt,
height=target_h,
width=target_w,
num_frames=num_frames,
guidance_scale=float(guidance_scale),
num_inference_steps=int(steps),
generator=torch.Generator(device="cuda").manual_seed(current_seed),
            attention_kwargs={"scale": float(lora_scale)}  # Wan pipelines take attention_kwargs (not the SD-style cross_attention_kwargs) for the LoRA scale
).frames[0]
# Create a unique filename for download
sanitized_prompt = sanitize_prompt_for_filename(prompt)
filename = f"{sanitized_prompt}_{current_seed}.mp4"
temp_dir = tempfile.mkdtemp()
video_path = os.path.join(temp_dir, filename)
export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
return video_path, current_seed, gr.File(value=video_path, visible=True, label=f"πŸ“₯ Download: {filename}")
with gr.Blocks(css=custom_css) as demo:
with gr.Column(elem_classes=["main-container"]):
gr.Markdown("# ⚑ FusionX Enhanced Wan 2.1 I2V (14B)")
with gr.Row():
with gr.Column(elem_classes=["input-container"]):
input_image_component = gr.Image(
type="pil",
label="πŸ–ΌοΈ Input Image (auto-resized to target H/W)",
elem_classes=["image-upload"]
)
prompt_input = gr.Textbox(
label="✏️ Enhanced Prompt (FusionX-style enhancements applied)",
value=default_prompt_i2v,
lines=3
)
duration_seconds_input = gr.Slider(
minimum=round(MIN_FRAMES_MODEL/FIXED_FPS,1),
maximum=round(MAX_FRAMES_MODEL/FIXED_FPS,1),
step=0.1,
value=2,
label="⏱️ Duration (seconds)",
info=f"FusionX Enhanced supports {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps. Recommended: 2-5 seconds"
)
with gr.Accordion("βš™οΈ Advanced FusionX Settings", open=False):
negative_prompt_input = gr.Textbox(
label="❌ Negative Prompt (FusionX Enhanced)",
value=default_negative_prompt,
lines=4
)
seed_input = gr.Slider(
label="🎲 Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=42,
interactive=True
)
randomize_seed_checkbox = gr.Checkbox(
label="πŸ”€ Randomize seed",
value=True,
interactive=True
)
with gr.Row():
height_input = gr.Slider(
minimum=SLIDER_MIN_H,
maximum=SLIDER_MAX_H,
step=MOD_VALUE,
value=DEFAULT_H_SLIDER_VALUE,
label=f"πŸ“ Output Height (FusionX optimized: {MOD_VALUE} multiples)"
)
width_input = gr.Slider(
minimum=SLIDER_MIN_W,
maximum=SLIDER_MAX_W,
step=MOD_VALUE,
value=DEFAULT_W_SLIDER_VALUE,
label=f"πŸ“ Output Width (FusionX optimized: {MOD_VALUE} multiples)"
)
lora_scale_slider = gr.Slider(
minimum=0.0,
maximum=2.5,
step=0.05,
value=1.0,
label="πŸ’ͺ FusionX LoRA Strength",
info="Control the intensity of the FusionX effect. >1.0 for stronger effect, <1.0 for less."
)
steps_slider = gr.Slider(
minimum=1,
maximum=20,
step=1,
value=8, # FusionX optimized
label="πŸš€ Inference Steps (FusionX Enhanced: 8-10 recommended)",
info="FusionX Enhanced delivers excellent results in just 8-10 steps!"
)
guidance_scale_input = gr.Slider(
minimum=0.0,
maximum=20.0,
step=0.5,
value=1.0,
label="🎯 Guidance Scale (FusionX optimized)",
visible=False
)
generate_button = gr.Button(
"🎬 Generate FusionX Enhanced Video",
variant="primary",
elem_classes=["generate-btn"]
)
with gr.Column(elem_classes=["output-container"]):
video_output = gr.Video(
label="πŸŽ₯ FusionX Enhanced Generated Video",
autoplay=True,
interactive=False
)
download_output = gr.File(label="πŸ“₯ Download Video", visible=False)
input_image_component.upload(
fn=handle_image_upload_for_dims_wan,
inputs=[input_image_component, height_input, width_input],
outputs=[height_input, width_input]
)
input_image_component.clear(
fn=handle_image_upload_for_dims_wan,
inputs=[input_image_component, height_input, width_input],
outputs=[height_input, width_input]
)
ui_inputs = [
input_image_component, prompt_input, height_input, width_input,
negative_prompt_input, duration_seconds_input,
guidance_scale_input, steps_slider, lora_scale_slider, seed_input, randomize_seed_checkbox
]
generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input, download_output])
if __name__ == "__main__":
demo.queue().launch()