thankfulcarp committed on
Commit
1f35d50
·
1 Parent(s): 16d02f1

Fixed Loading

Files changed (1)
  1. app.py +48 -49
app.py CHANGED
@@ -1,16 +1,8 @@
import spaces
import torch
- from diffusers import AutoencoderKLWan, WanImageToVideoPipeline, UniPCMultistepScheduler, WanTransformer3DModel
+ from diffusers import AutoencoderKLWan, WanImageToVideoPipeline, UniPCMultistepScheduler, WanTransformer3DModel, AutoModel, DiffusionPipeline
from diffusers.utils import export_to_video
- # Conditionally import T2V pipeline to handle different diffusers versions and prevent crashes.
- try:
-     from diffusers import WanTextToVideoPipeline
-     IS_T2V_AVAILABLE = True
- except ImportError:
-     WanTextToVideoPipeline = None # Define as None so later code doesn't raise NameError
-     IS_T2V_AVAILABLE = False
-     print("⚠️ Warning: 'WanTextToVideoPipeline' could not be imported. Your 'diffusers' version might be outdated (requires >= 0.25.0).")
- from transformers import CLIPVisionModel, CLIPTextModel, CLIPTokenizer
+ from transformers import CLIPVisionModel, UMT5EncoderModel
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline # noqa
import tempfile
import re
@@ -24,24 +16,27 @@ import gradio as gr
import random

# --- I2V (Image-to-Video) Configuration ---
- I2V_MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers" # Used for VAE/encoder components
- I2V_SINGLE_FILE_URL = "https://huggingface.co/vrgamedevgirl84/Wan14BT2VFusioniX/resolve/main/Wan14Bi2vFusioniX.safetensors"
+ I2V_BASE_MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers" # Used for VAE/encoder components
+ I2V_FUSIONX_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
+ I2V_FUSIONX_FILENAME = "Wan14Bi2vFusioniX.safetensors"

# --- T2V (Text-to-Video) Configuration ---
- T2V_MODEL_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
- T2V_SINGLE_FILE_URL = "https://huggingface.co/vrgamedevgirl84/Wan14BT2VFusioniX/resolve/main/WanT2V_MasterModel.safetensors"
+ T2V_BASE_MODEL_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
+ T2V_LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
+ T2V_LORA_FILENAME = "FusionX_LoRa/Wan2.1_T2V_14B_FusionX_LoRA.safetensors"

# --- Load Pipelines ---
print("🚀 Loading I2V pipeline from single file...")
i2v_pipe = None
try:
    # Load components needed for the pipeline from the base model repo
-     i2v_image_encoder = CLIPVisionModel.from_pretrained(I2V_MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float32)
-     i2v_vae = AutoencoderKLWan.from_pretrained(I2V_MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
+     i2v_image_encoder = CLIPVisionModel.from_pretrained(I2V_BASE_MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float32)
+     i2v_vae = AutoencoderKLWan.from_pretrained(I2V_BASE_MODEL_ID, subfolder="vae", torch_dtype=torch.float32)

-     # Load the main transformer from the single file URL
+     # Load the main transformer from the repo and filename
    i2v_transformer = WanTransformer3DModel.from_single_file(
-         I2V_SINGLE_FILE_URL,
+         I2V_FUSIONX_REPO_ID,
+         filename=I2V_FUSIONX_FILENAME,
        torch_dtype=torch.bfloat16
    )

@@ -58,34 +53,37 @@ except Exception as e:
    print(f"❌ Critical Error: Failed to load I2V pipeline from single file.")
    traceback.print_exc()

- print("\n🚀 Loading T2V pipeline from single file...")
+ print("\n🚀 Loading T2V pipeline with LoRA...")
t2v_pipe = None
- if IS_T2V_AVAILABLE:
-     try:
-         # Load components needed for the pipeline from the base model repo
-         t2v_vae = AutoencoderKLWan.from_pretrained(T2V_MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
-         t2v_text_encoder = CLIPTextModel.from_pretrained(T2V_MODEL_ID, subfolder="text_encoder", torch_dtype=torch.bfloat16)
-         t2v_tokenizer = CLIPTokenizer.from_pretrained(T2V_MODEL_ID, subfolder="tokenizer")
-
-         # Load the main transformer from the single file URL
-         t2v_transformer = WanTransformer3DModel.from_single_file(
-             T2V_SINGLE_FILE_URL,
-             torch_dtype=torch.bfloat16
-         )
+ try:
+
+     # Load components needed for the T2V pipeline
+     text_encoder = UMT5EncoderModel.from_pretrained(T2V_BASE_MODEL_ID, subfolder="text_encoder", torch_dtype=torch.bfloat16)
+     vae = AutoModel.from_pretrained(T2V_BASE_MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
+     transformer = AutoModel.from_pretrained(T2V_BASE_MODEL_ID, subfolder="transformer", torch_dtype=torch.bfloat16)
+
+     # Assemble the final pipeline
+     t2v_pipe = DiffusionPipeline.from_pretrained(
+         "Wan-AI/Wan2.1-T2V-14B-Diffusers",
+         vae=vae,
+         transformer=transformer,
+         text_encoder=text_encoder,
+         torch_dtype=torch.bfloat16
+     )
+     t2v_pipe.to("cuda")

-         # Manually assemble the pipeline with the custom transformer
-         t2v_pipe = WanTextToVideoPipeline(
-             vae=t2v_vae,
-             text_encoder=t2v_text_encoder,
-             tokenizer=t2v_tokenizer,
-             transformer=t2v_transformer
-         )
-         t2v_pipe.scheduler = UniPCMultistepScheduler.from_config(t2v_pipe.scheduler.config, flow_shift=8.0)
-         t2v_pipe.to("cuda")
-         print("✅ T2V pipeline loaded successfully from single file.")
-     except Exception as e:
-         print(f"❌ Critical Error: Failed to load T2V pipeline from single file.")
-         traceback.print_exc()
+     t2v_pipe.load_lora_weights(
+         T2V_LORA_REPO_ID,
+         weight_name=T2V_LORA_FILENAME,
+         adapter_name="fusionx_t2v"
+     )
+     t2v_pipe.set_adapters(["fusionx_t2v"], adapter_weights=[0.75])
+
+     print("✅ T2V pipeline and LoRA loaded and fused successfully.")
+ except Exception as e:
+     print(f"❌ Critical Error: Failed to load T2V pipeline.")
+     traceback.print_exc()

# --- LLM Prompt Enhancer Setup ---
print("\n🤖 Loading LLM for Prompt Enhancement (Qwen/Qwen3-8B)...")
@@ -149,6 +147,7 @@ SLIDER_MIN_W, SLIDER_MAX_W = 128, 1024
MAX_SEED = np.iinfo(np.int32).max

FIXED_FPS = 24
+ T2V_FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 81

@@ -462,7 +461,7 @@ def generate_t2v_video(prompt, height, width,

    target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
    target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
-     num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
+     num_frames = np.clip(int(round(duration_seconds * T2V_FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    enhanced_prompt = f"{prompt}, cinematic, high detail, professional lighting"

@@ -482,7 +481,7 @@ def generate_t2v_video(prompt, height, width,
    filename = f"t2v_{sanitized_prompt}_{current_seed}.mp4"
    temp_dir = tempfile.mkdtemp()
    video_path = os.path.join(temp_dir, filename)
-     export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
+     export_to_video(output_frames_list, video_path, fps=T2V_FIXED_FPS)

    return video_path, current_seed, gr.File(value=video_path, visible=True, label=f"📥 Download: {filename}")

@@ -530,7 +529,7 @@ with gr.Blocks(css=custom_css) as demo:

        # --- Text-to-Video Tab ---
        with gr.TabItem("✍️ Text-to-Video", id="t2v_tab", interactive=t2v_pipe is not None):
-             if not IS_T2V_AVAILABLE or t2v_pipe is None:
+             if t2v_pipe is None:
                gr.Markdown("<h3 style='color: #ff9999; text-align: center;'>⚠️ Text-to-Video Pipeline Failed to Load. This tab is disabled.</h3>")
            else:
                with gr.Row():
@@ -548,7 +547,7 @@ with gr.Blocks(css=custom_css) as demo:
                    minimum=round(MIN_FRAMES_MODEL/FIXED_FPS,1),
                    maximum=round(MAX_FRAMES_MODEL/FIXED_FPS,1),
                    step=0.1, value=2, label="⏱️ Duration (seconds)",
-                     info=f"Generates {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps."
+                     info=f"Generates {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {T2V_FIXED_FPS}fps."
                )
                with gr.Accordion("⚙️ Advanced Settings", open=False):
                    t2v_neg_prompt = gr.Textbox(label="❌ Negative Prompt", value=default_negative_prompt, lines=4)
@@ -558,7 +557,7 @@ with gr.Blocks(css=custom_css) as demo:
                    t2v_height = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label=f"📏 Height ({MOD_VALUE}px steps)")
                    t2v_width = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label=f"📏 Width ({MOD_VALUE}px steps)")
                    t2v_steps = gr.Slider(minimum=1, maximum=25, step=1, value=15, label="🚀 Inference Steps", info="15-20 recommended for quality.")
-                     t2v_guidance = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=7.5, label="🎯 Guidance Scale")
+                     t2v_guidance = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=5.0, label="🎯 Guidance Scale")

                t2v_generate_btn = gr.Button("🎬 Generate T2V", variant="primary", elem_classes=["generate-btn"])
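
For context, a minimal sketch of how the rebuilt T2V pipeline can be driven end to end under this commit's constants. The pipeline assembly is condensed from the diff above (the FusionX LoRA attachment is omitted for brevity); the prompt text and the 480x832 output size are illustrative assumptions, while the fps/frame clamp and the new guidance default come straight from the change:

import numpy as np
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

# Condensed from the diff: base Wan 2.1 T2V pipeline (LoRA step omitted here).
t2v_pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-14B-Diffusers", torch_dtype=torch.bfloat16
).to("cuda")

T2V_FIXED_FPS = 16                     # new constant introduced by this commit
MIN_FRAMES_MODEL, MAX_FRAMES_MODEL = 8, 81

duration_seconds = 2.0                 # UI slider default
# Same clamp as generate_t2v_video(): round(2.0 * 16) = 32 frames, clipped to [8, 81]
num_frames = int(np.clip(round(duration_seconds * T2V_FIXED_FPS), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))

result = t2v_pipe(
    prompt="a red fox running through snow, cinematic, high detail, professional lighting",  # illustrative
    height=480, width=832,             # assumed example size; the app snaps sliders to MOD_VALUE steps
    num_frames=num_frames,
    guidance_scale=5.0,                # new default from this commit (was 7.5)
    num_inference_steps=15,            # UI slider default
    generator=torch.Generator("cuda").manual_seed(42),
)
export_to_video(result.frames[0], "t2v_demo.mp4", fps=T2V_FIXED_FPS)

At 16 fps the 81-frame model cap corresponds to roughly 5 seconds of video, versus about 3.4 seconds at the 24 fps the I2V path keeps using.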
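A note on the LoRA handling: set_adapters attaches the FusionX weights at 0.75 strength but keeps them hot-swappable rather than merging them into the base weights. A short sketch, assuming the diffusers PEFT integration and the adapter name used in the diff, of how that strength could be adjusted at runtime:

# Assumes t2v_pipe.load_lora_weights(..., adapter_name="fusionx_t2v") has
# already run, as in the diff above.

# Weaken or strengthen the FusionX influence:
t2v_pipe.set_adapters(["fusionx_t2v"], adapter_weights=[0.5])

# Temporarily run the unmodified base model:
t2v_pipe.disable_lora()

# Restore the strength this app ships with:
t2v_pipe.enable_lora()
t2v_pipe.set_adapters(["fusionx_t2v"], adapter_weights=[0.75])

# Alternatively, bake the adapter in to avoid per-step LoRA overhead
# (after this, set_adapters no longer applies):
# t2v_pipe.fuse_lora(adapter_names=["fusionx_t2v"], lora_scale=0.75)

Keeping the adapter unfused trades a little per-step overhead for the ability to toggle or retune it without reloading the pipeline.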