thankfulcarp committed on
Commit bb449c5 · 1 Parent(s): 7d3e4a8

Alternative LoRA loading method

Files changed (1): app.py (+24 -31)
app.py CHANGED
@@ -7,6 +7,7 @@ import gradio as gr
 import tempfile
 import re
 import os
+import traceback
 
 from huggingface_hub import hf_hub_download
 import numpy as np
@@ -18,8 +19,7 @@ MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-720P-Diffusers"
 
 # Merged FusionX enhancement LoRA
 LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
-LORA_FILENAME = "Wan2.1_I2V_14B_FusionX_LoRA.safetensors"
-LORA_SUBFOLDER = "FusionX_LoRa"
+LORA_FILENAME = "FusionX_LoRa/Wan2.1_I2V_14B_FusionX_LoRA.safetensors"
 
 # Load enhanced model components
 print("🚀 Loading FusionX Enhanced Wan2.1 I2V Model...")
@@ -33,34 +33,36 @@ pipe = WanImageToVideoPipeline.from_pretrained(
 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
 pipe.to("cuda")
 
-# Load the single merged FusionX LoRA
+# Load and fuse the single merged FusionX LoRA
 try:
     lora_path = hf_hub_download(
         repo_id=LORA_REPO_ID,
-        filename=LORA_FILENAME,
-        subfolder=LORA_SUBFOLDER
+        filename=LORA_FILENAME
     )
-    # Load the LoRA weights into the transformer model component
-    pipe.load_lora_weights(lora_path, adapter_name="fusionx")
-    # Set the adapter to be active
-    pipe.set_adapters(["fusionx"])
-    print("✅ FusionX LoRA loaded and activated. Use the slider to control its strength.")
-except Exception as e:
-    print(f"⚠️ FusionX LoRA not loaded: {e}")
+    print("✅ LoRA downloaded to:", lora_path)
+
+    # Load, set weight, and fuse the LoRA into the pipeline
+    pipe.load_lora_weights(lora_path, adapter_name="fusionx_lora")
+    pipe.set_adapters(["fusionx_lora"], adapter_weights=[0.75])
+    pipe.fuse_lora()
+    print("✅ FusionX LoRA loaded and fused with a weight of 0.75.")
 
+except Exception as e:
+    print("❌ Error during LoRA loading:")
+    traceback.print_exc()
 
 MOD_VALUE = 32
-DEFAULT_H_SLIDER_VALUE = 576 # FusionX optimized default
-DEFAULT_W_SLIDER_VALUE = 1024 # FusionX optimized default
-NEW_FORMULA_MAX_AREA = 576.0 * 1024.0 # Updated for FusionX
+DEFAULT_H_SLIDER_VALUE = 640
+DEFAULT_W_SLIDER_VALUE = 1024
+NEW_FORMULA_MAX_AREA = 640.0 * 1024.0
 
-SLIDER_MIN_H, SLIDER_MAX_H = 128, 1080
-SLIDER_MIN_W, SLIDER_MAX_W = 128, 1920
+SLIDER_MIN_H, SLIDER_MAX_H = 128, 1024
+SLIDER_MIN_W, SLIDER_MAX_W = 128, 1024
 MAX_SEED = np.iinfo(np.int32).max
 
 FIXED_FPS = 24
 MIN_FRAMES_MODEL = 8
-MAX_FRAMES_MODEL = 121 # FusionX supports up to 121 frames
+MAX_FRAMES_MODEL = 81
 
 # Enhanced prompts for FusionX-style output
 default_prompt_i2v = "Cinematic motion, smooth animation, detailed textures, dynamic lighting, professional cinematography"
@@ -319,7 +321,7 @@ def handle_image_upload_for_dims_wan(uploaded_pil_image, current_h_val, current_
 
 def get_duration(input_image, prompt, height, width,
                  negative_prompt, duration_seconds,
-                 guidance_scale, steps, lora_scale,
+                 guidance_scale, steps,
                  seed, randomize_seed,
                  progress):
     # FusionX optimized duration calculation
@@ -333,7 +335,7 @@ def get_duration(input_image, prompt, height, width,
 @spaces.GPU(duration=get_duration)
 def generate_video(input_image, prompt, height, width,
                    negative_prompt=default_negative_prompt, duration_seconds=3,
-                   guidance_scale=1, steps=8, lora_scale=1.0,
+                   guidance_scale=1, steps=8,
                    seed=42, randomize_seed=False,
                    progress=gr.Progress(track_tqdm=True)):
 
@@ -362,8 +364,7 @@ def generate_video(input_image, prompt, height, width,
         num_frames=num_frames,
         guidance_scale=float(guidance_scale),
         num_inference_steps=int(steps),
-        generator=torch.Generator(device="cuda").manual_seed(current_seed),
-        cross_attention_kwargs={"scale": float(lora_scale)}
+        generator=torch.Generator(device="cuda").manual_seed(current_seed)
     ).frames[0]
 
     # Create a unique filename for download
@@ -439,14 +440,6 @@ with gr.Blocks() as demo:
                 value=DEFAULT_W_SLIDER_VALUE,
                 label=f"📏 Output Width (FusionX optimized: {MOD_VALUE} multiples)"
            )
-            lora_scale_slider = gr.Slider(
-                minimum=0.0,
-                maximum=2.5,
-                step=0.05,
-                value=1.0,
-                label="💪 FusionX LoRA Strength",
-                info="Control the intensity of the FusionX effect. >1.0 for stronger effect, <1.0 for less."
-            )
            steps_slider = gr.Slider(
                 minimum=1,
                 maximum=20,
@@ -493,7 +486,7 @@ with gr.Blocks() as demo:
     ui_inputs = [
         input_image_component, prompt_input, height_input, width_input,
         negative_prompt_input, duration_seconds_input,
-        guidance_scale_input, steps_slider, lora_scale_slider, seed_input, randomize_seed_checkbox
+        guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox
     ]
     generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input, download_output])
 
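
For reference, a minimal standalone sketch of the load-then-fuse flow this commit switches to, assuming a standard diffusers setup. The repo ID, file path, adapter name, and 0.75 weight come from the diff above; the from_pretrained call and its torch_dtype are illustrative assumptions, since the full pipeline construction is not shown in this diff.

import traceback

import torch
from diffusers import WanImageToVideoPipeline, UniPCMultistepScheduler
from huggingface_hub import hf_hub_download

MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-720P-Diffusers"
LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
LORA_FILENAME = "FusionX_LoRa/Wan2.1_I2V_14B_FusionX_LoRA.safetensors"

# Assumed pipeline setup; the dtype choice is not part of this diff.
pipe = WanImageToVideoPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
pipe.to("cuda")

try:
    # The subfolder is folded into the filename instead of a separate subfolder= argument.
    lora_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)

    # Load the adapter, fix its strength once, and fuse it into the base weights,
    # so inference no longer needs a per-call cross_attention_kwargs / lora_scale.
    pipe.load_lora_weights(lora_path, adapter_name="fusionx_lora")
    pipe.set_adapters(["fusionx_lora"], adapter_weights=[0.75])
    pipe.fuse_lora()
except Exception:
    traceback.print_exc()

Because fuse_lora() bakes the 0.75 strength into the base weights, the generation call no longer passes cross_attention_kwargs={"scale": ...}, which is why the lora_scale parameter, the 💪 FusionX LoRA Strength slider, and the corresponding entry in ui_inputs are all removed in this commit.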