linoyts HF Staff committed on
Commit
fb351d8
·
verified ·
1 Parent(s): 3096603

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -33
app.py CHANGED
@@ -45,35 +45,6 @@ pipe = WanImageToVideoPipeline.from_pretrained(MODEL_ID,
45
  torch_dtype=torch.bfloat16,
46
  ).to('cuda')
47
 
48
- # load, fuse, unload before compilation
49
- # pipe.load_lora_weights(
50
- # "vrgamedevgirl84/Wan14BT2VFusioniX",
51
- # weight_name="FusionX_LoRa/Phantom_Wan_14B_FusionX_LoRA.safetensors",
52
- # adapter_name="phantom"
53
- # )
54
-
55
- # pipe.set_adapters(["phantom"], adapter_weights=[0.95])
56
- # pipe.fuse_lora(adapter_names=["phantom"], lora_scale=1.0)
57
- # pipe.unload_lora_weights()
58
-
59
-
60
- # pipe.load_lora_weights(
61
- # "vrgamedevgirl84/Wan14BT2VFusioniX",
62
- # weight_name="FusionX_LoRa/Phantom_Wan_14B_FusionX_LoRA.safetensors",
63
- # adapter_name="phantom"
64
- # )
65
- # kwargs = {}
66
- # kwargs["load_into_transformer_2"] = True
67
- # pipe.load_lora_weights(
68
- # "vrgamedevgirl84/Wan14BT2VFusioniX",
69
- # weight_name="FusionX_LoRa/Phantom_Wan_14B_FusionX_LoRA.safetensors",
70
- # adapter_name="phantom_2", **kwargs
71
- # )
72
- # pipe.set_adapters(["phantom", "phantom_2"], adapter_weights=[1., 1.])
73
- # pipe.fuse_lora(adapter_names=["phantom"], lora_scale=3., components=["transformer"])
74
- # pipe.fuse_lora(adapter_names=["phantom_2"], lora_scale=1., components=["transformer_2"])
75
- # pipe.unload_lora_weights()
76
-
77
  for i in range(3):
78
  gc.collect()
79
  torch.cuda.synchronize()
@@ -135,8 +106,8 @@ def generate_video(
135
  negative_prompt=default_negative_prompt,
136
  duration_seconds = MAX_DURATION,
137
  guidance_scale = 1,
138
- guidance_scale_2 = 3,
139
- steps = 6,
140
  seed = 42,
141
  randomize_seed = False,
142
  progress=gr.Progress(track_tqdm=True),
@@ -210,8 +181,8 @@ def generate_video(
210
  return video_path, current_seed
211
 
212
  with gr.Blocks() as demo:
213
- gr.Markdown("# Fast 6 steps Wan 2.2 I2V (14B) with Phantom LoRA")
214
- gr.Markdown("run Wan 2.2 in just 6-8 steps, with [FusionX Phantom LoRA by DeeJayT](https://huggingface.co/vrgamedevgirl84/Wan14BT2VFusioniX/tree/main/FusionX_LoRa), compatible with 🧨 diffusers")
215
  with gr.Row():
216
  with gr.Column():
217
  input_image_component = gr.Image(type="pil", label="Input Image (auto-resized to target H/W)")
@@ -243,6 +214,14 @@ with gr.Blocks() as demo:
243
  "wan_i2v_input.JPG",
244
  "POV selfie video, white cat with sunglasses standing on surfboard, relaxed smile, tropical beach behind (clear water, green hills, blue sky with clouds). Surfboard tips, cat falls into ocean, camera plunges underwater with bubbles and sunlight beams. Brief underwater view of cat’s face, then cat resurfaces, still filming selfie, playful summer vacation mood.",
245
  ],
 
 
 
 
 
 
 
 
246
  ],
247
  inputs=[input_image_component, prompt_input], outputs=[video_output, seed_input], fn=generate_video, cache_examples="lazy"
248
  )
 
45
  torch_dtype=torch.bfloat16,
46
  ).to('cuda')
47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
  for i in range(3):
49
  gc.collect()
50
  torch.cuda.synchronize()
 
106
  negative_prompt=default_negative_prompt,
107
  duration_seconds = MAX_DURATION,
108
  guidance_scale = 1,
109
+ guidance_scale_2 = 1,
110
+ steps = 4,
111
  seed = 42,
112
  randomize_seed = False,
113
  progress=gr.Progress(track_tqdm=True),
 
181
  return video_path, current_seed
182
 
183
  with gr.Blocks() as demo:
184
+ gr.Markdown("# Fast 4 steps Wan 2.2 I2V (14B) with Lightx2v LoRA")
185
+ gr.Markdown("run Wan 2.2 in just 4-8 steps, with [Lightx2v LoRA](https://huggingface.co/vrgamedevgirl84/Wan14BT2VFusioniX/tree/main/FusionX_LoRa), fp8 quantization & AoT compilation - compatible with 🧨 diffusers and ZeroGPU⚡️")
186
  with gr.Row():
187
  with gr.Column():
188
  input_image_component = gr.Image(type="pil", label="Input Image (auto-resized to target H/W)")
 
214
  "wan_i2v_input.JPG",
215
  "POV selfie video, white cat with sunglasses standing on surfboard, relaxed smile, tropical beach behind (clear water, green hills, blue sky with clouds). Surfboard tips, cat falls into ocean, camera plunges underwater with bubbles and sunlight beams. Brief underwater view of cat’s face, then cat resurfaces, still filming selfie, playful summer vacation mood.",
216
  ],
217
+ [
218
+ "wan22_input_2.jpg",
219
+ "A sleek lunar vehicle glides into view from left to right, kicking up moon dust as astronauts in white spacesuits hop aboard with characteristic lunar bouncing movements. In the distant background, a VTOL craft descends straight down and lands silently on the surface. Throughout the entire scene, ethereal aurora borealis ribbons dance across the star-filled sky, casting shimmering curtains of green, blue, and purple light that bathe the lunar landscape in an otherworldly, magical glow.",
220
+ ],
221
+ [
222
+ "kill_bill.jpeg",
223
+ "Uma Thurman's character, Beatrix Kiddo, holds her razor-sharp katana blade steady in the cinematic lighting. Suddenly, the polished steel begins to soften and distort, like heated metal starting to lose its structural integrity. The blade's perfect edge slowly warps and droops, molten steel beginning to flow downward in silvery rivulets while maintaining its metallic sheen. The transformation starts subtly at first - a slight bend in the blade - then accelerates as the metal becomes increasingly fluid. The camera holds steady on her face as her piercing eyes gradually narrow, not with lethal focus, but with confusion and growing alarm as she watches her weapon dissolve before her eyes. Her breathing quickens slightly as she witnesses this impossible transformation. The melting intensifies, the katana's perfect form becoming increasingly abstract, dripping like liquid mercury from her grip. Molten droplets fall to the ground with soft metallic impacts. Her expression shifts from calm readiness to bewilderment and concern as her legendary instrument of vengeance literally liquefies in her hands, leaving her defenseless and disoriented.",
224
+ ],
225
  ],
226
  inputs=[input_image_component, prompt_input], outputs=[video_output, seed_input], fn=generate_video, cache_examples="lazy"
227
  )