Spaces:
Running
on
Zero
add dual lora loading (#1)
Browse files
- add dual lora loading (55f8abb7e38c743cef07c6c9ecd712593d43b8ff)
- Update app.py (989357c225509fca4f985bafcbc1578035ea17f5)
- app.py +23 -6
- requirements.txt +1 -1
app.py
CHANGED
@@ -46,17 +46,34 @@ pipe = WanImageToVideoPipeline.from_pretrained(MODEL_ID,
|
|
46 |
).to('cuda')
|
47 |
|
48 |
# load, fuse, unload before compilation
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
49 |
pipe.load_lora_weights(
|
50 |
"vrgamedevgirl84/Wan14BT2VFusioniX",
|
51 |
weight_name="FusionX_LoRa/Phantom_Wan_14B_FusionX_LoRA.safetensors",
|
52 |
adapter_name="phantom"
|
53 |
)
|
54 |
54 |
-
|
55 |
-
pipe.set_adapters(["phantom"], adapter_weights=[0.95])
|
56 |
-
pipe.fuse_lora(adapter_names=["phantom"], lora_scale=1.0)
|
57 |
pipe.unload_lora_weights()
|
58 |
|
59 |
-
|
60 |
optimize_pipeline_(pipe,
|
61 |
image=Image.new('RGB', (LANDSCAPE_WIDTH, LANDSCAPE_HEIGHT)),
|
62 |
prompt='prompt',
|
@@ -201,8 +218,8 @@ with gr.Blocks() as demo:
|
|
201 |
seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
|
202 |
randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
|
203 |
steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
|
204 |
-
guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=
|
205 |
-
guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=
|
206 |
|
207 |
generate_button = gr.Button("Generate Video", variant="primary")
|
208 |
with gr.Column():
|
|
|
46 |
).to('cuda')
|
47 |
|
48 |
# load, fuse, unload before compilation
|
49 |
+
# pipe.load_lora_weights(
|
50 |
+
# "vrgamedevgirl84/Wan14BT2VFusioniX",
|
51 |
+
# weight_name="FusionX_LoRa/Phantom_Wan_14B_FusionX_LoRA.safetensors",
|
52 |
+
# adapter_name="phantom"
|
53 |
+
# )
|
54 |
+
|
55 |
+
# pipe.set_adapters(["phantom"], adapter_weights=[0.95])
|
56 |
+
# pipe.fuse_lora(adapter_names=["phantom"], lora_scale=1.0)
|
57 |
+
# pipe.unload_lora_weights()
|
58 |
+
|
59 |
+
|
60 |
pipe.load_lora_weights(
|
61 |
"vrgamedevgirl84/Wan14BT2VFusioniX",
|
62 |
weight_name="FusionX_LoRa/Phantom_Wan_14B_FusionX_LoRA.safetensors",
|
63 |
adapter_name="phantom"
|
64 |
)
|
65 |
+
kwargs = {}
|
66 |
+
kwargs["load_into_transformer_2"] = True
|
67 |
+
pipe.load_lora_weights(
|
68 |
+
"vrgamedevgirl84/Wan14BT2VFusioniX",
|
69 |
+
weight_name="FusionX_LoRa/Phantom_Wan_14B_FusionX_LoRA.safetensors",
|
70 |
+
adapter_name="phantom_2", **kwargs
|
71 |
+
)
|
72 |
+
pipe.set_adapters(["phantom", "phantom_2"], adapter_weights=[1., 1.])
|
73 |
+
pipe.fuse_lora(adapter_names=["phantom"], lora_scale=3., components=["transformer"])
|
74 |
+
pipe.fuse_lora(adapter_names=["phantom_2"], lora_scale=1., components=["transformer_2"])
|
75 |
pipe.unload_lora_weights()
|
76 |
|
|
|
77 |
optimize_pipeline_(pipe,
|
78 |
image=Image.new('RGB', (LANDSCAPE_WIDTH, LANDSCAPE_HEIGHT)),
|
79 |
prompt='prompt',
|
|
|
218 |
seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
|
219 |
randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
|
220 |
steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
|
221 |
+
guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=3, label="Guidance Scale - high noise stage")
|
222 |
+
guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 2 - low noise stage")
|
223 |
|
224 |
generate_button = gr.Button("Generate Video", variant="primary")
|
225 |
with gr.Column():
|
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
git+https://github.com/
|
2 |
transformers
|
3 |
accelerate
|
4 |
safetensors
|
|
|
1 |
+
git+https://github.com/linoytsaban/diffusers.git@wan22-loras
|
2 |
transformers
|
3 |
accelerate
|
4 |
safetensors
|