Update app.py
app.py
CHANGED
@@ -146,7 +146,6 @@ def load_and_prepare_model():
         vae=None,
     )
 
-    pipe.scheduler=scheduler
     #pipe.tokenizer=tokenizer_1
     #pipe.tokenizer_2=tokenizer_2
     #pipe.unet=unet
@@ -171,8 +170,8 @@ def load_and_prepare_model():
 
     #pipe.vae.force_upcast=True
     # pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/Fantasy_World_XL.safetensors", adapter_name="fantasy")
-    pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/skin_texture_style_v4.safetensors", adapter_name="skin", low_cpu_mem_usage=False,token=HF_TOKEN)
-
+    #pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/skin_texture_style_v4.safetensors", adapter_name="skin", low_cpu_mem_usage=False,token=HF_TOKEN)
+
     #pipe.unet = pipe.unet.to(memory_format=torch.contiguous_format)
     #pipe.unet.to(memory_format=torch.channels_last)
     #pipe.enable_vae_tiling()
@@ -181,9 +180,10 @@ def load_and_prepare_model():
     #pipe.set_adapters(["skin"], adapter_weights=[0.5])
     #pipe.unet.set_default_attn_processor()
 
-    #**** BETTER WAY ****#
     pipe.to(device, torch.bfloat16)
-
+    pipe.scheduler=scheduler
+    pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.spritesheet', weight_name='spritesheet.safetensors', adapter_name="skin", low_cpu_mem_usage=False).to(torch.bfloat16)
+
     pipe.vae = vaeXL.to(device) #.to(torch.bfloat16)
     pipe.vae.set_default_attn_processor()
     #pipe.to(device)
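For reference, a minimal standalone sketch of the setup order the new revision uses: move the pipeline to the device/dtype first, then set the scheduler and load the LoRA adapter. The base checkpoint, scheduler class, and device below are assumptions rather than the app's actual values, and load_lora_weights() is written as a plain statement here since it returns None in diffusers.

import torch
from diffusers import StableDiffusionXLPipeline, EulerDiscreteScheduler

device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumed base checkpoint; app.py assembles its pipeline from its own components.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.bfloat16,
)

# Same order as the new revision: device/dtype first, then scheduler, then LoRA.
pipe.to(device, torch.bfloat16)
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(
    "ntc-ai/SDXL-LoRA-slider.spritesheet",
    weight_name="spritesheet.safetensors",
    adapter_name="skin",
    low_cpu_mem_usage=False,
)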