Update app.py
app.py
CHANGED
@@ -85,9 +85,9 @@ pipe = StableDiffusion3Pipeline.from_pretrained(
 #torch_dtype=torch.bfloat16,
 #use_safetensors=False,
 )
-text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(torch.device("cuda:0", dtype=torch.bfloat16)
-text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(torch.device("cuda:0", dtype=torch.bfloat16)
-text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(torch.device("cuda:0", dtype=torch.bfloat16)
+text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(torch.device("cuda:0"), dtype=torch.bfloat16)
+text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(torch.device("cuda:0"), dtype=torch.bfloat16)
+text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(torch.device("cuda:0"), dtype=torch.bfloat16)
 
 pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/UltraReal.safetensors")
 
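For context: the removed lines passed dtype into torch.device(), which does not accept a dtype argument, and were also missing a closing parenthesis; the new lines close the device call and hand dtype to .to(), which moves the module to the GPU and casts it in one step. A minimal sketch of the corrected pattern for one encoder, assuming torch, transformers, and a CUDA device are available (not the full app.py):

import torch
from transformers import CLIPTextModelWithProjection

# dtype belongs to .to(), not torch.device(); this moves and casts the module together
text_encoder = CLIPTextModelWithProjection.from_pretrained(
    "ford442/stable-diffusion-3.5-large-bf16", subfolder="text_encoder", token=True
).to(torch.device("cuda:0"), dtype=torch.bfloat16)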