ford442 committed
Commit 1315b05 · verified · 1 Parent(s): 49d1e27

Update app.py

Files changed (1): app.py (+3, -3)
app.py CHANGED

@@ -65,13 +65,13 @@ checkpoint = "microsoft/Phi-3.5-mini-instruct"
 pipe = StableDiffusion3Pipeline.from_pretrained(
     "stabilityai/stable-diffusion-3.5-large",
     #"ford442/stable-diffusion-3.5-large-bf16",
-    vae=AutoencoderKL.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='vae',token=True)
-    transformer=SD3Transformer2DModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='transformer',token=True)
+    vae=AutoencoderKL.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='vae',token=True),
+    transformer=SD3Transformer2DModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='transformer',token=True),
     text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder',token=True),
     text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True),
     text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True),
     token=True,
-    use_safetensors=False,
+    #use_safetensors=False,
 )

 pipe.to(device=device, dtype=torch.bfloat16)
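The fix itself is small: the previous revision was missing the trailing commas after the vae= and transformer= arguments, which makes the from_pretrained(...) call a Python syntax error, and use_safetensors=False is now commented out so loading falls back to the library default (which prefers safetensors when available). Below is a minimal, self-contained sketch of the corrected call; the imports and the device fallback are assumptions (they live elsewhere in app.py), and token=True presumes a Hugging Face token is already saved locally.

# Sketch of the corrected pipeline construction, not the full app.py.
import torch
from diffusers import StableDiffusion3Pipeline, AutoencoderKL, SD3Transformer2DModel
from transformers import CLIPTextModelWithProjection, T5EncoderModel

# Assumed: `device` is defined earlier in app.py; this fallback is illustrative.
device = "cuda" if torch.cuda.is_available() else "cpu"

repo = "ford442/stable-diffusion-3.5-large-bf16"  # bf16 mirror used for the components

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large",
    # Each component is loaded from the bf16 mirror; note the trailing commas
    # after vae= and transformer= that the previous revision was missing.
    vae=AutoencoderKL.from_pretrained(repo, subfolder="vae", token=True),
    transformer=SD3Transformer2DModel.from_pretrained(repo, subfolder="transformer", token=True),
    text_encoder=CLIPTextModelWithProjection.from_pretrained(repo, subfolder="text_encoder", token=True),
    text_encoder_2=CLIPTextModelWithProjection.from_pretrained(repo, subfolder="text_encoder_2", token=True),
    text_encoder_3=T5EncoderModel.from_pretrained(repo, subfolder="text_encoder_3", token=True),
    token=True,  # use the locally saved Hugging Face token for the gated base repo
)

pipe.to(device=device, dtype=torch.bfloat16)

With the commas restored the file parses again and the custom vae/transformer are actually passed into the pipeline, instead of the whole module failing with a SyntaxError at import time.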