ford442 committed on
Commit
f80d9ec
·
verified ·
1 Parent(s): 4a877eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -63,13 +63,13 @@ checkpoint = "microsoft/Phi-3.5-mini-instruct"
63
  #vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False) #, device_map='cpu') #.to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
64
 
65
  pipe = StableDiffusion3Pipeline.from_pretrained(
66
- "stabilityai/stable-diffusion-3.5-large",
67
- #"ford442/stable-diffusion-3.5-large-bf16",
68
- vae=AutoencoderKL.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", use_safetensors=True, subfolder='vae',token=True),
69
- transformer=SD3Transformer2DModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='transformer',token=True),
70
- text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder',token=True),
71
- text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True),
72
- text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True),
73
  token=True,
74
  #use_safetensors=False,
75
  )
 
63
  #vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False) #, device_map='cpu') #.to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
64
 
65
  pipe = StableDiffusion3Pipeline.from_pretrained(
66
+ #"stabilityai/stable-diffusion-3.5-large",
67
+ "ford442/stable-diffusion-3.5-large-bf16",
68
+ # vae=AutoencoderKL.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", use_safetensors=True, subfolder='vae',token=True),
69
+ # transformer=SD3Transformer2DModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='transformer',token=True),
70
+ # text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder',token=True),
71
+ # text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True),
72
+ # text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True),
73
  token=True,
74
  #use_safetensors=False,
75
  )