1inkusFace committed
Commit 6b01871 · verified · 1 Parent(s): b1bfbda

Update app.py

Files changed (1): app.py (+4 -2)
app.py CHANGED
@@ -61,10 +61,11 @@ transformer = SD3Transformer2DModel.from_pretrained(
     model_path, subfolder="transformer", torch_dtype=torch.bfloat16
 )
 
+vaeX = AutoencoderKL.from_pretrained("ford442/stable-diffusion-3.5-large-fp32", safety_checker=None, use_safetensors=True, low_cpu_mem_usage=False, subfolder='vae', torch_dtype=torch.float32, token=True)
+
 pipe = StableDiffusion3Pipeline.from_pretrained(
     #"stabilityai # stable-diffusion-3.5-large",
     "ford442/stable-diffusion-3.5-large-bf16",
-    # vae=AutoencoderKL.from_pretrained("ford442/stable-diffusion-3.5-large-fp32", use_safetensors=True, subfolder='vae', token=True),
     #scheduler = FlowMatchHeunDiscreteScheduler.from_pretrained('ford442/stable-diffusion-3.5-large-bf16', subfolder='scheduler', token=True),
     text_encoder=None, #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True),
     text_encoder_2=None, #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2', token=True),
@@ -74,13 +75,14 @@ pipe = StableDiffusion3Pipeline.from_pretrained(
     tokenizer_3=T5TokenizerFast.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", use_fast=True, subfolder="tokenizer_3", token=True),
     torch_dtype=torch.bfloat16,
     transformer=transformer,
+    vae=None,
     #use_safetensors=False,
 )
 
 #pipe.to(device=device, dtype=torch.bfloat16)
 
 pipe.to(device)
-
+pipe.vae = vaeX.to(device)
 text_encoder = CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
 text_encoder_2 = CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2', token=True).to(device=device, dtype=torch.bfloat16)
 text_encoder_3 = T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3', token=True).to(device=device, dtype=torch.bfloat16)
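
The substance of the change: the pipeline is now constructed with vae=None, and a separately loaded fp32 VAE (vaeX) is attached after pipe.to(device), so decoding runs in full precision while the transformer and text encoders stay in bf16. A minimal sketch of that pattern, assuming a recent diffusers release (the repo IDs come from the diff above; automatic casting of latents to the VAE's fp32 dtype at decode time is an assumption to verify against the installed version):

import torch
from diffusers import AutoencoderKL, StableDiffusion3Pipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load only the VAE in full precision for higher-fidelity decoding.
vae = AutoencoderKL.from_pretrained(
    "ford442/stable-diffusion-3.5-large-fp32",
    subfolder="vae",
    torch_dtype=torch.float32,
)

# Build the rest of the pipeline in bf16; passing vae=None skips loading
# the bundled VAE. (Unlike the commit, this sketch lets from_pretrained
# load the text encoders too, purely to stay self-contained.)
pipe = StableDiffusion3Pipeline.from_pretrained(
    "ford442/stable-diffusion-3.5-large-bf16",
    vae=None,
    torch_dtype=torch.bfloat16,
)
pipe.to(device)            # moves the bf16 components; vae is still None
pipe.vae = vae.to(device)  # attach the fp32 VAE, keeping float32 weights

Keeping just the VAE in fp32 adds little memory relative to the transformer (decode is a single forward pass through a comparatively small module) while avoiding bf16 rounding in the final image decode, which is presumably the motivation for the commit.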