Update app.py
app.py CHANGED
@@ -124,9 +124,9 @@ def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
 
 def load_and_prepare_model():
     #vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False).to(device=device, dtype=torch.bfloat16)
-    vaeRV = AutoencoderKL.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='vae', safety_checker=None).to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
+    #vaeRV = AutoencoderKL.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='vae', safety_checker=None,token=HF_TOKEN).to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
     #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
-    sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
+    sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',token=HF_TOKEN)
     pipe = StableDiffusionXLPipeline.from_pretrained(
         'ford442/RealVisXL_V5.0_BF16',
         #torch_dtype=torch.bfloat16,
@@ -134,7 +134,7 @@ def load_and_prepare_model():
         # low_cpu_mem_usage = False,
         token=HF_TOKEN,
     )
-    pipe.vae = vaeRV #.to(torch.bfloat16)
+    #pipe.vae = vaeRV #.to(torch.bfloat16)
     pipe.scheduler = sched
     #pipe.vae.do_resize=False
     #pipe.vae.vae_scale_factor=8
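In short, this commit comments out the standalone RealVisXL VAE download and the pipe.vae = vaeRV swap, so the pipeline keeps the VAE bundled with ford442/RealVisXL_V5.0_BF16, and it now passes token=HF_TOKEN when loading the scheduler (the commented-out VAE line gains the same argument). For orientation only, here is a minimal sketch of load_and_prepare_model() as it stands after the commit; reading HF_TOKEN from the environment and returning the pipeline are assumptions, since the rest of app.py is not shown in this diff.

import os

from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

HF_TOKEN = os.getenv("HF_TOKEN")  # assumption: token supplied as a Space secret

def load_and_prepare_model():
    # The RealVisXL VAE override is disabled in this commit, so the VAE that
    # ships with ford442/RealVisXL_V5.0_BF16 is used unchanged.
    sched = EulerAncestralDiscreteScheduler.from_pretrained(
        'ford442/RealVisXL_V5.0_BF16',
        subfolder='scheduler',
        token=HF_TOKEN,  # newly added in this commit, matching the pipeline load below
    )
    pipe = StableDiffusionXLPipeline.from_pretrained(
        'ford442/RealVisXL_V5.0_BF16',
        token=HF_TOKEN,
    )
    pipe.scheduler = sched  # swap in the Euler Ancestral scheduler
    return pipe  # assumption: the caller uses the returned pipeline

Passing token consistently only matters when the repositories are gated or private; for public repositories the argument is a no-op.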