Update app.py
app.py CHANGED
@@ -110,20 +110,20 @@ def load_and_prepare_model(model_id):
     # vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", torch_dtype=torch.float32,safety_checker=None)
     # vae = AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",safety_checker=None).to(torch.bfloat16)
     # vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None).to('cuda')
-    pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0")
+    pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0", torch_dtype=torch.float32)
     pipe = StableDiffusionXLPipeline.from_pretrained(
         model_id,
-
+        torch_dtype=torch.bfloat16,
         add_watermarker=False,
         use_safetensors=True,
         # vae=AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",repo_type='model',safety_checker=None),
-        vae=AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",repo_type='model',safety_checker=None),
+        vae=AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",repo_type='model',safety_checker=None, torch_dtype=torch.float32),
         #scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset =1)
     )
     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
     #pipe.to('cuda')
     # pipe.scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-
+    # pipe.unet = pipeX.unet.to(torch.bfloat16)
     #pipe.to(dtype=torch.bfloat16)
     pipe.to(device)
     pipe.to(torch.bfloat16)
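For context, the net effect of this hunk is a mixed-precision load: both pipelines now get an explicit torch_dtype, with the VAE requested in float32 (SDXL's stock VAE is commonly kept in float32 to avoid numerical artifacts at decode time) while the main pipeline weights are requested in bfloat16. Below is a minimal standalone sketch of that loading pattern with the diffusers API, not the full app.py; model_id and device are assumed placeholders here (app.py supplies its own), and the commented-out alternatives from the file are omitted.

    import torch
    from diffusers import (
        AutoencoderKL,
        EulerAncestralDiscreteScheduler,
        StableDiffusionXLPipeline,
    )

    # Assumed placeholders; in app.py these come from load_and_prepare_model()
    # and the surrounding setup code.
    model_id = "SG161222/RealVisXL_V5.0"
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # VAE loaded in float32; the rest of the pipeline requested in bfloat16.
    vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", torch_dtype=torch.float32)

    pipe = StableDiffusionXLPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,
        add_watermarker=False,
        use_safetensors=True,
        vae=vae,
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
        pipe.scheduler.config,
        beta_schedule="scaled_linear",
        beta_start=0.00085,
        beta_end=0.012,
        steps_offset=1,
    )
    pipe.to(device)
    pipe.to(torch.bfloat16)  # casts all registered submodules, including the VAE

One thing worth noting when reviewing: the trailing pipe.to(torch.bfloat16) appears to cast every registered module, VAE included, so the float32 torch_dtype on the VAE load only governs precision up to that point unless that final cast is removed or the VAE is re-upcast afterwards.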