Update app.py
app.py CHANGED
@@ -113,20 +113,22 @@ def load_and_prepare_model(model_id):
     pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0",use_safetensors=True, torch_dtype=torch.float32)
     pipe = StableDiffusionXLPipeline.from_pretrained(
         model_id,
-
+        torch_dtype=torch.bfloat16,
         add_watermarker=False,
         use_safetensors=True,
         # vae=AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",repo_type='model',safety_checker=None),
         # vae=AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",repo_type='model',safety_checker=None, torch_dtype=torch.float32),
         # vae=AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16",repo_type='model',safety_checker=None),
-
+        vae=vae,
+        unet=pipeX.unet,
+        scheduler=EulerAncestralDiscreteScheduler.from_config(pipeX.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1),
         #scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
     )
-    pipe.vae=vae
-    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
+    #pipe.vae=vae
+    #pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
     #pipe.to('cuda')
     # pipe.scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-
+    # pipe.unet = pipeX.unet
     #pipe.to(dtype=torch.bfloat16)
     pipe.to(device)
     pipe.to(torch.bfloat16)
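In short, the commit stops patching `pipe.vae` and `pipe.scheduler` after construction and instead passes the VAE, the RealVisXL_V5.0 UNet, and a configured Euler Ancestral scheduler directly to `StableDiffusionXLPipeline.from_pretrained(...)`, along with `torch_dtype=torch.bfloat16`. The sketch below shows the resulting loader under stated assumptions: `vae`, `device`, and the final `return` live outside this hunk in app.py, so the placeholder definitions here are illustrative only.

```python
# Minimal sketch of the revised load_and_prepare_model().
# `vae` and `device` are assumed to be defined earlier in app.py (outside this
# hunk); placeholder definitions are included to keep the example self-contained.
import torch
from diffusers import (
    AutoencoderKL,
    EulerAncestralDiscreteScheduler,
    StableDiffusionXLPipeline,
)

device = "cuda" if torch.cuda.is_available() else "cpu"      # assumption
vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae")  # assumption: the real VAE is set elsewhere in app.py

def load_and_prepare_model(model_id):
    # Donor pipeline: only its UNet and scheduler config are reused below.
    pipeX = StableDiffusionXLPipeline.from_pretrained(
        "SG161222/RealVisXL_V5.0", use_safetensors=True, torch_dtype=torch.float32
    )
    # Pass the component overrides at load time instead of patching `pipe`
    # after construction; the remaining components load in bfloat16.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,
        add_watermarker=False,
        use_safetensors=True,
        vae=vae,
        unet=pipeX.unet,
        scheduler=EulerAncestralDiscreteScheduler.from_config(
            pipeX.scheduler.config,
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
        ),
    )
    pipe.to(device)
    pipe.to(torch.bfloat16)  # the float32 donor UNet is only cast to bfloat16 here
    return pipe              # assumption: the rest of the function sits outside this hunk
```

Because the `unet=` override is supplied to `from_pretrained`, the checkpoint's own UNet is not used; note that the donor UNet is loaded in float32 and only converted by the trailing `pipe.to(torch.bfloat16)` call.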