Update app.py
app.py CHANGED
@@ -103,7 +103,7 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str

 def load_and_prepare_model():
     vaeX = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None,use_safetensors=False,torch_dtype=torch.float32,token=HF_TOKEN).to(device)
-    sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16',token=HF_TOKEN, subfolder='scheduler',beta_schedule="scaled_linear"
+    sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16',token=HF_TOKEN, subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=False)
     pipe = StableDiffusionXLPipeline.from_pretrained(
         #'John6666/uber-realistic-porn-merge-ponyxl-urpm-ponyxlhybridv1-sdxl',
         #'John6666/uber-realistic-porn-merge-xl-urpmxl-v3-sdxl',
@@ -131,15 +131,11 @@ def load_and_prepare_model():

     '''

-    #pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
-    #pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained('SG161222/RealVisXL_V5.0', subfolder='scheduler', algorithm_type='sde-dpmsolver++')
-
     #**** NEVER TO BFLOAT BEFORE CUDA****#
     #pipe.vae = vaeX #.to(torch.bfloat16)
     #**** NEVER TO BFLOAT BEFORE CUDA****#

     #pipe.vae.force_upcast=True
-
     pipe.scheduler = sched
     # pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/Fantasy_World_XL.safetensors", adapter_name="fantasy")
     pipe.vae.set_default_attn_processor() # Set attention processor first
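Note on the change: as rendered above, the removed scheduler line leaves the EulerAncestralDiscreteScheduler.from_pretrained(...) call unclosed; the added line closes it and moves the full noise-schedule configuration (beta_start, beta_end, steps_offset, use_karras_sigmas) into the load call, which is why the commented-out from_config / DPMSolverMultistepScheduler fallbacks in the second hunk can be dropped. Below is a minimal sketch of the resulting pattern, not the app's full loader: the HF_TOKEN value and the checkpoint passed to StableDiffusionXLPipeline.from_pretrained are assumptions, since the hunk elides the repo id the pipeline actually loads.

# Minimal sketch of the scheduler-at-load-time pattern this commit lands on.
# Assumptions (not shown in the diff): the HF_TOKEN value and the checkpoint
# passed to StableDiffusionXLPipeline.from_pretrained are stand-ins.
from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

HF_TOKEN = None  # assumption: supply a real token if the repos require auth

# Scheduler fully configured in from_pretrained(); kwargs mirror the added line.
sched = EulerAncestralDiscreteScheduler.from_pretrained(
    "ford442/RealVisXL_V5.0_BF16",
    subfolder="scheduler",
    token=HF_TOKEN,
    beta_schedule="scaled_linear",
    beta_start=0.00085,
    beta_end=0.012,
    steps_offset=1,
    use_karras_sigmas=False,
)

pipe = StableDiffusionXLPipeline.from_pretrained(
    "ford442/RealVisXL_V5.0_BF16",  # stand-in repo id; the diff elides the one used
    token=HF_TOKEN,
)
pipe.scheduler = sched                 # attach the preconfigured scheduler
pipe.vae.set_default_attn_processor()  # plain VAE attention, as in the context lines

Configuring the scheduler once at load time keeps a single source of truth for the noise schedule and avoids rebuilding it from pipe.scheduler.config after the pipeline is constructed.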