ford442 committed on
Commit
fdf9fd3
·
1 Parent(s): f0d0565

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -6
app.py CHANGED
@@ -106,16 +106,15 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
106
  def load_and_prepare_model(model_id):
107
  model_dtypes = {"ford442/RealVisXL_V5.0_BF16": torch.bfloat16,}
108
  dtype = model_dtypes.get(model_id, torch.bfloat16) # Default to bfloat16 if not found
109
- vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16,safety_checker=None).to(device).to(torch.bfloat16)
110
  #vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",use_safetensors=True, torch_dtype=torch.float32,safety_checker=None).to(device).to(torch.bfloat16)
111
  # vae = AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",safety_checker=None).to(torch.bfloat16)
112
  # vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None).to('cuda')
113
  pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0",use_safetensors=True, torch_dtype=torch.float32)
114
- pipeX.unet.to(device).to(torch.bfloat16)
115
  pipe = StableDiffusionXLPipeline.from_pretrained(
116
  model_id,
117
- torch_dtype=torch.bfloat16,
118
- add_watermarker=False,
119
  use_safetensors=True,
120
  # vae=AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",repo_type='model',safety_checker=None),
121
  # vae=AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",repo_type='model',safety_checker=None, torch_dtype=torch.float32),
@@ -127,12 +126,12 @@ def load_and_prepare_model(model_id):
127
  )
128
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
129
  #pipe.to('cuda')
 
130
  # pipe.scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
131
  #pipe.to(dtype=torch.bfloat16)
 
132
  pipe.to(device)
133
  pipe.to(torch.bfloat16)
134
- pipe.vae=vae
135
- pipe.unet = pipeX.unet
136
  #pipe.to(device, torch.bfloat16)
137
  del pipeX
138
  #sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", algorithm_type="dpmsolver++")
 
106
  def load_and_prepare_model(model_id):
107
  model_dtypes = {"ford442/RealVisXL_V5.0_BF16": torch.bfloat16,}
108
  dtype = model_dtypes.get(model_id, torch.bfloat16) # Default to bfloat16 if not found
109
+ vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16",safety_checker=None)
110
  #vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",use_safetensors=True, torch_dtype=torch.float32,safety_checker=None).to(device).to(torch.bfloat16)
111
  # vae = AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",safety_checker=None).to(torch.bfloat16)
112
  # vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None).to('cuda')
113
  pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0",use_safetensors=True, torch_dtype=torch.float32)
 
114
  pipe = StableDiffusionXLPipeline.from_pretrained(
115
  model_id,
116
+ #torch_dtype=torch.bfloat16,
117
+ # add_watermarker=False,
118
  use_safetensors=True,
119
  # vae=AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",repo_type='model',safety_checker=None),
120
  # vae=AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",repo_type='model',safety_checker=None, torch_dtype=torch.float32),
 
126
  )
127
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
128
  #pipe.to('cuda')
129
+ pipe.vae=vae
130
  # pipe.scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
131
  #pipe.to(dtype=torch.bfloat16)
132
+ pipe.unet = pipeX.unet
133
  pipe.to(device)
134
  pipe.to(torch.bfloat16)
 
 
135
  #pipe.to(device, torch.bfloat16)
136
  del pipeX
137
  #sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", algorithm_type="dpmsolver++")