ford442 committed
Commit 14aac3b · 1 Parent(s): 75e9d79

Update app.py

Files changed (1): app.py +10 -9
app.py CHANGED
@@ -106,32 +106,33 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
 def load_and_prepare_model(model_id):
     model_dtypes = {"ford442/RealVisXL_V5.0_BF16": torch.bfloat16,}
     dtype = model_dtypes.get(model_id, torch.bfloat16) # Default to float32 if not found
-    #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16,safety_checker=None).to('cuda')
-    vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",use_safetensors=True, torch_dtype=torch.float32,safety_checker=None)
+    vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16,safety_checker=None).to(device)
+    #vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",use_safetensors=True, torch_dtype=torch.float32,safety_checker=None).to(device).to(torch.bfloat16)
     # vae = AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",safety_checker=None).to(torch.bfloat16)
     # vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None).to('cuda')
     pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0",use_safetensors=True, torch_dtype=torch.float32)
+    pipeX.unet.to(device).to(torch.bfloat16)
     pipe = StableDiffusionXLPipeline.from_pretrained(
         model_id,
-        # torch_dtype=torch.bfloat16,
+        torch_dtype=torch.bfloat16,
         add_watermarker=False,
         use_safetensors=True,
         # vae=AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",repo_type='model',safety_checker=None),
         # vae=AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",repo_type='model',safety_checker=None, torch_dtype=torch.float32),
         # vae=AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16",repo_type='model',safety_checker=None),
-        vae=vae,
-        unet=pipeX.unet,
-        scheduler = EulerAncestralDiscreteScheduler.from_config(pipeX.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
+        #vae=vae,
+        # unet=pipeX.unet,
+        # scheduler = EulerAncestralDiscreteScheduler.from_config(pipeX.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
         #scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset =1)
     )
-    #pipe.vae=vae
-    #pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
+    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
     #pipe.to('cuda')
     # pipe.scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-    # pipe.unet = pipeX.unet
     #pipe.to(dtype=torch.bfloat16)
     pipe.to(device)
     pipe.to(torch.bfloat16)
+    pipe.vae=vae
+    pipe.unet = pipeX.unet
     #pipe.to(device, torch.bfloat16)
     del pipeX
     #sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", algorithm_type="dpmsolver++")
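
Net effect of the commit: the bf16 VAE (ford442/sdxl-vae-bf16) replaces the fp32 stabilityai/sdxl-vae, the main pipeline is loaded directly in torch.bfloat16, the Euler-ancestral scheduler is set on pipe after construction instead of being passed into from_pretrained, and the VAE plus the donor UNet from SG161222/RealVisXL_V5.0 are assigned only after the pipeline has been moved to the device and cast to bf16. Below is a minimal sketch of how the function reads after this commit, not a verbatim copy: the commented-out experiments are dropped, the "default to float32" comment is corrected to bfloat16 to match the code, the safety_checker=None argument is omitted (it is not an AutoencoderKL parameter), the device definition is an assumption standing in for whatever app.py declares earlier, and the trailing return pipe is assumed because the end of the function lies outside the hunk shown above.

import torch
from diffusers import AutoencoderKL, EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

# Assumption: app.py defines a global `device` along these lines.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def load_and_prepare_model(model_id):
    model_dtypes = {"ford442/RealVisXL_V5.0_BF16": torch.bfloat16}
    dtype = model_dtypes.get(model_id, torch.bfloat16)  # default to bfloat16 if not found (unused in this hunk)

    # bf16 VAE, moved to the target device up front.
    vae = AutoencoderKL.from_pretrained(
        "ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16
    ).to(device)

    # Donor pipeline: only its UNet is kept, cast to bf16 on the device.
    pipeX = StableDiffusionXLPipeline.from_pretrained(
        "SG161222/RealVisXL_V5.0", use_safetensors=True, torch_dtype=torch.float32
    )
    pipeX.unet.to(device).to(torch.bfloat16)

    # Main pipeline, now loaded directly in bf16.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,
        add_watermarker=False,
        use_safetensors=True,
    )
    # Scheduler is configured on the constructed pipeline rather than passed to from_pretrained.
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
        pipe.scheduler.config,
        beta_schedule="scaled_linear",
        beta_start=0.00085,
        beta_end=0.012,
        steps_offset=1,
    )
    pipe.to(device)
    pipe.to(torch.bfloat16)
    # Components are swapped in only after the device/dtype moves on the pipeline.
    pipe.vae = vae
    pipe.unet = pipeX.unet
    del pipeX
    return pipe  # assumed; the function's tail is outside the hunk shown above

One consequence of the change worth noting: because torch_dtype=torch.bfloat16 is now passed to from_pretrained, the main checkpoint is loaded in bf16 from the start rather than loaded in fp32 and cast afterwards, while the donor UNet still comes from the fp32 RealVisXL_V5.0 checkpoint and is cast to bf16 separately before being swapped in.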