Commit 9069d05 (verified) · 1 parent: 5684612
diffusers-benchmarking-bot committed: Upload folder using huggingface_hub
main/stable_diffusion_xl_controlnet_reference.py CHANGED
@@ -193,7 +193,8 @@ class StableDiffusionXLControlNetReferencePipeline(StableDiffusionXLControlNetPi
 
     def prepare_ref_latents(self, refimage, batch_size, dtype, device, generator, do_classifier_free_guidance):
         refimage = refimage.to(device=device)
-        if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
+        needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
+        if needs_upcasting:
             self.upcast_vae()
         refimage = refimage.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
         if refimage.dtype != self.vae.dtype:
@@ -223,6 +224,11 @@ class StableDiffusionXLControlNetReferencePipeline(StableDiffusionXLControlNetPi
 
         # aligning device to prevent device errors when concating it with the latent model input
         ref_image_latents = ref_image_latents.to(device=device, dtype=dtype)
+
+        # cast back to fp16 if needed
+        if needs_upcasting:
+            self.vae.to(dtype=torch.float16)
+
         return ref_image_latents
 
     def prepare_ref_image(

main/stable_diffusion_xl_reference.py CHANGED
@@ -139,7 +139,8 @@ def retrieve_timesteps(
 class StableDiffusionXLReferencePipeline(StableDiffusionXLPipeline):
     def prepare_ref_latents(self, refimage, batch_size, dtype, device, generator, do_classifier_free_guidance):
         refimage = refimage.to(device=device)
-        if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
+        needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
+        if needs_upcasting:
             self.upcast_vae()
         refimage = refimage.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
         if refimage.dtype != self.vae.dtype:
@@ -169,6 +170,11 @@ class StableDiffusionXLReferencePipeline(StableDiffusionXLPipeline):
 
         # aligning device to prevent device errors when concating it with the latent model input
         ref_image_latents = ref_image_latents.to(device=device, dtype=dtype)
+
+        # cast back to fp16 if needed
+        if needs_upcasting:
+            self.vae.to(dtype=torch.float16)
+
         return ref_image_latents
 
     def prepare_ref_image(
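
Both diffs apply the same fix: prepare_ref_latents records whether the fp16 VAE had to be upcast to fp32 (needs_upcasting) and, after the reference latents are encoded, casts the VAE back to fp16 instead of leaving it in fp32 for the rest of the run. Below is a minimal sketch of that pattern, assuming a diffusers SDXL-style pipeline that exposes upcast_vae(), an fp16 AutoencoderKL at pipe.vae, and vae.config.force_upcast; the helper name encode_reference_latents and the encode/scaling lines are illustrative and not part of the diff.

import torch


def encode_reference_latents(pipe, refimage: torch.Tensor) -> torch.Tensor:
    # Hypothetical helper showing the upcast-then-restore pattern from this commit;
    # `pipe` is assumed to be an SDXL-style diffusers pipeline.
    refimage = refimage.to(device=pipe.vae.device)

    # SDXL's fp16 VAE needs an fp32 encode when force_upcast is set;
    # remember whether we had to upcast so we can undo it afterwards.
    needs_upcasting = pipe.vae.dtype == torch.float16 and pipe.vae.config.force_upcast
    if needs_upcasting:
        pipe.upcast_vae()
        refimage = refimage.to(next(iter(pipe.vae.post_quant_conv.parameters())).dtype)

    # Encode the reference image into latent space (illustrative step).
    latents = pipe.vae.encode(refimage).latent_dist.sample()
    latents = latents * pipe.vae.config.scaling_factor

    # Cast the VAE back to fp16 so later decodes keep the fp16 memory footprint;
    # this restore step is what the commit adds to both pipelines.
    if needs_upcasting:
        pipe.vae.to(dtype=torch.float16)

    return latents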