ford442 committed
Commit 44c103a · verified · Parent: 77eb9f6

Update app.py

Files changed (1): app.py (+8 -8)
app.py CHANGED
@@ -158,10 +158,10 @@ def infer(
     if latent_file:  # Check if a latent file is provided
         initial_latents = pipe.prepare_latents(
             batch_size=1,
-            num_channels_latents=pipe.unet.in_channels,
-            height=pipe.unet.sample_size[0],
-            width=pipe.unet.sample_size[1],
-            dtype=pipe.unet.dtype,
+            num_channels_latents=pipe.transformer.in_channels,
+            height=pipe.transformer.sample_size[0],
+            width=pipe.transformer.sample_size[1],
+            dtype=pipe.transformer.dtype,
             device=pipe.device,
             generator=generator,
         )
@@ -205,10 +205,10 @@ def infer(
         generated_latents = vae.encode(generated_image_tensor.to(torch.bfloat16)).latent_dist.sample().mul_(0.18215)
         initial_latents = pipe.prepare_latents(
             batch_size=1,
-            num_channels_latents=pipe.unet.in_channels,
-            height=pipe.unet.sample_size[0],
-            width=pipe.unet.sample_size[1],
-            dtype=pipe.unet.dtype,
+            num_channels_latents=pipe.transformer.in_channels,
+            height=pipe.transformer.sample_size[0],
+            width=pipe.transformer.sample_size[1],
+            dtype=pipe.transformer.dtype,
             device=pipe.device,
             generator=generator,
         )
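
For context, a minimal sketch of the corrected call in isolation. It assumes a DiT-based diffusers pipeline (e.g. StableDiffusion3Pipeline); the pipeline class, checkpoint name, and surrounding setup below are illustrative assumptions and are not taken from this repository. The point the commit makes is that such pipelines expose the denoiser as pipe.transformer rather than pipe.unet, so the latent shape and dtype metadata must be read from the transformer.

```python
# Sketch only, not the repository's code: assumes a DiT-based diffusers pipeline
# (StableDiffusion3Pipeline) and an illustrative checkpoint name.
import torch
from diffusers import StableDiffusion3Pipeline

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",  # assumed checkpoint
    torch_dtype=torch.bfloat16,
).to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)

# DiT pipelines have no `pipe.unet`; the denoiser and its metadata live on
# `pipe.transformer`, which is the attribute rename this commit applies.
# Config values are read via `.config` here for compatibility with recent diffusers.
initial_latents = pipe.prepare_latents(
    batch_size=1,
    num_channels_latents=pipe.transformer.config.in_channels,
    height=pipe.transformer.config.sample_size,  # mirrors app.py's use of sample_size;
    width=pipe.transformer.config.sample_size,   # prepare_latents divides these by the VAE scale factor
    dtype=pipe.transformer.dtype,
    device=pipe.device,
    generator=generator,
    latents=None,
)
print(initial_latents.shape)
```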