ford442 committed on
Commit 6dee80d · verified · 1 Parent(s): b809e76

Update app.py

Files changed (1)
  1. app.py +2 -12
app.py CHANGED
@@ -203,19 +203,9 @@ def infer(
     # Encode the generated image into latents
     with torch.no_grad():
         generated_latents = vae.encode(generated_image_tensor.to(torch.bfloat16)).latent_dist.sample().mul_(0.18215)
-    initial_latents = pipe.prepare_latents(
-        batch_size=1,
-        num_channels_latents=pipe.transformer.in_channels,
-        height=pipe.transformer.config.sample_size[0],
-        width=pipe.transformer.config.sample_size[1],
-        dtype=pipe.transformer.dtype,
-        device=pipe.device,
-        generator=generator,
-    )
-    initial_latents += generated_latents
-    latent_path = f"sd35m_{seed}.pt"
+    latent_path = f"sd35m_{seed}.pt"
     # Save the latents to a .pt file
-    torch.save(initial_latents, latent_path)
+    torch.save(generated_latents, latent_path)
     upload_to_ftp(latent_path)
     #refiner.scheduler.set_timesteps(num_inference_steps,device)
     refine = refiner(
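For reference, a minimal sketch of how the latent file saved above could be loaded and decoded back into an image. It assumes the same vae object and seed variable that app.py already has in scope, and simply reverses the 0.18215 scaling applied before torch.save(); the output-file name and the PIL conversion are illustrative, not code from this commit.

import torch
from PIL import Image

# Illustrative only: reload the latents that app.py saved with torch.save().
latent_path = f"sd35m_{seed}.pt"
latents = torch.load(latent_path)

with torch.no_grad():
    # Undo the 0.18215 scaling applied at encode time, then decode with the same VAE.
    image_tensor = vae.decode(latents / 0.18215).sample

# Map from [-1, 1] to [0, 255] and convert to a PIL image.
image_tensor = (image_tensor / 2 + 0.5).clamp(0, 1)
image_array = (image_tensor[0].permute(1, 2, 0).float().cpu().numpy() * 255).round().astype("uint8")
Image.fromarray(image_array).save(f"sd35m_{seed}_decoded.png")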