Update app.py
app.py
CHANGED
@@ -220,11 +220,11 @@ def infer(
     generated_image_tensor = torch.tensor([np.array(sd_image).transpose(2, 0, 1)]).to('cuda') / 255.0
     # Encode the generated image into latents
     #with torch.no_grad():
-    generated_latents = pipe.vae.encode(generated_image_tensor.to(torch.bfloat16)).latent_dist.sample().mul_(0.18215)
-    latent_path = f"sd35m_{seed}.pt"
+    # generated_latents = pipe.vae.encode(generated_image_tensor.to(torch.bfloat16)).latent_dist.sample().mul_(0.18215)
+    #latent_path = f"sd35m_{seed}.pt"
     # Save the latents to a .pt file
-    torch.save(generated_latents, latent_path)
-    upload_to_ftp(latent_path)
+    #torch.save(generated_latents, latent_path)
+    #upload_to_ftp(latent_path)
     #refiner.scheduler.set_timesteps(num_inference_steps,device)
     refine = refiner(
         prompt=f"{enhanced_prompt_2}, high quality masterpiece, complex details",
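For context, the lines this commit comments out encoded the intermediate SD image back into VAE latents and saved them to disk before the refiner pass. Below is a minimal sketch of that disabled step, assuming a diffusers pipeline `pipe` whose `.vae` runs in bfloat16 on CUDA, a PIL image `sd_image`, and the same 0.18215 scaling factor used in the diff; the helper name `encode_and_save_latents` is hypothetical, and `upload_to_ftp` is a Space-specific helper defined elsewhere in app.py.

```python
import numpy as np
import torch

def encode_and_save_latents(pipe, sd_image, seed, scaling_factor=0.18215):
    # Convert the PIL image to a 1xCxHxW float tensor in [0, 1] on the GPU.
    image_tensor = torch.tensor([np.array(sd_image).transpose(2, 0, 1)]).to('cuda') / 255.0
    with torch.no_grad():
        # Encode with the pipeline's VAE, sample the latent distribution,
        # and apply the scaling factor used in the original code.
        latents = pipe.vae.encode(image_tensor.to(torch.bfloat16)).latent_dist.sample().mul_(scaling_factor)
    # Save the latents to a .pt file named after the seed, as in the diff.
    latent_path = f"sd35m_{seed}.pt"
    torch.save(latents, latent_path)
    # upload_to_ftp(latent_path)  # Space-specific helper, defined elsewhere in app.py
    return latent_path
```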