Spaces: Running on Zero

Commit: Update app.py
Browse files

app.py — CHANGED
@@ -157,7 +157,7 @@ def infer(
 157     print(enhanced_prompt)
 158     if latent_file: # Check if a latent file is provided
 159         sd_image_a = torch.load(latent_file.name).to('cuda') # Load the latent
-160         sd_image_b = pipe.vae.encode(sd_image_a.to(torch.bfloat16)).latent_dist.sample().mul_(0.18215)
+160         #sd_image_b = pipe.vae.encode(sd_image_a.to(torch.bfloat16)).latent_dist.sample().mul_(0.18215)
 161         print("-- using latent file --")
 162         print('-- generating image --')
 163         with torch.no_grad():
@@ -168,7 +168,7 @@ def infer(
 168             num_inference_steps=num_inference_steps,
 169             width=width,
 170             height=height,
-171             latents=
+171             latents=sd_image_a,
 172             generator=generator
 173         ).images[0]
 174     else: