Hugging Face Spaces — commit "Update app.py" (fixes a build error; previous build failed).
File changed: app.py
```diff
@@ -156,7 +156,8 @@ def infer(
     print('-- filtered prompt --')
     print(enhanced_prompt)
     if latent_file: # Check if a latent file is provided
-        sd_image_a = torch.load(latent_file.name) # Load the latent
+        sd_image_a = torch.load(latent_file.name).to('cuda') # Load the latent
+        sd_image_b = vae.encode(sd_image_a.to(torch.bfloat16)).latent_dist.sample().mul_(0.18215)
         print("-- using latent file --")
     print('-- generating image --')
     with torch.no_grad():
@@ -167,7 +168,7 @@ def infer(
             num_inference_steps=num_inference_steps,
             width=width,
             height=height,
-            latents=
+            latents=sd_image_b,
             generator=generator
         ).images[0]
     else:
```
Summary of the change: the previous revision had an incomplete keyword argument (`latents=` with no value — a syntax error, hence the build failure). This commit completes it as `latents=sd_image_b,`, moves the loaded latent tensor to the GPU (`.to('cuda')`), and encodes it through the VAE (`vae.encode(...).latent_dist.sample().mul_(0.18215)`) before passing it to the pipeline.