Update app.py
app.py CHANGED
@@ -156,7 +156,17 @@ def infer(
     print('-- filtered prompt --')
     print(enhanced_prompt)
     if latent_file: # Check if a latent file is provided
-
+        initial_latents = pipeline.prepare_latents(
+            batch_size=1,
+            num_channels_latents=pipeline.unet.in_channels,
+            height=pipeline.unet.sample_size[0],
+            width=pipeline.unet.sample_size[1],
+            dtype=pipeline.unet.dtype,
+            device=pipeline.device,
+            generator=generator,
+        )
+        sd_image_a = torch.load(latent_file.name) # Load the latent
+        initial_latents += sd_image_a
         #sd_image_b = pipe.vae.encode(sd_image_a.to(torch.bfloat16)).latent_dist.sample().mul_(0.18215)
         print("-- using latent file --")
         print('-- generating image --')
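In plain terms, the new branch draws fresh noise the same way the pipeline would on its own, then adds the uploaded latent on top of it. A minimal self-contained sketch of that blend, assuming the saved file holds a latent tensor; the shape, seed, and file name below are illustrative stand-ins, not values taken from the Space:

import torch

# Stand-ins: the shape and file name are assumptions for illustration only.
generator = torch.Generator("cpu").manual_seed(0)
initial_latents = torch.randn((1, 16, 128, 128), generator=generator)
sd_image_a = torch.load("sd35m_12345.pt")  # a tensor written by torch.save
initial_latents += sd_image_a              # element-wise sum, as in the commit

Note that the sum biases the starting noise toward the saved latent rather than replacing the noise outright, so denoising still begins from a noise-like tensor.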
@@ -168,7 +178,7 @@ def infer(
             num_inference_steps=num_inference_steps,
             width=width,
             height=height,
-            latents=
+            latents=initial_latents,
             generator=generator
         ).images[0]
     else:
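This one-line change is what makes the blend take effect: diffusers pipelines accept an optional latents argument and, when it is given, denoise from that tensor instead of sampling their own starting noise. A hedged sketch of the call site, with every argument except the two relevant ones elided:

# Sketch only: `pipe` stands in for the Space's pipeline object;
# prompt/size/guidance arguments are elided.
image = pipe(
    prompt=enhanced_prompt,
    latents=initial_latents,  # denoising starts from the blended tensor
    generator=generator,      # still seeds whatever sampling remains
).images[0]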
@@ -193,9 +203,19 @@ def infer(
         # Encode the generated image into latents
         with torch.no_grad():
             generated_latents = vae.encode(generated_image_tensor.to(torch.bfloat16)).latent_dist.sample().mul_(0.18215)
-
+        initial_latents = pipeline.prepare_latents(
+            batch_size=1,
+            num_channels_latents=pipeline.unet.in_channels,
+            height=pipeline.unet.sample_size[0],
+            width=pipeline.unet.sample_size[1],
+            dtype=pipeline.unet.dtype,
+            device=pipeline.device,
+            generator=generator,
+        )
+        initial_latents += generated_latents
+        latent_path = f"sd35m_{seed}.pt"
         # Save the latents to a .pt file
-        torch.save(
+        torch.save(initial_latents, latent_path)
         upload_to_ftp(latent_path)
         #refiner.scheduler.set_timesteps(num_inference_steps,device)
         refine = refiner(
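The save path mirrors the load path: the .pt file written here is exactly what the latent_file branch in the first hunk consumes on a later run. A quick round-trip check with stand-in values:

import torch

seed = 12345                                      # stand-in seed
initial_latents = torch.randn((1, 16, 128, 128))  # stand-in latent tensor
latent_path = f"sd35m_{seed}.pt"                  # same naming as the commit
torch.save(initial_latents, latent_path)
assert torch.equal(torch.load(latent_path), initial_latents)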