Update pipeline_stable_diffusion_3_ipa.py
pipeline_stable_diffusion_3_ipa.py
@@ -1185,9 +1185,9 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
         # experimental way
         #clip_image_embeds = torch.cat([torch.zeros_like(torch.stack(image_prompt_embeds_list)), torch.stack(image_prompt_embeds_list)], dim=0).mean(dim=0)
         # FAILS clip_image_embeds = torch.cat(torch.stack(image_prompt_embeds_list), dim=0).mean(dim=0)
-        clip_image_embeds = torch.cat(image_prompt_embeds_list, dim=0).mean(dim=0)
-
-
+        # FAILS TIMESTEPS clip_image_embeds = torch.cat(image_prompt_embeds_list, dim=0).mean(dim=0)
+        clip_image_embeds = torch.mean(torch.stack(image_prompt_embeds_list), dim=0)
+        #clip_image_embeds = torch.cat([torch.stack(image_prompt_embeds_list)], dim=0).mean(dim=0)
 
         # 4. Prepare timesteps
         timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
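The switch from torch.cat to torch.stack changes the shape of the averaged embedding, not its values. A minimal sketch of the difference, assuming each entry in image_prompt_embeds_list has shape [1, num_tokens, embed_dim] (the 64 and 2048 below are illustrative placeholders, not the pipeline's real dimensions):

import torch

# Assumed shape per IP-Adapter image embedding: [batch=1, num_tokens, embed_dim].
# 64 and 2048 are placeholder values for illustration only.
image_prompt_embeds_list = [torch.randn(1, 64, 2048) for _ in range(3)]

# cat + mean collapses the leading batch dimension:
# cat   -> [3, 64, 2048], mean over dim 0 -> [64, 2048]
cat_mean = torch.cat(image_prompt_embeds_list, dim=0).mean(dim=0)

# stack + mean keeps each embedding's original shape:
# stack -> [3, 1, 64, 2048], mean over dim 0 -> [1, 64, 2048]
stack_mean = torch.mean(torch.stack(image_prompt_embeds_list), dim=0)

print(cat_mean.shape)    # torch.Size([64, 2048])
print(stack_mean.shape)  # torch.Size([1, 64, 2048])

# The averaged values are identical; only the leading dimension differs.
assert torch.allclose(cat_mean, stack_mean.squeeze(0))

Both forms average the same numbers, so the retained stack variant presumably fixes a downstream shape mismatch rather than changing the math; the "FAILS TIMESTEPS" comment suggests the lost batch dimension surfaced at the timestep-preparation step that follows.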