Update pipeline_stable_diffusion_3_ipa.py
pipeline_stable_diffusion_3_ipa.py
CHANGED
@@ -1176,10 +1176,16 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
 
         # Concatenate the image embeddings
         ## clip_image_embeds = torch.mean(torch.stack(image_prompt_embeds_list), dim=0)
-        clip_image_embeds = torch.cat(image_prompt_embeds_list, dim=0).mean(dim=0) #.unsqueeze(0)
-        bs_embed, seq_len = clip_image_embeds.shape
-        clip_image_embeds = clip_image_embeds.view(bs_embed, seq_len) # Simplified reshape
 
+
+        # clip_image_embeds = torch.cat(image_prompt_embeds_list, dim=0).mean(dim=0) #.unsqueeze(0)
+        # bs_embed, seq_len = clip_image_embeds.shape
+        # clip_image_embeds = clip_image_embeds.view(bs_embed, seq_len) # Simplified reshape
+
+        # experimental way
+        clip_image_embeds = torch.cat([torch.zeros_like(image_prompt_embeds_list), image_prompt_embeds_list], dim=0).mean(dim=0)
+
+
         # 4. Prepare timesteps
         timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
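Editor's note: the new "experimental way" line passes image_prompt_embeds_list directly to torch.zeros_like, which expects a Tensor. If that variable is still a Python list of tensors, as the removed torch.cat(image_prompt_embeds_list, dim=0) call suggests, this raises a TypeError at runtime. A minimal sketch of what the line appears to intend, assuming a list of equally shaped per-image CLIP embeddings (the shapes below are illustrative, not the pipeline's real ones):

import torch

# Illustrative stand-ins for the per-image CLIP embeddings collected earlier
# in the pipeline; real shapes come from the image encoder.
image_prompt_embeds_list = [torch.randn(16, 768) for _ in range(3)]

# torch.zeros_like expects a Tensor, so collapse the list into one tensor first.
stacked = torch.stack(image_prompt_embeds_list, dim=0)  # (n_images, seq_len, dim)

# Prepend an all-zeros block of the same shape, then average everything.
clip_image_embeds = torch.cat([torch.zeros_like(stacked), stacked], dim=0).mean(dim=0)  # (seq_len, dim)

Averaging the embeddings together with an equal-sized block of zeros is arithmetically the same as halving the plain mean, i.e. 0.5 * stacked.mean(dim=0), so the change effectively scales the image conditioning down by half.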