1inkusFace committed
Commit 36b9065 · verified · 1 Parent(s): 7a7f101

Update pipeline_stable_diffusion_3_ipa.py

Files changed (1):
  1. pipeline_stable_diffusion_3_ipa.py +10 -4
pipeline_stable_diffusion_3_ipa.py CHANGED
@@ -1214,16 +1214,22 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
         zeros_tensor = torch.zeros_like(average_image_embed)
         print('shape 3: ', zeros_tensor.shape)
         # 4. Concatenate the zeros and the average embedding
-        clip_image_embeds = torch.cat([zeros_tensor, average_image_embed], dim=0)
+        clip_image_embeds = torch.cat([zeros_tensor, average_image_embed], dim=1)
         print('shape 4: ', clip_image_embeds.shape)
-        clip_image_embeds = clip_image_embeds.unsqueeze(0) # Add a dimension at the beginning so now you have [1, 2*seq_len_img, embed_dim_img]
+        #clip_image_embeds = clip_image_embeds.unsqueeze(0) # Add a dimension at the beginning so now you have [1, 2*seq_len_img, embed_dim_img]
         print('shape 5: ', clip_image_embeds.shape)
 
         bs_embed, seq_len, _ = clip_image_embeds.shape
         clip_image_embeds = clip_image_embeds.repeat(1, 1, 1)
         print('shape 6: ', clip_image_embeds.shape)
-        clip_image_embeds = clip_image_embeds.view(bs_embed, 1, -1)
-        print('shape 7: ', clip_image_embeds.shape)
+
+        clip_image_embedsa = clip_image_embeds.view(bs_embed, 1, -1)
+        print('shape 7: ', clip_image_embedsa.shape)
+        clip_image_embedsb = clip_image_embeds.view(seq_len, -1)
+        print('shape 8: ', clip_image_embedsb.shape)
+
+        clip_image_embeds = clip_image_embedsb
+
         #clip_image_embeds = torch.cat([torch.stack(image_prompt_embeds_list)], dim=0).mean(dim=0)
 
         # 4. Prepare timesteps
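What the hunk changes, in shape terms: the torch.cat now joins the zero tensor and the averaged image embedding along the sequence axis (dim=1) instead of the batch axis (dim=0), and the final view collapses the result to a 2-D (tokens, embed_dim) tensor rather than a single flattened token. The sketch below walks through those shapes; the (1, 64, 1280) input shape is an assumption for illustration only, since the real sequence length and embedding dimension depend on the image encoder feeding this pipeline.

import torch

# Hypothetical stand-in for average_image_embed: a batched 3-D embedding
# of shape (batch, seq_len, embed_dim). Assumed shape, for illustration.
average_image_embed = torch.randn(1, 64, 1280)
zeros_tensor = torch.zeros_like(average_image_embed)

# Old: dim=0 stacked along the batch axis -> (2, 64, 1280).
# New: dim=1 concatenates along the sequence axis -> (1, 128, 1280).
clip_image_embeds = torch.cat([zeros_tensor, average_image_embed], dim=1)
bs_embed, seq_len, _ = clip_image_embeds.shape  # 1, 128, 1280

# Variant a (the view the old code kept): one flattened token per batch
# item -> (1, 1, 128 * 1280).
clip_image_embedsa = clip_image_embeds.view(bs_embed, 1, -1)

# Variant b (the view the commit keeps): drop the batch dim, one row per
# sequence position -> (128, 1280).
clip_image_embedsb = clip_image_embeds.view(seq_len, -1)

print(clip_image_embedsa.shape)  # torch.Size([1, 1, 163840])
print(clip_image_embedsb.shape)  # torch.Size([128, 1280])

Assigning clip_image_embeds = clip_image_embedsb, as the commit does, hands the rest of the pipeline a 2-D (tokens, embed_dim) tensor in which the zero rows sit alongside the averaged image tokens; the commit itself doesn't state the intent, but the pattern resembles building an unconditional/conditional pair for the image prompt.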