1inkusFace committed (verified)
Commit b435b9c · Parent(s): 855f65a

Update app.py

Files changed (1): app.py (+7 -4)
app.py CHANGED
@@ -228,7 +228,8 @@ def generate_30(
     # 4. (Optional) Average the pooled embeddings
     prompt_embeds = torch.mean(prompt_embeds,dim=0,keepdim=True)
     print('averaged shape: ', prompt_embeds.shape)
-    pooled_prompt_embeds = torch.mean(pooled_prompt_embeds,dim=0,keepdim=True)
+    pooled_prompt_embeds = torch.mean(pooled_prompt_embeds,dim=0)
+    print('pooled averaged shape: ', pooled_prompt_embeds.shape)
 
     options = {
         #"prompt": prompt,
@@ -311,7 +312,8 @@ def generate_60(
     # 4. (Optional) Average the pooled embeddings
     prompt_embeds = torch.mean(prompt_embeds,dim=0,keepdim=True)
     print('averaged shape: ', prompt_embeds.shape)
-    pooled_prompt_embeds = torch.mean(pooled_prompt_embeds,dim=0,keepdim=True)
+    pooled_prompt_embeds = torch.mean(pooled_prompt_embeds,dim=0)
+    print('pooled averaged shape: ', pooled_prompt_embeds.shape)
 
 
     options = {
@@ -387,7 +389,7 @@ def generate_90(
     prompt_embeds_b = pipe.text_encoder(text_input_ids2.to(torch.device('cuda')), output_hidden_states=True)
     pooled_prompt_embeds_b = prompt_embeds_b[0] # Pooled output from encoder 2
     prompt_embeds_b = prompt_embeds_b.hidden_states[-2] # Penultimate hidden state from encoder 2
-
+
     # 3. Concatenate the embeddings
     prompt_embeds = torch.cat([prompt_embeds_a, prompt_embeds_b])
     print('catted shape: ', prompt_embeds.shape)
@@ -395,7 +397,8 @@
     # 4. (Optional) Average the pooled embeddings
     prompt_embeds = torch.mean(prompt_embeds,dim=0,keepdim=True)
     print('averaged shape: ', prompt_embeds.shape)
-    pooled_prompt_embeds = torch.mean(pooled_prompt_embeds,dim=0,keepdim=True)
+    pooled_prompt_embeds = torch.mean(pooled_prompt_embeds,dim=0)
+    print('pooled averaged shape: ', pooled_prompt_embeds.shape)
 
     options = {
         #"prompt": prompt,