JasonTPhillipsJr committed
Commit 78d5be0 · verified · 1 Parent(s): 4b2b2d4

Update app.py

Files changed (1): app.py +1 -6
app.py CHANGED
@@ -74,6 +74,7 @@ def process_entity(batch, model, device):
 
     # Convert tensor to list of token IDs, and decode them into a readable sentence
     pseudo_sentence_decoded = bert_tokenizer.decode(pseudo_sentence[0].tolist(), skip_special_tokens=False)
+    st.write("Pseudo Sentence: ", pseudo_sentence_decoded)
 
     with torch.no_grad():
         outputs = spaBERT_model(#input_ids=input_ids,
@@ -98,12 +99,6 @@ for batch in (data_loader):
     spaBERT_embedding, input_ids, pseudo_sentence = process_entity(batch, spaBERT_model, device)
     spaBERT_embeddings.append(spaBERT_embedding)
     pseudo_sentences.append(pseudo_sentence)
-
-# Print all the pseudo sentences
-print("Pseudo Sentences:")
-for idx, sentence in enumerate(pseudo_sentences):
-    print(f"{idx + 1}: {sentence}")
-
 
 embedding_cache = {}
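The change swaps a batch-level stdout loop for a per-entity Streamlit call: each pseudo-sentence is decoded and written to the app UI as it is produced, so the later print() loop over pseudo_sentences becomes redundant. The sketch below illustrates that pattern in isolation; the "bert-base-uncased" checkpoint and the sample token-ID tensor are stand-ins for the bert_tokenizer and the pseudo_sentence tensor the app already builds inside process_entity, and the spaBERT_model call is omitted.

import streamlit as st
import torch
from transformers import BertTokenizer

# Stand-in tokenizer; the app defines its own bert_tokenizer elsewhere.
bert_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

# Stand-in for the (1, seq_len) tensor of token IDs produced inside process_entity().
pseudo_sentence = torch.tensor([bert_tokenizer.encode("Los Angeles is near Pasadena")])

# Decode the first sequence in the batch, keeping special tokens ([CLS], [SEP]) visible,
# mirroring the decode call in the diff with skip_special_tokens=False.
pseudo_sentence_decoded = bert_tokenizer.decode(
    pseudo_sentence[0].tolist(), skip_special_tokens=False
)

# st.write renders the decoded sentence in the running Streamlit page for every entity,
# which is what makes the removed stdout print loop unnecessary.
st.write("Pseudo Sentence: ", pseudo_sentence_decoded)

Writing to the UI inside the batch loop also shows progress while the loop runs, whereas the removed print() output would only have appeared in the server console, not in the Streamlit page.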