Update README.md
README.md CHANGED
```diff
@@ -56,10 +56,10 @@ passages = [
 # Load model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained('Salesforce/SFR-Embedding-Mistral')
 model = AutoModel.from_pretrained('Salesforce/SFR-Embedding-Mistral')
-
+input_texts = [*queries, *passages]
 max_length = 4096
 # Tokenize the input texts
-batch_dict = tokenizer(
+batch_dict = tokenizer(input_texts, max_length=max_length, padding=True, truncation=True, return_tensors="pt")
 outputs = model(**batch_dict)
 embeddings = last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
 
```
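For reference, the README snippet reads roughly as follows once this change is applied. This is a sketch, not the full model card: the `queries`/`passages` values and the `last_token_pool` helper are assumed here (following the usual last-token pooling pattern for this model family, and the real model card also formats queries with a task instruction, which is omitted for brevity); only the lines touched by the diff above are confirmed by this commit.

```python
import torch
import torch.nn.functional as F
from torch import Tensor
from transformers import AutoTokenizer, AutoModel

def last_token_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor:
    # Pool the hidden state of the last non-padding token of each sequence.
    left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
    if left_padding:
        return last_hidden_states[:, -1]
    sequence_lengths = attention_mask.sum(dim=1) - 1
    batch_size = last_hidden_states.shape[0]
    return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]

# Placeholder inputs; the README defines its own queries and passages.
queries = ["How to bake a chocolate cake?"]
passages = ["To bake a chocolate cake, start by preheating the oven ..."]

# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SFR-Embedding-Mistral')
model = AutoModel.from_pretrained('Salesforce/SFR-Embedding-Mistral')

# Concatenate queries and passages into a single batch (line added by this commit)
input_texts = [*queries, *passages]

max_length = 4096
# Tokenize the input texts (tokenizer call completed by this commit)
batch_dict = tokenizer(input_texts, max_length=max_length, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    outputs = model(**batch_dict)
embeddings = last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask'])

# Optional: normalize and score query-passage similarity
embeddings = F.normalize(embeddings, p=2, dim=1)
scores = embeddings[:len(queries)] @ embeddings[len(queries):].T
```

Concatenating queries and passages into one `input_texts` batch lets a single tokenizer and model pass produce embeddings for both; the resulting matrix is then split back by index (`[:len(queries)]` vs. `[len(queries):]`) when computing similarity scores.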