Poor performance for long texts
#40
by
satorean
- opened
I noticed drastic execution-time increases for longer texts (more than a couple hundred tokens) compared to the JINA CLIP v1 text encoder. The Nomic Text 1.5 encoder only outperforms the JINA v1 text encoder on short texts (in execution time), so the advertised 8000-token limit is impractical. Are there any plans to improve performance? There is probably some quadratic time complexity in this model that the JINA CLIP v1 text encoder does not have.
import torch
import torch.nn.functional as F
from sentence_transformers import SentenceTransformer
from transformers import AutoImageProcessor, AutoModel, AutoTokenizer
import numpy as np
from PIL import Image
from pillow_heif import register_heif_opener
register_heif_opener() # Patch pillow to support .heic
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0]
input_mask_expanded = (
attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
)
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
input_mask_expanded.sum(1), min=1e-9
)
class Model:
def __init__(self):
tokenizer = tokenizer = AutoTokenizer.from_pretrained(
"bert-base-uncased", model_max_length=8192
)
model = model = AutoModel.from_pretrained(
"nomic-ai/nomic-embed-text-v1.5",
trust_remote_code=True,
rotary_scaling_factor=2,
)
model.eval()
self.text_model = model
self.text_tokenizer = tokenizer
self.image_processor = AutoImageProcessor.from_pretrained(
"nomic-ai/nomic-embed-vision-v1.5"
)
self.vision_model = AutoModel.from_pretrained(
"nomic-ai/nomic-embed-vision-v1.5", trust_remote_code=True
)
def encode_query(self, input_text: str) -> np.ndarray:
return self.encode_text("search_query: " + input_text)
def encode_document(self, input_text: str) -> np.ndarray:
return self.encode_text("search_document: " + input_text)
def encode_text(self, input_text: str) -> np.ndarray:
encoded_input = self.text_tokenizer(
[input_text], padding=True, truncation=True, return_tensors="pt"
)
with torch.no_grad():
model_output = self.text_model(**encoded_input)
embeddings = mean_pooling(model_output, encoded_input["attention_mask"])
embeddings = F.normalize(embeddings, p=2, dim=1)
return embeddings.numpy().astype(np.float32)
def encode_image(self, image_input: str | Image.Image):
if isinstance(image_input, str):
image = Image.open(image_input)
elif isinstance(image_input, Image.Image):
image = image_input
inputs = self.image_processor(image, return_tensors="pt")
with torch.no_grad():
img_emb = self.vision_model(**inputs).last_hidden_state
img_embeddings = F.normalize(img_emb[:, 0], p=2, dim=1)
return img_embeddings.squeeze().numpy().astype(np.float32)