import torch
from sentence_transformers import SentenceTransformer

class Load_EmbeddingModels:
    """Loads a SentenceTransformer image-embedding model (jina-clip-v2 by default)."""

    def __init__(self, model_name='jinaai/jina-clip-v2'):
        # Prefer GPU when available.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.img_model_ID = model_name
        self.img_model = self.get_cp_img_model_info(self.img_model_ID)

    def get_cp_img_model_info(self, model_name):
        # trust_remote_code is required because jina-clip-v2 ships custom model code.
        print('Loading SentenceTransformer model')
        model = SentenceTransformer(model_name, trust_remote_code=True)
        model = model.to(self.device)
        return model

    def get_single_image_embedding_cp_im(self, my_image):
        # Encode a single image; normalize_embeddings=True yields a unit-length
        # vector, so cosine similarity reduces to a dot product.
        embedding = self.img_model.encode(
            my_image,
            normalize_embeddings=True
        )
        # Convert the numpy array to a plain Python list.
        return embedding.tolist()

class Get_EmbeddingModels:
    """Thin wrapper exposing a dense-embedding interface over Load_EmbeddingModels."""

    def __init__(self, model_name='jinaai/jina-clip-v2'):
        self.embed_model = Load_EmbeddingModels(model_name)

    def get_dense_embd(self, img):
        return self.embed_model.get_single_image_embedding_cp_im(img)