import gradio as gr
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer, CLIPModel, CLIPProcessor

# Load the models once at startup rather than on every request.
tokenizer = AutoTokenizer.from_pretrained("Alibaba-NLP/gte-Qwen2-1.5B-instruct")
text_model = AutoModel.from_pretrained("Alibaba-NLP/gte-Qwen2-1.5B-instruct")

# The original returned a placeholder string instead of a real image
# embedding; CLIP is used here as one reasonable stand-in image encoder.
image_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")


def get_image_embedding(image: Image.Image):
    # Preprocess the image and run it through the CLIP image encoder.
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        img_emb = image_model.get_image_features(**inputs)
    # Drop the batch dimension and convert to a JSON-serializable list.
    return {"embedding": img_emb.squeeze(0).tolist()}

def get_text_embedding(text: str):
    # Tokenize the input text (the original overwrote the `text` argument
    # with a hard-coded string; that bug is removed here).
    inputs = tokenizer(text, return_tensors="pt")

    # Get token embeddings from the model.
    with torch.no_grad():
        outputs = text_model(**inputs)
    embeddings = outputs.last_hidden_state

    # Mean-pool the token embeddings into a single sentence embedding
    # (kept from the original; the upstream model card recommends
    # last-token pooling instead).
    sentence_embedding = embeddings.mean(dim=1)
    # Convert the tensor to a list so gr.JSON can serialize it.
    return {"embedding": sentence_embedding.squeeze(0).tolist()}

image_embedding = gr.Interface(
    fn=get_image_embedding,
    inputs=gr.Image(type="pil"),
    outputs=gr.JSON(),
    title="Image Embedding",
)
text_embedding = gr.Interface(
    fn=get_text_embedding,
    inputs=gr.Textbox(),
    outputs=gr.JSON(),
    title="Text Embedding",
)

space = gr.TabbedInterface([image_embedding, text_embedding], ["Image Embedding", "Text Embedding"])
space.launch()
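
# Usage sketch (assumption: the app is running locally on Gradio's default
# port; the URL and api_name values below are illustrative). Once launched,
# the endpoints can be queried from a separate process with gradio_client:
#
#     from gradio_client import Client
#
#     client = Client("http://127.0.0.1:7860/")
#     emb = client.predict("hello world", api_name="/predict_1")
#
# The exact api_name values are assigned by Gradio at startup; check the
# running app's "Use via API" page to confirm them.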