from sentence_transformers import SentenceTransformer, util
from PIL import Image
import gradio as gr

# Load the CLIP model once at import time so it is not reloaded on every request.
image_model = SentenceTransformer('clip-ViT-B-32')


def get_image_embedding(image: Image.Image):
    # Gradio passes a PIL image directly when the input component uses type="pil",
    # so it can be encoded without any manual decoding.
    img_emb = image_model.encode(image)

    return {"prediction": img_emb.tolist()}
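
# Illustrative sketch (assumption, not part of the original app): because
# clip-ViT-B-32 embeds images and text into a shared space, an image embedding
# produced above can be scored against text prompts with util.cos_sim.
# The prompt strings below are hypothetical examples.
def rank_text_prompts(img_emb, prompts=("a photo of a dog", "a photo of a cat")):
    # Encode the candidate prompts with the same CLIP model.
    text_emb = image_model.encode(list(prompts))
    # Cosine similarity between the single image embedding and each prompt.
    scores = util.cos_sim(img_emb, text_emb)[0]
    return dict(zip(prompts, scores.tolist()))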


image_input = gr.Image(type="pil")
embedding_output = gr.JSON()

gr.Interface(fn=get_image_embedding, inputs=image_input, outputs=embedding_output).launch()