from sentence_transformers import SentenceTransformer
from PIL import Image
import gradio as gr
# Load the CLIP model once at import time so every request reuses it
# instead of re-initializing the model on each call.
image_model = SentenceTransformer('clip-ViT-B-32')

def get_image_embedding(image):
    # Encode the uploaded PIL image into a CLIP embedding vector.
    img_emb = image_model.encode(image)
    return {"prediction": img_emb.tolist()}
# Gradio UI: accept an uploaded image and return its embedding as JSON.
image_input = gr.Image(type="pil")
label_output = gr.JSON()

gr.Interface(fn=get_image_embedding, inputs=image_input, outputs=label_output).launch()
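
# A minimal local sanity check, sketched as comments since launch() blocks.
# "example.jpg" is a hypothetical path, not part of the original app:
#
#     emb = get_image_embedding(Image.open("example.jpg"))
#     print(len(emb["prediction"]))  # clip-ViT-B-32 produces 512-dim vectors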