File size: 504 Bytes
3babd16 b01e5b1 3babd16 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 |
from io import BytesIO

import gradio as gr
from PIL import Image
from sentence_transformers import SentenceTransformer, util
def get_image_embedding(image):
    """Compute a CLIP embedding for an uploaded image.

    Args:
        image: The uploaded image. Accepts raw ``bytes``, a
            ``PIL.Image.Image``, or a numpy array (the default payload
            of ``gr.inputs.Image()``).

    Returns:
        dict: ``{"prediction": embedding}`` where ``embedding`` is the
        vector produced by the CLIP model's ``encode``.
    """
    # Cache the model on the function object so the (slow, memory-heavy)
    # load happens once per process, not once per request.
    model = getattr(get_image_embedding, "_model", None)
    if model is None:
        model = SentenceTransformer('clip-ViT-B-32')
        get_image_embedding._model = model
    # Normalize the input to a PIL image for the encoder.
    if isinstance(image, Image.Image):
        pil_image = image
    elif isinstance(image, (bytes, bytearray)):
        pil_image = Image.open(BytesIO(image))
    else:
        # NOTE(review): gr.inputs.Image passes a numpy array by default —
        # the original Image.open(BytesIO(...)) would fail on that input.
        pil_image = Image.fromarray(image)
    img_emb = model.encode(pil_image)
    return {"prediction": img_emb}
# Wire the embedding function into a simple Gradio UI:
# an image-upload input mapped to a label output.
# NOTE(review): gr.inputs / gr.outputs is the legacy (pre-3.x) Gradio API;
# newer versions use gr.Image() / gr.Label() — confirm the pinned version.
image_input = gr.inputs.Image()
label_output = gr.outputs.Label()

# Launch only when executed as a script, not when this module is imported.
if __name__ == "__main__":
    gr.Interface(fn=get_image_embedding, inputs=image_input, outputs=label_output).launch()
|