File size: 1,049 Bytes
e074ee9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 |
import gradio as gr
from transformers import CLIPModel, CLIPProcessor
from PIL import Image
import torch
# Load model and processor
# Module-level, eager load: downloads/loads weights once at startup so the
# Gradio callback can reuse them on every request.
# NOTE(review): "jinaai/jina-clip-v1" is a custom ("jina_clip") architecture on
# the Hub that is normally loaded via AutoModel.from_pretrained(...,
# trust_remote_code=True); loading it through CLIPModel/CLIPProcessor may fail
# with an unrecognized-config error — confirm against the model card.
model_name = "jinaai/jina-clip-v1"
model = CLIPModel.from_pretrained(model_name)
processor = CLIPProcessor.from_pretrained(model_name)
def compute_similarity(image, text):
    """Return the CLIP image-to-text similarity score for one image/text pair.

    Args:
        image: Image as a NumPy uint8 array (Gradio ``type="numpy"``), or
            ``None`` when the user submits without uploading an image.
        text: Text prompt to compare against the image.

    Returns:
        float: the model's image-to-text logit (higher means more similar),
        or ``0.0`` when the image or the text is missing.
    """
    # Guard: Gradio invokes the callback even when inputs are empty; the
    # original code crashed on Image.fromarray(None) / empty prompts.
    if image is None or not text:
        return 0.0
    image = Image.fromarray(image)  # Convert NumPy array to PIL image
    # Tokenize the text and preprocess the image into model-ready tensors.
    inputs = processor(text=[text], images=image, return_tensors="pt", padding=True, truncation=True)
    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        outputs = model(**inputs)
    logits_per_image = outputs.logits_per_image  # Image-to-text similarity
    return logits_per_image.item()
# Gradio UI: one image and one text prompt in, a single similarity score out.
image_input = gr.Image(type="numpy")
text_input = gr.Textbox(label="Enter text")
score_output = gr.Number(label="Similarity Score")

demo = gr.Interface(
    fn=compute_similarity,
    inputs=[image_input, text_input],
    outputs=score_output,
    title="CLIP Image-Text Similarity",
    description="Upload an image and enter a text prompt to get the similarity score.",
)

demo.launch()
|