import gradio as gr
from transformers import CLIPModel, CLIPFeatureExtractor, BertTokenizer
from PIL import Image
import torch
# Load model and processors separately
model_name = "jinaai/jina-clip-v1"
model = CLIPModel.from_pretrained(model_name)
feature_extractor = CLIPFeatureExtractor.from_pretrained(model_name)
tokenizer = BertTokenizer.from_pretrained(model_name)
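# Note: jina-clip-v1 ships custom modeling code, and its model card documents
# loading via AutoModel with trust_remote_code=True, e.g.:
#   from transformers import AutoModel
#   model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
# Mapping the checkpoint onto the stock CLIPModel/BertTokenizer classes, as
# done above, may emit warnings or fail on some transformers versions; the
# AutoModel route is the fallback if that happens.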
def compute_similarity(image, text):
    image = Image.fromarray(image)  # Convert the NumPy array from Gradio to a PIL Image

    # Process image
    image_inputs = feature_extractor(images=image, return_tensors="pt")

    # Process text; BertTokenizer emits token_type_ids, which CLIPModel.forward() does not accept
    text_inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    text_inputs.pop("token_type_ids", None)  # Drop it to avoid a TypeError

    with torch.no_grad():
        outputs = model(**image_inputs, **text_inputs)

    # logits_per_image is the logit-scaled image-text cosine similarity, so the
    # score is unbounded rather than confined to [0, 1]
    logits_per_image = outputs.logits_per_image
    similarity_score = logits_per_image.item()

    return similarity_score
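# Quick local sanity check (hypothetical image path, not part of the app):
#   import numpy as np
#   img = np.array(Image.open("example.jpg").convert("RGB"))
#   print(compute_similarity(img, "a photo of a cat"))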
# Gradio UI
demo = gr.Interface(
    fn=compute_similarity,
    inputs=[gr.Image(type="numpy"), gr.Textbox(label="Enter text")],
    outputs=gr.Number(label="Similarity Score"),
    title="JinaAI CLIP Image-Text Similarity",
    description="Upload an image and enter a text prompt to get the similarity score.",
)
demo.launch()
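# When running locally rather than on a hosted Space, Gradio can also expose a
# temporary public URL via demo.launch(share=True).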