import gradio as gr
from transformers import AutoModel, AutoImageProcessor, AutoTokenizer
from PIL import Image
import torch
import torch.nn.functional as F
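
# jina-clip-v1 uses a custom "jina_clip" architecture, so the stock CLIPModel /
# CLIPFeatureExtractor / BertTokenizer classes cannot load it. The Auto* classes
# with trust_remote_code=True pull in Jina's own implementation from the Hub.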
model_name = "jinaai/jina-clip-v1"
model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
feature_extractor = AutoImageProcessor.from_pretrained(model_name, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)


def compute_similarity(image, text):
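    # Gradio hands the uploaded image over as a NumPy array (type="numpy" below);
    # convert it to a PIL image for preprocessing.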
    image = Image.fromarray(image)
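
    # Preprocess the image (resize/normalize) into model-ready pixel values.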
    image_inputs = feature_extractor(images=image, return_tensors="pt")
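
    # Tokenize the text. The BERT-style tokenizer also emits token_type_ids,
    # which the CLIP text tower does not accept, so drop them before encoding.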
    text_inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    text_inputs.pop("token_type_ids", None)
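
    # Encode both modalities without tracking gradients (inference only).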
    with torch.no_grad():
        image_embeds = model.get_image_features(**image_inputs)
        text_embeds = model.get_text_features(**text_inputs)
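
    # Debug output: inspect the raw, unnormalized embeddings.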
print("Image Embedding:", image_embeds) |
|
print("Text Embedding:", text_embeds) |
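
    # L2-normalize both embeddings so their dot product is the cosine similarity.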
    image_embeds = F.normalize(image_embeds, p=2, dim=-1)
    text_embeds = F.normalize(text_embeds, p=2, dim=-1)
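
    # One image vs. one text: the 1x1 result collapses to a Python float in [-1, 1].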
    similarity_score = (image_embeds @ text_embeds.T).item()

    return similarity_score
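

# Minimal Gradio UI: an image upload and a text box in, the similarity score out.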
demo = gr.Interface(
    fn=compute_similarity,
    inputs=[gr.Image(type="numpy"), gr.Textbox(label="Enter text")],
    outputs=gr.Number(label="Similarity Score"),
    title="JinaAI CLIP Image-Text Similarity",
    description="Upload an image and enter a text prompt to get the similarity score."
)

demo.launch()