import gradio as gr
from transformers import AutoModel
from PIL import Image
import torch
import torch.nn.functional as F

# jina-clip-v1 is a custom architecture, so it must be loaded through
# AutoModel with trust_remote_code=True; the stock CLIPModel class cannot
# instantiate it. The remote code bundles its own tokenizer and image
# preprocessing, so no separate feature extractor or tokenizer is needed.
model_name = "jinaai/jina-clip-v1"
model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
def compute_similarity(image, text):
    image = Image.fromarray(image)  # Gradio passes the image as a NumPy array

    with torch.no_grad():
        # encode_image / encode_text come from the model's remote code and
        # handle preprocessing and tokenization internally; per the model
        # card they take lists of images (paths, URLs, or PIL images) and
        # strings. torch.as_tensor covers both NumPy and tensor returns.
        image_embeds = torch.as_tensor(model.encode_image([image]))
        text_embeds = torch.as_tensor(model.encode_text([text]))

    # L2-normalize the embeddings and compute cosine similarity
    image_embeds = F.normalize(image_embeds, p=2, dim=-1)
    text_embeds = F.normalize(text_embeds, p=2, dim=-1)
    similarity_score = (image_embeds @ text_embeds.T).item()
    return similarity_score
# Gradio UI
demo = gr.Interface(
    fn=compute_similarity,
    inputs=[gr.Image(type="numpy"), gr.Textbox(label="Enter text")],
    outputs=gr.Number(label="Similarity Score"),
    title="JinaAI CLIP Image-Text Similarity",
    description="Upload an image and enter a text prompt to get the similarity score.",
)

demo.launch()
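
# Quick sanity check without launching the UI. This is a minimal sketch:
# "cat.jpg" is a hypothetical placeholder path, not a file shipped with
# this Space.
#
#   import numpy as np
#   img = np.asarray(Image.open("cat.jpg").convert("RGB"))
#   print(compute_similarity(img, "a photo of a cat"))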