import gradio as gr
from transformers import AutoImageProcessor, AutoModel, AutoTokenizer
from PIL import Image
import torch
import torch.nn.functional as F

# Load the model and its processors separately. jina-clip-v1 is a custom
# architecture, so it must be loaded through the Auto classes with
# trust_remote_code=True; CLIPModel/CLIPFeatureExtractor cannot load it.
model_name = "jinaai/jina-clip-v1"
model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
image_processor = AutoImageProcessor.from_pretrained(model_name, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def compute_similarity(image, text):
    image = Image.fromarray(image)  # Gradio passes a NumPy array; convert to PIL

    # Preprocess the image
    image_inputs = image_processor(images=image, return_tensors="pt")

    # Tokenize the text (drop token_type_ids, which the text tower does not expect)
    text_inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    text_inputs.pop("token_type_ids", None)

    with torch.no_grad():
        # Extract embeddings
        image_embeds = model.get_image_features(**image_inputs)
        text_embeds = model.get_text_features(**text_inputs)

        # L2-normalize so the dot product below is a cosine similarity
        image_embeds = F.normalize(image_embeds, p=2, dim=-1)
        text_embeds = F.normalize(text_embeds, p=2, dim=-1)

    # Cosine similarity of the two unit vectors
    similarity_score = (image_embeds @ text_embeds.T).item()
    return similarity_score

# Gradio UI
demo = gr.Interface(
    fn=compute_similarity,
    inputs=[gr.Image(type="numpy"), gr.Textbox(label="Enter text")],
    outputs=gr.Number(label="Similarity Score"),
    title="JinaAI CLIP Image-Text Similarity",
    description="Upload an image and enter a text prompt to get the similarity score.",
)

demo.launch()
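# ---------------------------------------------------------------------------
# Alternative (a sketch, left commented out because demo.launch() above
# blocks): the jina-clip-v1 model card also documents encode_text() /
# encode_image() convenience methods on the trust_remote_code model, which
# handle tokenization, preprocessing, and (per the card's cosine-similarity
# examples) normalization internally. "cat.jpg" is a placeholder path.
#
#   text_embeds = model.encode_text(["a photo of a cat"])
#   image_embeds = model.encode_image(["cat.jpg"])  # paths, URLs, or PIL images
#   print((image_embeds @ text_embeds.T).item())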