import gradio as gr
import numpy as np
from transformers import AutoModel
from PIL import Image

# Load the JinaAI CLIP model (trust_remote_code enables its encode_text/encode_image helpers)
model = AutoModel.from_pretrained("jinaai/jina-clip-v1", trust_remote_code=True)

# Compute similarity between two inputs, each of which may be text or an image
def compute_similarity(input1_text, input1_image, input2_text, input2_image, input1_type, input2_type):
    # Pick the active value for each input based on the selected type
    input1 = input1_text if input1_type == "Text" else input1_image
    input2 = input2_text if input2_type == "Text" else input2_image

    # Validate inputs
    if input1_type == "Text" and (not input1 or input1.strip() == ""):
        return "Error: Input 1 is empty!"
    if input2_type == "Text" and (not input2 or input2.strip() == ""):
        return "Error: Input 2 is empty!"
    if input1_type == "Image" and input1 is None:
        return "Error: Image 1 is missing!"
    if input2_type == "Image" and input2 is None:
        return "Error: Image 2 is missing!"

    # Encode each input into an embedding
    if input1_type == "Text":
        emb1 = np.asarray(model.encode_text([input1]))
    else:
        emb1 = np.asarray(model.encode_image([Image.fromarray(input1)]))

    if input2_type == "Text":
        emb2 = np.asarray(model.encode_text([input2]))
    else:
        emb2 = np.asarray(model.encode_image([Image.fromarray(input2)]))

    # Normalize embeddings so the dot product is a true cosine similarity
    emb1 = emb1 / np.linalg.norm(emb1, axis=1, keepdims=True)
    emb2 = emb2 / np.linalg.norm(emb2, axis=1, keepdims=True)
    similarity_score = (emb1 @ emb2.T).item()

    return f"Similarity Score: {similarity_score:.4f}"

# Toggle which input fields are visible based on the selected input types
def update_visibility(input1_type, input2_type):
    return (
        gr.update(visible=(input1_type == "Text")),   # Text input 1
        gr.update(visible=(input1_type == "Image")),  # Image input 1
        gr.update(visible=(input2_type == "Text")),   # Text input 2
        gr.update(visible=(input2_type == "Image")),  # Image input 2
    )

# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## JinaAI CLIP Multimodal Similarity")

    with gr.Row():
        input1_type = gr.Radio(["Text", "Image"], label="Input 1 Type", value="Text")
        input2_type = gr.Radio(["Text", "Image"], label="Input 2 Type", value="Image")

    with gr.Row():
        input1_text = gr.Textbox(label="Text Input 1", visible=True)
        input1_image = gr.Image(type="numpy", label="Image Input 1", visible=False)

    with gr.Row():
        input2_text = gr.Textbox(label="Text Input 2", visible=False)
        input2_image = gr.Image(type="numpy", label="Image Input 2", visible=True)

    output = gr.Textbox(label="Similarity Score / Error", interactive=False)

    # Toggle visibility of the inputs whenever either type selector changes
    input1_type.change(update_visibility,
                       inputs=[input1_type, input2_type],
                       outputs=[input1_text, input1_image, input2_text, input2_image])
    input2_type.change(update_visibility,
                       inputs=[input1_type, input2_type],
                       outputs=[input1_text, input1_image, input2_text, input2_image])

    btn = gr.Button("Compute Similarity")
    # Pass both the text and image components; compute_similarity selects the active one per type
    btn.click(compute_similarity,
              inputs=[input1_text, input1_image, input2_text, input2_image, input1_type, input2_type],
              outputs=output)

demo.launch()