import gradio as gr
from transformers import AutoModel
from PIL import Image
import numpy as np

# Load the model once at startup. trust_remote_code is required because
# jina-clip-v1 ships its encode_text / encode_image helpers as custom code
# in the model repository rather than inside transformers itself.
model = AutoModel.from_pretrained("jinaai/jina-clip-v1", trust_remote_code=True)


def process_input(input_data, input_type):
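    """Encode one text string or image (file path or numpy array); return None if the input is invalid."""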
    if input_type == "Text":
        return model.encode_text([input_data]) if input_data.strip() else None
    elif input_type == "Image":
        if isinstance(input_data, str):
            image = Image.open(input_data).convert("RGB")
        elif isinstance(input_data, np.ndarray):
            image = Image.fromarray(input_data)
        else:
            return None
        return model.encode_image([image])
    return None


def compute_similarity(text1, image1, text2, image2, input1_type, input2_type):
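    """Validate both selected inputs, embed them, and return a similarity score or an error string."""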
    # Pick the active widget's value for each slot based on its selected type.
    input1 = text1 if input1_type == "Text" else image1
    input2 = text2 if input2_type == "Text" else image2

    if input1_type == "Text" and not input1.strip():
        return "Error: Input 1 is empty!"
    if input2_type == "Text" and not input2.strip():
        return "Error: Input 2 is empty!"
    if input1_type == "Image" and input1 is None:
        return "Error: Image 1 is missing!"
    if input2_type == "Image" and input2 is None:
        return "Error: Image 2 is missing!"

    embedding1 = process_input(input1, input1_type)
    embedding2 = process_input(input2, input2_type)

    if embedding1 is None or embedding2 is None:
        return "Error: Failed to process input!"

    # L2-normalize so the dot product is a cosine similarity in [-1, 1];
    # this is a no-op if the encoder already returns unit-norm vectors.
    embedding1 = embedding1 / np.linalg.norm(embedding1, axis=-1, keepdims=True)
    embedding2 = embedding2 / np.linalg.norm(embedding2, axis=-1, keepdims=True)
    similarity_score = (embedding1 @ embedding2.T).item()
    return f"Similarity Score: {similarity_score:.4f}"


def update_visibility(input1_type, input2_type):
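    """Show each slot's Textbox or Image widget to match its selected input type."""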
    return (
        gr.update(visible=(input1_type == "Text")),
        gr.update(visible=(input1_type == "Image")),
        gr.update(visible=(input2_type == "Text")),
        gr.update(visible=(input2_type == "Image")),
    )


with gr.Blocks() as demo:
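    # Each slot pairs a Textbox with an Image widget; update_visibility keeps
    # exactly one of the pair visible at a time.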
    gr.Markdown("## JinaAI CLIP Multimodal Similarity")

    with gr.Row():
        input1_type = gr.Radio(["Text", "Image"], label="Input 1 Type", value="Text")
        input2_type = gr.Radio(["Text", "Image"], label="Input 2 Type", value="Image")

    with gr.Row():
        input1_text = gr.Textbox(label="Text Input 1", visible=True)
        input1_image = gr.Image(type="numpy", interactive=True, label="Image Input 1", visible=False)

    with gr.Row():
        input2_text = gr.Textbox(label="Text Input 2", visible=False)
        input2_image = gr.Image(type="numpy", interactive=True, label="Image Input 2", visible=True)

    output = gr.Textbox(label="Similarity Score / Error", interactive=False)

    # Swap widget visibility whenever either slot's type selection changes.
    input1_type.change(update_visibility, inputs=[input1_type, input2_type],
                       outputs=[input1_text, input1_image, input2_text, input2_image])
    input2_type.change(update_visibility, inputs=[input1_type, input2_type],
                       outputs=[input1_text, input1_image, input2_text, input2_image])

    btn = gr.Button("Compute Similarity")
    # Pass both widgets for each slot; compute_similarity reads the one that
    # matches the selected type and ignores the other.
    btn.click(compute_similarity,
              inputs=[input1_text, input1_image, input2_text, input2_image, input1_type, input2_type],
              outputs=output)

demo.launch()
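# Gradio serves the demo locally, at http://127.0.0.1:7860 by default.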