Update app.py
app.py CHANGED
@@ -7,23 +7,36 @@ import numpy as np
# Load JinaAI CLIP model
model = AutoModel.from_pretrained('jinaai/jina-clip-v1', trust_remote_code=True)

-def compute_similarity(input1, input2):
+def compute_similarity(input1_type, input1_text, input1_image, input2_type, input2_text, input2_image):
    """
    Computes similarity between:
-    -
-    - Image
-    - Text
+    - Text-Text
+    - Image-Image
+    - Text-Image & Image-Text
    """
-
-    #
-
-
-
-
+
+    # Determine input types
+    if input1_type == "Text":
+        input1 = input1_text.strip()
+        input1_is_text = bool(input1)
+        input1_is_image = False
+    else:
+        input1 = input1_image
+        input1_is_text = False
+        input1_is_image = input1 is not None
+
+    if input2_type == "Text":
+        input2 = input2_text.strip()
+        input2_is_text = bool(input2)
+        input2_is_image = False
+    else:
+        input2 = input2_image
+        input2_is_text = False
+        input2_is_image = input2 is not None

    # Ensure valid input
    if not (input1_is_text or input1_is_image) or not (input2_is_text or input2_is_image):
-        return "Error:
+        return "Error: Please provide valid inputs (text or image) for both fields!"

    try:
        with torch.no_grad():
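The diff skips from line 42 to line 72, so the body of the `try:` block (the actual encoding and scoring) is not shown in this commit view. As orientation only, here is a minimal standalone sketch of how that comparison is typically done with jina-clip-v1, using the `encode_text` / `encode_image` helpers documented on the model card; the sample sentence and the `dog.jpg` path are placeholders, and the real app.py may compute this differently.

```python
# Sketch only: the elided lines 43-71 of app.py are not shown in this diff.
# encode_text / encode_image come from the jina-clip-v1 remote code.
import numpy as np
from transformers import AutoModel

model = AutoModel.from_pretrained('jinaai/jina-clip-v1', trust_remote_code=True)

def cosine_similarity(a, b):
    # Cosine similarity between two embedding vectors
    a, b = np.asarray(a, dtype=np.float32), np.asarray(b, dtype=np.float32)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

text_emb = model.encode_text(["A photo of a dog playing in the park"])[0]
image_emb = model.encode_image(["dog.jpg"])[0]  # placeholder path; image URLs also work
print(f"Similarity Score: {cosine_similarity(text_emb, image_emb):.4f}")
```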
@@ -59,32 +72,34 @@ def compute_similarity(input1, input2):
        return f"Error: {str(e)}"

# Gradio UI
-
-
-    inputs
-
-
-    gr.
-
-
-
-
-
-
-
-
-    )
-
-
-
-
-
-
-
-
-
-
-
-
+with gr.Blocks() as demo:
+    gr.Markdown("# JinaAI CLIP Multimodal Similarity")
+    gr.Markdown("Compare similarity between two inputs: **Text-Text, Image-Image, or Image-Text**.")
+
+    with gr.Row():
+        input1_type = gr.Radio(["Text", "Image"], label="Input 1 Type", value="Text")
+        input2_type = gr.Radio(["Text", "Image"], label="Input 2 Type", value="Text")
+
+    input1_text = gr.Textbox(label="Input 1 (Text)", visible=True)
+    input1_image = gr.Image(type="numpy", label="Input 1 (Image)", visible=False)
+
+    input2_text = gr.Textbox(label="Input 2 (Text)", visible=True)
+    input2_image = gr.Image(type="numpy", label="Input 2 (Image)", visible=False)
+
+    output = gr.Textbox(label="Similarity Score / Error", interactive=False)
+
+    def update_visibility(input1_type, input2_type):
+        return (
+            input1_type == "Text",   # Input 1 text visibility
+            input1_type == "Image",  # Input 1 image visibility
+            input2_type == "Text",   # Input 2 text visibility
+            input2_type == "Image"   # Input 2 image visibility
+        )
+
+    input1_type.change(update_visibility, inputs=[input1_type, input2_type], outputs=[input1_text, input1_image, input2_text, input2_image])
+    input2_type.change(update_visibility, inputs=[input1_type, input2_type], outputs=[input1_text, input1_image, input2_text, input2_image])
+
+    compute_button = gr.Button("Compute Similarity")
+    compute_button.click(compute_similarity, inputs=[input1_type, input1_text, input1_image, input2_type, input2_text, input2_image], outputs=output)

demo.launch()
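A note on the visibility wiring above: `update_visibility` returns bare booleans, which a Gradio event handler normally treats as new component values rather than visibility flags. The conventional way to toggle visibility from a `Blocks` handler is to return `gr.update(visible=...)` for each output, as in this hypothetical variant (not part of the commit):

```python
import gradio as gr

# Hypothetical variant of update_visibility: gr.update(visible=...) is the
# standard way to toggle component visibility from a Blocks event handler.
def update_visibility(input1_type, input2_type):
    return (
        gr.update(visible=(input1_type == "Text")),   # Input 1 text box
        gr.update(visible=(input1_type == "Image")),  # Input 1 image upload
        gr.update(visible=(input2_type == "Text")),   # Input 2 text box
        gr.update(visible=(input2_type == "Image")),  # Input 2 image upload
    )
```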