Update app.py
app.py
CHANGED
@@ -7,15 +7,15 @@ import torch
 # Load JinaAI CLIP model
 model = AutoModel.from_pretrained("jinaai/jina-clip-v1", trust_remote_code=True)
 
-# Function to process input
+# Function to process input
 def process_input(input_data, input_type):
     if input_type == "Text":
         return model.encode_text([input_data]) if input_data.strip() else None
     elif input_type == "Image":
-        if isinstance(input_data, np.ndarray):  # Gradio provides
-            image = Image.fromarray(input_data)  # Convert NumPy
-            return model.encode_image(
-        return None  #
+        if isinstance(input_data, np.ndarray):  # Gradio provides NumPy array for images
+            image = Image.fromarray(input_data)  # Convert NumPy to PIL Image
+            return model.encode_image(image)  # Directly pass image (no list)
+        return None  # If input is not valid
     return None
 
 # Function to compute similarity
@@ -41,12 +41,12 @@ def compute_similarity(input1, input2, input1_type, input2_type):
     similarity_score = (embedding1 @ embedding2.T).item()
     return f"Similarity Score: {similarity_score:.4f}"
 
-# Function to
+# Function to update UI dynamically
 def update_visibility(input1_type, input2_type):
     return (
-        gr.update(visible=(input1_type == "Text"), value="" if input1_type == "Image" else None),
-        gr.update(visible=(input1_type == "Image"), value=None),
-        gr.update(visible=(input2_type == "Text"), value="" if input2_type == "Image" else None),
+        gr.update(visible=(input1_type == "Text"), value="" if input1_type == "Image" else None),
+        gr.update(visible=(input1_type == "Image"), value=None),
+        gr.update(visible=(input2_type == "Text"), value="" if input2_type == "Image" else None),
         gr.update(visible=(input2_type == "Image"), value=None)
     )
 
@@ -68,7 +68,7 @@ with gr.Blocks() as demo:
 
     output = gr.Textbox(label="Similarity Score / Error", interactive=False)
 
-    # Toggle visibility of inputs dynamically
+    # Toggle visibility of inputs dynamically
     input1_type.change(update_visibility, inputs=[input1_type, input2_type],
                        outputs=[input1_text, input1_image, input2_text, input2_image])
     input2_type.change(update_visibility, inputs=[input1_type, input2_type],