Update app.py
app.py CHANGED

@@ -103,10 +103,22 @@ def get_image_embedding(image_path):
     with torch.no_grad():
         image_embedding = model.get_image_features(**inputs).numpy().flatten()
 
-
-
-
+    # Print the actual embedding dimension
+    print(f"Image embedding shape: {image_embedding.shape}")
+
+    # CASE 1: Embedding is already 384-dimensional ✅
+    if len(image_embedding) == 384:
+        return image_embedding.tolist()
+
+    # CASE 2: Embedding is larger than 384 (e.g., 512) → Apply PCA ✅
+    elif len(image_embedding) > 384:
+        pca = PCA(n_components=384)
         image_embedding = pca.fit_transform(image_embedding.reshape(1, -1)).flatten()
+
+    # CASE 3: Embedding is smaller than 384 → Apply Padding ❌
+    else:
+        padding = np.zeros(384 - len(image_embedding))  # Create padding vector
+        image_embedding = np.concatenate((image_embedding, padding))  # Append padding
 
     return image_embedding.tolist()
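For orientation, here is a minimal, self-contained sketch of the function this diff produces. The commit does not show the surrounding setup, so the use of transformers' CLIPModel/CLIPProcessor and the checkpoint name below are assumptions. One caveat with the diff as written: scikit-learn's PCA cannot reduce a single sample to 384 components, because n_components may not exceed min(n_samples, n_features), which is 1 for a (1, 512) input. The sketch therefore truncates the vector in CASE 2 instead of fitting a per-call PCA; this is a deliberate departure from the commit's code, not a reproduction of it.

import numpy as np
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# Assumed setup: ViT-B/32 CLIP, whose image features are 512-dimensional.
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

TARGET_DIM = 384  # dimension the rest of the app apparently expects

def get_image_embedding(image_path):
    image = Image.open(image_path).convert("RGB")
    inputs = processor(images=image, return_tensors="pt")

    with torch.no_grad():
        image_embedding = model.get_image_features(**inputs).numpy().flatten()

    # CASE 1: already 384-dimensional
    if len(image_embedding) == TARGET_DIM:
        return image_embedding.tolist()

    # CASE 2: larger than 384 → truncate (PCA on one sample raises ValueError)
    if len(image_embedding) > TARGET_DIM:
        return image_embedding[:TARGET_DIM].tolist()

    # CASE 3: smaller than 384 → zero-pad up to the target dimension
    padding = np.zeros(TARGET_DIM - len(image_embedding))
    return np.concatenate((image_embedding, padding)).tolist()

Truncation discards the trailing components outright; if a faithful 512→384 reduction matters, a PCA fitted once on a batch of reference embeddings (and reused at query time) would preserve more structure than any per-call transform.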
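To make the PCA constraint concrete, this tiny standalone check (illustrative only) shows scikit-learn rejecting a 384-component fit on a single 512-dimensional vector:

import numpy as np
from sklearn.decomposition import PCA

x = np.random.rand(1, 512)  # one flattened CLIP embedding, as in the diff
try:
    PCA(n_components=384).fit_transform(x)
except ValueError as err:
    # sklearn reports that n_components must be between 0 and
    # min(n_samples, n_features) == 1 for this input
    print(err)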