import sys
# flush prints immediately so logs show up in real time (useful in containers)
sys.stdout.reconfigure(line_buffering=True)

import os
import numpy as np
import requests
import cv2
from skimage import feature
from io import BytesIO
import traceback

from flask import Flask, request, jsonify
from PIL import Image

# import deep learning libraries 
import torch
from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection, AutoTokenizer, AutoModel
from segment_anything import SamPredictor, sam_model_registry

app = Flask(__name__)

# relative weights of the visual feature scores (should sum to 1)
FEATURE_WEIGHTS = {
    "shape": 0.4,
    "color": 0.5,
    "texture": 0.1
}

# minimum combined score for an item to be returned as a match
FINAL_SCORE_THRESHOLD = 0.5


# load all models
print("="*50)
print("πŸš€ Initializing application and loading models...")
# select the device via the "device" env var (e.g. "cuda"), falling back to CPU
device_name = os.environ.get("device", "cpu")
device = torch.device('cuda' if 'cuda' in device_name and torch.cuda.is_available() else 'cpu')
print(f"🧠 Using device: {device}")

print("...Loading Grounding DINO model...")
gnd_model_id = "IDEA-Research/grounding-dino-base"
processor_gnd = AutoProcessor.from_pretrained(gnd_model_id)
model_gnd = AutoModelForZeroShotObjectDetection.from_pretrained(gnd_model_id).to(device)

print("...Loading Segment Anything (SAM) model...")
sam_checkpoint = "sam_vit_b_01ec64.pth"  # ViT-B checkpoint, expected in the working directory
sam_model = sam_model_registry["vit_b"](checkpoint=sam_checkpoint).to(device)
predictor = SamPredictor(sam_model)

print("...Loading BGE model for text embeddings...")
bge_model_id = "BAAI/bge-large-en-v1.5"
tokenizer_text = AutoTokenizer.from_pretrained(bge_model_id)
model_text = AutoModel.from_pretrained(bge_model_id).to(device)
print("βœ… All models loaded successfully.")
print("="*50)


# helper functions

def get_canonical_label(object_name_phrase: str) -> str:
    """Reduce a noun phrase to a single lowercase label, e.g. "a Black Leather Wallet" -> "wallet"."""
    print(f"\n  [Label] Extracting label for: '{object_name_phrase}'")
    label = object_name_phrase.strip().lower().split()[-1]
    label = ''.join(filter(str.isalpha, label))
    print(f"  [Label] βœ… Extracted label: '{label}'")
    return label if label else "unknown"

def download_image_from_url(image_url: str) -> Image.Image:
    print(f"  [Download] Downloading image from: {image_url[:80]}...")
    response = requests.get(image_url, timeout=30)  # timeout so a dead URL cannot hang the worker
    response.raise_for_status()
    image = Image.open(BytesIO(response.content))
    image_rgb = image.convert("RGB")
    print("  [Download] βœ… Image downloaded and standardized to RGB.")
    return image_rgb

def detect_and_crop(image: Image.Image, object_name: str) -> Image.Image:
    """Locate the object with Grounding DINO, segment it with SAM, and return an RGBA crop."""
    print(f"\n  [Detect & Crop] Starting detection for object: '{object_name}'")
    image_np = np.array(image.convert("RGB"))
    height, width = image_np.shape[:2]
    prompt = [[f"a {object_name}"]]
    inputs = processor_gnd(images=image, text=prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model_gnd(**inputs)
    results = processor_gnd.post_process_grounded_object_detection(
        outputs, inputs.input_ids, threshold=0.4, text_threshold=0.3, target_sizes=[(height, width)]
    )
    if not results or len(results[0]['boxes']) == 0:
        print("  [Detect & Crop] ⚠ Warning: Grounding DINO did not detect the object. Using full image.")
        return image
    result = results[0]
    scores = result['scores']
    max_idx = int(torch.argmax(scores))
    box = result['boxes'][max_idx].cpu().numpy().astype(int)
    print(f"  [Detect & Crop] βœ… Object detected with confidence: {scores[max_idx]:.2f}, Box: {box}")
    x1, y1, x2, y2 = box
    # clamp the box to the image bounds before cropping
    x1, y1 = max(x1, 0), max(y1, 0)
    x2, y2 = min(x2, width), min(y2, height)

    predictor.set_image(image_np)
    box_prompt = np.array([[x1, y1, x2, y2]])
    masks, _, _ = predictor.predict(box=box_prompt, multimask_output=False)
    mask = masks[0]
    
    mask_bool = mask > 0
    cropped_img_rgba = np.zeros((height, width, 4), dtype=np.uint8)
    cropped_img_rgba[:, :, :3] = image_np
    cropped_img_rgba[:, :, 3] = mask_bool * 255
    
    cropped_img_rgba = cropped_img_rgba[y1:y2, x1:x2]

    object_image = Image.fromarray(cropped_img_rgba, 'RGBA')
    return object_image
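
# Note: the crop keeps an alpha channel derived from the SAM mask so that the
# feature extraction below can ignore background pixels entirely.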

def extract_features(segmented_image: Image.Image) -> dict:
    """Compute shape (Hu moments), color (3D RGB histogram) and texture (LBP) features from an RGBA crop."""
    image_rgba = np.array(segmented_image)
    if image_rgba.ndim != 3 or image_rgba.shape[2] != 4:
        raise ValueError("Segmented image must be RGBA")
    
    # PIL arrays are in RGBA channel order, so split accordingly
    r, g, b, a = cv2.split(image_rgba)
    image_rgb = cv2.merge((r, g, b))
    mask = a

    gray = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # use the largest contour; SAM masks can contain small disconnected fragments
    largest = max(contours, key=cv2.contourArea) if contours else None
    hu_moments = cv2.HuMoments(cv2.moments(largest)).flatten() if largest is not None else np.zeros(7)
    
    color_hist = cv2.calcHist([image_rgb], [0, 1, 2], mask, [8, 8, 8], [0, 256, 0, 256, 0, 256])
    cv2.normalize(color_hist, color_hist)
    color_hist = color_hist.flatten()
    
    gray_masked = cv2.bitwise_and(gray, gray, mask=mask)
    lbp = feature.local_binary_pattern(gray_masked, P=24, R=3, method="uniform")
    (texture_hist, _) = np.histogram(lbp.ravel(), bins=np.arange(0, 27), range=(0, 26))
    texture_hist = texture_hist.astype("float32")
    texture_hist /= (texture_hist.sum() + 1e-6)
    
    return {
        "shape_features": hu_moments.tolist(),
        "color_features": color_hist.tolist(),
        "texture_features": texture_hist.tolist()
    }
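
# Resulting feature sizes: 7 Hu moments, 8*8*8 = 512 color bins, and 26 LBP
# bins (the uniform patterns for P=24 plus the non-uniform catch-all bin).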

def get_text_embedding(text: str) -> list:
    print(f"  [Embedding] Generating text embedding for: '{text[:50]}...'")
    text_with_instruction = f"Represent this sentence for searching relevant passages: {text}"
    inputs = tokenizer_text(text_with_instruction, return_tensors='pt', padding=True, truncation=True, max_length=512).to(device)
    with torch.no_grad():
        outputs = model_text(**inputs)
    # CLS-token pooling followed by L2 normalization, per BGE usage
    embedding = outputs.last_hidden_state[:, 0, :]
    embedding = torch.nn.functional.normalize(embedding, p=2, dim=1)
    print("  [Embedding] βœ… Text embedding generated.")
    return embedding.cpu().numpy()[0].tolist()

def cosine_similarity(vec1: np.ndarray, vec2: np.ndarray) -> float:
    return float(np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2)))
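
# cv2.matchShapes expects contours or raster images, not precomputed Hu-moment
# vectors, so /compare needs a direct comparison of the stored vectors. This is
# a minimal sketch of the CONTOURS_MATCH_I1 metric applied to those vectors:
# d = sum_i |1/m_i^A - 1/m_i^B| with m_i = sign(h_i) * log10(|h_i|).
def hu_moment_distance(hu_a: np.ndarray, hu_b: np.ndarray) -> float:
    dist = 0.0
    for h_a, h_b in zip(hu_a, hu_b):
        # skip near-zero moments, mirroring OpenCV's own guard
        if abs(h_a) < 1e-10 or abs(h_b) < 1e-10:
            continue
        m_a = np.sign(h_a) * np.log10(abs(h_a))
        m_b = np.sign(h_b) * np.log10(abs(h_b))
        if m_a == 0 or m_b == 0:
            continue
        dist += abs(1.0 / m_a - 1.0 / m_b)
    return float(dist)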

# API endpoints

@app.route('/process', methods=['POST'])
def process_item():
    """
    Receives item details, processes them, and returns all computed features.
    This is called when a new item is created in the Node.js backend.
    """
    print("\n" + "="*50)
    print("➑ [Request] Received new request to /process")
    try:
        data = request.get_json()
        if not data:
            return jsonify({"error": "Invalid JSON payload"}), 400

        object_name = data.get('objectName')
        description = data.get('objectDescription')
        image_url = data.get('objectImage') # optional: may be null

        if not all([object_name, description]):
            return jsonify({"error": "objectName and objectDescription are required."}), 400

        # process text based features
        canonical_label = get_canonical_label(object_name)
        text_embedding = get_text_embedding(description)

        response_data = {
            "canonicalLabel": canonical_label,
            "text_embedding": text_embedding,
        }

        # process visual features ONLY if an image_url is provided
        if image_url:
            print("--- Image URL provided, processing visual features... ---")
            image = download_image_from_url(image_url)
            object_crop = detect_and_crop(image, canonical_label)
            visual_features = extract_features(object_crop)
            # Add visual features to the response
            response_data.update(visual_features)
        else:
            print("--- No image URL provided, skipping visual feature extraction. ---")
        
        print("βœ… Successfully processed item.")
        print("="*50)
        return jsonify(response_data), 200

    except Exception as e:
        print(f"❌ Error in /process: {e}")
        traceback.print_exc()
        return jsonify({"error": str(e)}), 500

def stretch_image_score(score):
    """Linearly remap raw image scores from [0.4, 1.0) onto [0.7, 0.99) to boost mid-range confidence."""
    if score < 0.4 or score == 1.0:
        return score
    # e.g. 0.5 -> ~0.748, 0.9 -> ~0.942
    return 0.7 + (score - 0.4) * (0.99 - 0.7) / (1.0 - 0.4)

@app.route('/compare', methods=['POST'])
def compare_items():
    """
    Compares a query item against a list of candidate items and returns the
    matches whose combined score clears FINAL_SCORE_THRESHOLD.
    """
    print("\n" + "="*50)
    print("➑ [Request] Received new request to /compare")
    try:
        data = request.get_json()
        if not data:
            return jsonify({"error": "Invalid JSON payload"}), 400
        
        query_item = data.get('queryItem')
        search_list = data.get('searchList')

        if not all([query_item, search_list]):
            return jsonify({"error": "queryItem and searchList are required."}), 400
        
        query_text_emb = np.array(query_item['text_embedding'])
        results = []
        print(f"--- Comparing 1 query item against {len(search_list)} items ---")

        for item in search_list:
            item_id = item.get('_id')
            print(f"\n  [Checking] Item ID: {item_id}")
            try:
                # Text comparison is always done
                text_emb_found = np.array(item['text_embedding'])
                text_score = cosine_similarity(query_text_emb, text_emb_found)
                print(f"        - Text Score: {text_score:.4f}")

                # visual comparison only makes sense when BOTH items carry image features
                has_query_image = 'shape_features' in query_item and query_item['shape_features']
                has_item_image = 'shape_features' in item and item['shape_features']

                if has_query_image and has_item_image:
                    print("        - Both items have images. Performing visual comparison.")
                    # If both have images, proceed with full comparison
                    query_shape_feat = np.array(query_item['shape_features'])
                    query_color_feat = np.array(query_item['color_features']).astype("float32")
                    query_texture_feat = np.array(query_item['texture_features']).astype("float32")
                    
                    found_shape = np.array(item['shape_features'])
                    found_color = np.array(item['color_features']).astype("float32")
                    found_texture = np.array(item['texture_features']).astype("float32")

                    # compare the stored Hu-moment vectors directly (see hu_moment_distance above)
                    shape_dist = hu_moment_distance(query_shape_feat, found_shape)
                    shape_score = 1.0 / (1.0 + shape_dist)
                    color_score = cv2.compareHist(query_color_feat, found_color, cv2.HISTCMP_CORREL)
                    texture_score = cv2.compareHist(query_texture_feat, found_texture, cv2.HISTCMP_CORREL)

                    raw_image_score = (FEATURE_WEIGHTS["shape"] * shape_score +
                                       FEATURE_WEIGHTS["color"] * color_score +
                                       FEATURE_WEIGHTS["texture"] * texture_score)

                    print(f"        - Raw Image Score: {raw_image_score:.4f}")
                    
                    image_score = stretch_image_score(raw_image_score)
                    
                    # Weighted average of image and text scores
                    final_score = 0.4 * image_score + 0.6 * text_score
                    print(f"        - Image Score: {image_score:.4f} | Final Score: {final_score:.4f}")
                
                else:
                    # If one or both items lack an image, the final score is JUST the text score
                    print("        - One or both items missing image. Using text score only.")
                    final_score = text_score

                # Check if the final score meets the threshold
                if final_score >= FINAL_SCORE_THRESHOLD:
                    print(f"        - βœ… ACCEPTED (Score >= {FINAL_SCORE_THRESHOLD})")
                    results.append({
                        "_id": item_id,
                        "score": round(final_score, 4),
                        "objectName": item.get("objectName"),
                        "objectDescription": item.get("objectDescription"),
                        "objectImage": item.get("objectImage"),
                    })
                else:
                    print(f"        - ❌ REJECTED (Score < {FINAL_SCORE_THRESHOLD})")

            except Exception as e:
                print(f"  [Skipping] Item {item_id} due to processing error: {e}")
                continue
        
        results.sort(key=lambda x: x["score"], reverse=True)
        print(f"\nβœ… Search complete. Found {len(results)} potential matches.")
        print("="*50)
        return jsonify({"matches": results}), 200

    except Exception as e:
        print(f"❌ Error in /compare: {e}")
        traceback.print_exc()
        return jsonify({"error": str(e)}), 500
        
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860)