sohamnk committed
Commit
cb8fd55
·
verified ·
1 Parent(s): a5cacc2
Files changed (1)
  1. pipeline/logic.py +96 -0
pipeline/logic.py CHANGED
@@ -0,0 +1,96 @@
+ # pipeline/logic.py
+ import numpy as np
+ import requests
+ import cv2
+ from skimage import feature
+ from io import BytesIO
+ from PIL import Image
+ import torch
+
+ def get_canonical_label(object_name_phrase: str) -> str:
+     print(f"\n [Label] Extracting label for: '{object_name_phrase}'")
+     words = object_name_phrase.strip().lower().split()
+     # Guard against empty input before taking the last word.
+     label = ''.join(filter(str.isalpha, words[-1])) if words else ""
+     print(f" [Label] ✅ Extracted label: '{label}'")
+     return label if label else "unknown"
+
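For example, get_canonical_label("two shiny Red Apples!") lower-cases the phrase, keeps the last word, and strips non-alphabetic characters, returning "apples".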
+ def download_image_from_url(image_url: str) -> Image.Image:
+     print(f" [Download] Downloading image from: {image_url[:80]}...")
+     # A timeout keeps the pipeline from hanging on an unresponsive host.
+     response = requests.get(image_url, timeout=30)
+     response.raise_for_status()
+     image = Image.open(BytesIO(response.content))
+     image_rgb = image.convert("RGB")
+     print(" [Download] ✅ Image downloaded and standardized to RGB.")
+     return image_rgb
+
+ def detect_and_crop(image: Image.Image, object_name: str, models: dict) -> Image.Image:
+     print(f"\n [Detect & Crop] Starting detection for object: '{object_name}'")
+     image_np = np.array(image.convert("RGB"))
+     height, width = image_np.shape[:2]
+     prompt = [[f"a {object_name}"]]
+     inputs = models['processor_gnd'](images=image, text=prompt, return_tensors="pt").to(models['device'])
+     with torch.no_grad():
+         outputs = models['model_gnd'](**inputs)
+     results = models['processor_gnd'].post_process_grounded_object_detection(
+         outputs, inputs.input_ids, box_threshold=0.4, text_threshold=0.3, target_sizes=[(height, width)]
+     )
+     if not results or len(results[0]['boxes']) == 0:
+         print(" [Detect & Crop] ⚠ Warning: Grounding DINO did not detect the object. Using full image.")
+         return image
+     result = results[0]
+     scores = result['scores']
+     max_idx = int(torch.argmax(scores))
+     box = result['boxes'][max_idx].cpu().numpy().astype(int)
+     print(f" [Detect & Crop] ✅ Object detected with confidence: {scores[max_idx]:.2f}, Box: {box}")
+     x1, y1, x2, y2 = box
+     # Clamp the box to the image bounds; detector boxes can spill past the edges.
+     x1, y1 = max(x1, 0), max(y1, 0)
+     x2, y2 = min(x2, width), min(y2, height)
+     # SAM refines the coarse box into a pixel-accurate mask.
+     models['predictor'].set_image(image_np)
+     box_prompt = np.array([[x1, y1, x2, y2]])
+     masks, _, _ = models['predictor'].predict(box=box_prompt, multimask_output=False)
+     mask_bool = masks[0] > 0
+     # Compose an RGBA cutout: RGB from the original, alpha from the mask.
+     cropped_img_rgba = np.zeros((height, width, 4), dtype=np.uint8)
+     cropped_img_rgba[:, :, :3] = image_np
+     cropped_img_rgba[:, :, 3] = mask_bool * 255
+     cropped_img_rgba = cropped_img_rgba[y1:y2, x1:x2]
+     return Image.fromarray(cropped_img_rgba, 'RGBA')
+
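detect_and_crop assumes a models dict carrying a Grounding DINO processor/model pair ('processor_gnd', 'model_gnd'), a SAM predictor ('predictor'), and a 'device'. The loader is not part of this commit; a minimal sketch, assuming the public grounding-dino-tiny checkpoint and a SAM ViT-B weight file (both assumptions, not the repo's confirmed choices), might look like:

import torch
from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection
from segment_anything import sam_model_registry, SamPredictor

def load_models() -> dict:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Grounding DINO: open-vocabulary detector driven by the text prompt.
    processor = AutoProcessor.from_pretrained("IDEA-Research/grounding-dino-tiny")
    detector = AutoModelForZeroShotObjectDetection.from_pretrained(
        "IDEA-Research/grounding-dino-tiny").to(device)
    # SAM: segments whatever falls inside the detector's box prompt.
    sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b_01ec64.pth")  # assumed local path
    sam.to(device)
    return {"processor_gnd": processor, "model_gnd": detector,
            "predictor": SamPredictor(sam), "device": device}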
+ def extract_features(segmented_image: Image.Image) -> dict:
+     image_rgba = np.array(segmented_image)
+     if image_rgba.shape[2] != 4:
+         raise ValueError("Segmented image must be RGBA")
+     # PIL arrays are RGBA-ordered, so cv2.split yields R, G, B, A.
+     r, g, b, a = cv2.split(image_rgba)
+     image_bgr = cv2.merge((b, g, r))
+     mask = a
+     gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)
+     contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+     # Use the largest contour; findContours returns them in no useful order.
+     largest = max(contours, key=cv2.contourArea) if contours else None
+     hu_moments = cv2.HuMoments(cv2.moments(largest)).flatten() if largest is not None else np.zeros(7)
+     color_hist = cv2.calcHist([image_bgr], [0, 1, 2], mask, [8, 8, 8], [0, 256, 0, 256, 0, 256])
+     cv2.normalize(color_hist, color_hist)
+     color_hist = color_hist.flatten()
+     gray_masked = cv2.bitwise_and(gray, gray, mask=mask)
+     lbp = feature.local_binary_pattern(gray_masked, P=24, R=3, method="uniform")
+     (texture_hist, _) = np.histogram(lbp.ravel(), bins=np.arange(0, 27), range=(0, 26))
+     texture_hist = texture_hist.astype("float32")
+     texture_hist /= (texture_hist.sum() + 1e-6)
+     return {
+         "shape_features": hu_moments.tolist(),
+         "color_features": color_hist.tolist(),
+         "texture_features": texture_hist.tolist()
+     }
+
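extract_features returns three classical descriptors: Hu moments for shape, an 8×8×8 joint BGR color histogram, and a 26-bin uniform LBP histogram for texture. How they are weighed downstream is not shown in this commit; one plausible sketch compares like with like via the module's own cosine_similarity (crop_a and crop_b are hypothetical RGBA crops from detect_and_crop):

feats_a = extract_features(crop_a)
feats_b = extract_features(crop_b)
color_sim = cosine_similarity(np.array(feats_a["color_features"]),
                              np.array(feats_b["color_features"]))
texture_sim = cosine_similarity(np.array(feats_a["texture_features"]),
                                np.array(feats_b["texture_features"]))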
+ def get_text_embedding(text: str, models: dict) -> list:
+     print(f" [Embedding] Generating text embedding for: '{text[:50]}...'")
+     # BGE-style retrieval instruction prepended to the query.
+     text_with_instruction = f"Represent this sentence for searching relevant passages: {text}"
+     inputs = models['tokenizer_text'](text_with_instruction, return_tensors='pt', padding=True, truncation=True, max_length=512).to(models['device'])
+     with torch.no_grad():
+         outputs = models['model_text'](**inputs)
+     # CLS pooling, then L2 normalization so dot products are cosine similarities.
+     embedding = outputs.last_hidden_state[:, 0, :]
+     embedding = torch.nn.functional.normalize(embedding, p=2, dim=1)
+     print(" [Embedding] ✅ Text embedding generated.")
+     return embedding.cpu().numpy()[0].tolist()
+
+ def cosine_similarity(vec1: np.ndarray, vec2: np.ndarray) -> float:
+     # Epsilon guards the division when either vector is all zeros.
+     return float(np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2) + 1e-12))
+
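get_text_embedding prepends the BGE retrieval instruction and CLS-pools the output, so any BGE-family encoder fits the code above. A usage sketch, assuming BAAI/bge-base-en-v1.5 (the actual checkpoint is not named in this commit):

from transformers import AutoTokenizer, AutoModel

device = "cuda" if torch.cuda.is_available() else "cpu"
models = {"tokenizer_text": AutoTokenizer.from_pretrained("BAAI/bge-base-en-v1.5"),
          "model_text": AutoModel.from_pretrained("BAAI/bge-base-en-v1.5").to(device),
          "device": device}
vec_a = np.array(get_text_embedding("a ripe red apple", models))
vec_b = np.array(get_text_embedding("a green pear", models))
print(f"similarity: {cosine_similarity(vec_a, vec_b):.3f}")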
+ def stretch_image_score(score):
+     # Linearly remap raw scores in [0.4, 1.0) onto [0.7, 0.99); scores below
+     # 0.4 and exact matches (1.0) pass through unchanged.
+     if score < 0.4 or score == 1.0:
+         return score
+     return 0.7 + (score - 0.4) * (0.99 - 0.7) / (1.0 - 0.4)
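Worked through: stretch_image_score(0.4) gives 0.7, stretch_image_score(0.7) gives 0.7 + 0.3 × (0.29 / 0.6) = 0.845, and values just under 1.0 approach 0.99, while an exact 1.0 passes through as a perfect match. The jump from 0.99 to 1.0 at the top end appears deliberate, separating near-matches from exact ones.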