sohamnk committed
Commit fa5fb60 · verified · 1 Parent(s): 8e1c8f1

Update pipeline/logic.py

Files changed (1)
  1. pipeline/logic.py +20 -2
pipeline/logic.py CHANGED
@@ -29,32 +29,50 @@ def detect_and_crop(image: Image.Image, object_name: str, models: dict) -> Image
     image_np = np.array(image.convert("RGB"))
     height, width = image_np.shape[:2]
     prompt = [[f"a {object_name}"]]
-    inputs = models['processor_gnd'](images=image, text=prompt, return_tensors="pt").to(models['device'])
+
+    inputs = models['processor_gnd'](
+        images=image,
+        text=prompt,
+        return_tensors="pt"
+    ).to(models['device'])
+
     with torch.no_grad():
         outputs = models['model_gnd'](**inputs)
+
+    # Updated signature: use threshold and text_threshold, no box_threshold
     results = models['processor_gnd'].post_process_grounded_object_detection(
-        outputs, inputs.input_ids, box_threshold=0.4, text_threshold=0.3, target_sizes=[(height, width)]
+        outputs=outputs,
+        input_ids=inputs.input_ids,
+        threshold=0.4,
+        text_threshold=0.3,
+        target_sizes=[(height, width)]
     )
+
     if not results or len(results[0]['boxes']) == 0:
         print(" [Detect & Crop] ⚠ Warning: Grounding DINO did not detect the object. Using full image.")
         return image
+
     result = results[0]
     scores = result['scores']
     max_idx = int(torch.argmax(scores))
     box = result['boxes'][max_idx].cpu().numpy().astype(int)
     print(f" [Detect & Crop] ✅ Object detected with confidence: {scores[max_idx]:.2f}, Box: {box}")
+
     x1, y1, x2, y2 = box
     models['predictor'].set_image(image_np)
     box_prompt = np.array([[x1, y1, x2, y2]])
     masks, _, _ = models['predictor'].predict(box=box_prompt, multimask_output=False)
     mask = masks[0]
     mask_bool = mask > 0
+
     cropped_img_rgba = np.zeros((height, width, 4), dtype=np.uint8)
     cropped_img_rgba[:, :, :3] = image_np
     cropped_img_rgba[:, :, 3] = mask_bool * 255
     cropped_img_rgba = cropped_img_rgba[y1:y2, x1:x2]
+
     return Image.fromarray(cropped_img_rgba, 'RGBA')
 
+
 def extract_features(segmented_image: Image.Image) -> dict:
     image_rgba = np.array(segmented_image)
     if image_rgba.shape[2] != 4: raise ValueError("Segmented image must be RGBA")
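
For context on the API change this commit tracks: recent transformers releases renamed the box_threshold argument of GroundingDinoProcessor.post_process_grounded_object_detection to threshold (and recommend passing arguments by keyword). The sketch below shows the updated call in isolation, assuming a recent transformers release; the checkpoint id, image path, and prompt are illustrative placeholders, not taken from this repository.

# Minimal sketch of the updated post-processing signature.
# Assumptions: a recent transformers release; "IDEA-Research/grounding-dino-tiny"
# and "example.jpg" are placeholders, not from this commit.
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection

device = "cuda" if torch.cuda.is_available() else "cpu"
model_id = "IDEA-Research/grounding-dino-tiny"
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForZeroShotObjectDetection.from_pretrained(model_id).to(device)

image = Image.open("example.jpg").convert("RGB")
# Same nested-list prompt format as the committed code: [["a <object>"]]
inputs = processor(images=image, text=[["a dog"]], return_tensors="pt").to(device)

with torch.no_grad():
    outputs = model(**inputs)

# `threshold` replaces the former `box_threshold` keyword; the other
# arguments keep their old meaning.
results = processor.post_process_grounded_object_detection(
    outputs=outputs,
    input_ids=inputs.input_ids,
    threshold=0.4,
    text_threshold=0.3,
    target_sizes=[image.size[::-1]],  # PIL size is (w, h); target wants (h, w)
)
print(results[0]["scores"], results[0]["boxes"])

On older transformers versions the same call would be spelled with box_threshold=0.4, which is exactly the keyword this commit removes.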