Abijith committed on
Commit 1114c33 · 1 Parent(s): 8a448cc

Update app.py

Files changed (1):
  1. app.py +3 -3
app.py CHANGED
@@ -77,9 +77,9 @@ def clipseg_prediction(image):
 
     # Vehicle checking
     if bbox_area(vehicle_bbox) > bbox_area(damage_bbox):
-        return True, [bbox_normalization(damage_bbox, img_w, img_h)]
+        return True, bbox_normalization(damage_bbox, img_w, img_h)
     else:
-        return False, [[]]
+        return False, []
 
 
 @torch.no_grad()
@@ -87,7 +87,7 @@ def foward_pass(image_input: np.ndarray, points: List[List[int]]) -> np.ndarray:
     print('SAM-Segmentation-started------->')
     global cache_data
     image_input = Image.fromarray(image_input)
-    inputs = processor(image_input, input_boxes=points, return_tensors="pt").to(device)
+    inputs = processor(image_input, input_boxes=np.array(points), return_tensors="pt").to(device)
     if not cache_data or not torch.equal(inputs['pixel_values'],cache_data[0]):
         embedding = model.get_image_embeddings(inputs["pixel_values"])
         pixels = inputs["pixel_values"]
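
For reference, a minimal sketch of the SamProcessor/SamModel call pattern this hunk touches, following the Hugging Face SAM docs. The checkpoint name, image path, and box coordinates below are illustrative assumptions, not taken from this repo. Note that the commit passes input_boxes as np.array(points), while the documented prompt format is a nested Python list of shape (batch, boxes_per_image, 4); whether an ndarray is accepted there depends on the installed transformers version, so this sketch sticks to the documented list form.

```python
import torch
from PIL import Image
from transformers import SamModel, SamProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"

# Illustrative checkpoint; the one app.py actually loads is not shown in this diff.
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model = SamModel.from_pretrained("facebook/sam-vit-base").to(device)

image = Image.open("car.jpg").convert("RGB")  # hypothetical input image

# Documented prompt format: one [x1, y1, x2, y2] box per image,
# nested as (batch, boxes_per_image, 4).
input_boxes = [[[100, 150, 400, 380]]]

inputs = processor(image, input_boxes=input_boxes, return_tensors="pt").to(device)

with torch.no_grad():
    # Image embeddings can be computed once per image and reused,
    # which is what app.py's cache_data comparison on pixel_values enables.
    embedding = model.get_image_embeddings(inputs["pixel_values"])
```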