satvs committed
Commit cd8c2b8 · 1 Parent(s): 1288dc4

Optimize submission

Files changed (1)
  1. tasks/image.py +27 -37
tasks/image.py CHANGED
@@ -116,52 +116,42 @@ async def evaluate_image(request: ImageEvaluationRequest):
     model = YOLO(Path(model_path, model_name), task="detect")
     device_name = device("cuda" if is_available() else "cpu")
 
-    # Preprocess annotations before the loop
-    preprocessed_annotations = [parse_boxes(example.get("annotations", "").strip()) for example in test_dataset]
-
-    batch_size = 16 # Define a batch size
-    batch_images = []
-    batch_annotations = []
-
     predictions = []
     true_labels = []
     pred_boxes = []
     true_boxes_list = [] # List of lists, each inner list contains boxes for one image
 
-    logging.info(f"Inference start on device: {device_name}")
+    # Preprocess annotations before the loop
+    preprocessed_annotations = [parse_boxes(example.get("annotations", "").strip()) for example in test_dataset]
 
     # Use torch.no_grad() to disable gradient tracking during inference
     with no_grad():
+        predictions = []
+        true_labels = []
+        pred_boxes = []
+        true_boxes_list = [] # List of lists, each inner list contains boxes for one image
+
+        logging.info(f"Inference start on device: {device_name}")
         for idx, example in enumerate(test_dataset):
-            batch_images.append(example["image"])
-            batch_annotations.append(preprocessed_annotations[idx])
-
-            # When the batch size is met, or it's the last image, perform inference
-            if (len(batch_images) == batch_size or idx == len(test_dataset) - 1):
-                # Make a prediction for the current batch
-                results = model.predict(batch_images, device=device_name, conf=THRESHOLD, verbose=False, half=True, imgsz=IMGSIZE)[0]
-
-                for batch_idx, result in enumerate(results):
-                    annotation = batch_annotations[batch_idx]
-                    has_smoke = len(annotation) > 0
-                    true_labels.append(int(has_smoke))
-
-                    pred_has_smoke = len(result) > 0
-                    predictions.append(int(pred_has_smoke))
-
-                    if has_smoke:
-                        true_boxes_list.append(annotation)
-
-                    # Handle prediction boxes for each image in the batch
-                    if result.boxes.cls.numel() != 0:
-                        pred_boxes.append(result.boxes[0].xywhn.tolist()[0])
-                    else:
-                        pred_boxes.append([0, 0, 0, 0])
-
-                # Clear the batch after processing
-                batch_images.clear()
-                batch_annotations.clear()
-
+            annotation = preprocessed_annotations[idx]
+            has_smoke = len(annotation) > 0
+            true_labels.append(int(has_smoke))
+
+            # Make prediction for the current image
+            results = model.predict(example["image"], device=device_name, conf=THRESHOLD, verbose=False, imgsz=IMGSIZE)[0]
+
+            pred_has_smoke = len(results) > 0
+            predictions.append(int(pred_has_smoke))
+
+            # If there's a true box, add it to the list
+            if has_smoke:
+                true_boxes_list.append(annotation) # True boxes are already preprocessed
+
+            # Handle prediction boxes: Append first box (or default box if none detected)
+            if results.boxes.cls.numel() != 0:
+                pred_boxes.append(results.boxes[0].xywhn.tolist()[0])
+            else:
+                pred_boxes.append([0, 0, 0, 0])
 
     #--------------------------------------------------------------------------------------------
     # YOUR MODEL INFERENCE STOPS HERE
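
Note on the removed batched path: Ultralytics' model.predict returns one Results object per input image, so indexing the returned list with [0] kept only the first image's result for each 16-image batch; this commit sidesteps that by predicting one image at a time. A minimal sketch (not the submission code) of batched inference that consumes every result, assuming the ultralytics package used by tasks/image.py; the weights name, dummy images, and the conf/imgsz values here are illustrative placeholders:

import numpy as np
from ultralytics import YOLO  # assumption: same ultralytics API as tasks/image.py

model = YOLO("yolo11n.pt", task="detect")  # hypothetical weights, for illustration
images = [np.zeros((640, 640, 3), dtype=np.uint8) for _ in range(4)]  # dummy 4-image batch

# predict() returns a list with one Results object per input image, so iterate
# the whole list rather than indexing [0] (which drops all but the first image).
for result in model.predict(images, conf=0.5, imgsz=640, verbose=False):
    if result.boxes.cls.numel() != 0:
        # xywhn holds normalized (x_center, y_center, width, height) boxes
        print(result.boxes[0].xywhn.tolist()[0])
    else:
        print([0, 0, 0, 0])  # default box when nothing is detected

Predicting per image as the commit does trades some GPU throughput for per-result bookkeeping that is straightforward to keep aligned with the ground-truth annotations.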