Update app.py
app.py (CHANGED)
@@ -48,12 +48,12 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Use GPU
 model = attempt_load(model_path, device=device) # Placeholder for model loading
 model.eval() # Set the model to evaluation mode
 
-
-def preprocess_image(image):
-
+def preprocess_image(image_path):
+#def preprocess_image(image):
+    img0 = cv2.imread(image_path)
     print("in preprocess-0 image.shape:",image.size)
-    img = letterbox(image, 640, stride=32, auto=True)[0] # Resize and pad to 640x640
-
+    #img = letterbox(image, 640, stride=32, auto=True)[0] # Resize and pad to 640x640
+    img = letterbox(img0, 640, stride=32, auto=True)[0] # Resize and pad to 640x640
     print("in preprocess-1 img.shape:",img.shape)
     img = img.transpose(2, 0, 1)[::-1] # Convert BGR to RGB, to 3x416x416
     img = np.ascontiguousarray(img)
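Read as a whole, this hunk switches preprocess_image from taking an already-decoded image to taking a file path that it reads itself with cv2.imread. For orientation only, here is a minimal end-to-end sketch of the revised function, not the committed code: it assumes letterbox and device are the module-level helpers visible in the surrounding context, assumes the line elided between the two hunks converts the NumPy array to a torch tensor, assumes the function returns (img, img0) as detect_objects expects, and adjusts the first debug print to use img0 because the old image argument no longer exists.

    # Minimal sketch under the assumptions stated above; not the committed code.
    import cv2
    import numpy as np
    import torch

    def preprocess_image(image_path):
        # letterbox and device are assumed to be defined earlier in app.py.
        img0 = cv2.imread(image_path)                         # original BGR image, HxWx3
        print("in preprocess-0 img0.shape:", img0.shape)
        img = letterbox(img0, 640, stride=32, auto=True)[0]   # resize and pad to 640x640
        print("in preprocess-1 img.shape:", img.shape)
        img = img.transpose(2, 0, 1)[::-1]                    # HWC BGR -> CHW RGB
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(device)                # assumed step not shown in the diff
        img = img.float()                                     # uint8 to fp32
        img /= 255.0                                          # 0-255 to 0.0-1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)                            # add batch dimension
        print("in preprocess-2 img.shape:", img.shape)
        return img, img0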
@@ -61,7 +61,6 @@ def preprocess_image(image):
     img = img.float() # uint8 to fp16/32
     img /= 255.0 # 0 - 255 to 0.0 - 1.0
     if img.ndimension() == 3:
-        #img = img.transpose(2, 0, 1)[::-1] # Convert BGR to RGB,
         img = img.unsqueeze(0)
     print("in preprocess-2 img.shape:",img.shape)
 
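As a quick, self-contained illustration of what the unchanged lines in this hunk do (not from the diff, just a check of the shape and dtype conventions): a letterboxed 640x640 uint8 BGR array ends up as a 1x3x640x640 float32 tensor with values in [0, 1].

    import numpy as np
    import torch

    # Stand-in for a letterboxed frame: HxWx3, uint8, BGR channel order.
    frame = np.random.randint(0, 256, (640, 640, 3), dtype=np.uint8)

    chw = np.ascontiguousarray(frame.transpose(2, 0, 1)[::-1])  # CHW, RGB
    t = torch.from_numpy(chw).float() / 255.0                   # fp32 in [0, 1]
    if t.ndimension() == 3:
        t = t.unsqueeze(0)                                      # add batch dimension
    print(t.shape, t.dtype)  # torch.Size([1, 3, 640, 640]) torch.float32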
@@ -86,25 +85,25 @@ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
     coords[:, :4].clip_(min=0, max=img1_shape[0]) # clip boxes
     return coords
 
-
-def postprocess(pred, img0, img):
+def postprocess(pred, img0_shape, img):
+#def postprocess(pred, img0, img):
     pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False)
     results = []
     for det in pred: # detections per image
         if len(det):
-
-            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
+            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0_shape).round()
+            #det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
             for *xyxy, conf, cls in reversed(det):
                 results.append((xyxy, conf, cls))
     return results
 
 def detect_objects(image_path):
-    dicom_image, dicom_meta = read_and_preprocess_dicom(image_path)
-
+    #dicom_image, dicom_meta = read_and_preprocess_dicom(image_path)
+    img, img0 = preprocess_image(image_path)
     img, img0 = preprocess_image(dicom_image)
     pred = infer(model, img)
-
-    results = postprocess(pred, dicom_image, img)
+    results = postprocess(pred, img0.shape, img)
+    #results = postprocess(pred, dicom_image, img)
     return results, dicom_image
 
 def draw_bounding_boxes(img, results):
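The detect_objects part of this hunk moves the function onto the plain image-path flow, but it keeps the old img, img0 = preprocess_image(dicom_image) call and return results, dicom_image as unchanged context, even though dicom_image is now only produced by the commented-out line. Purely as a hedged sketch of where this appears to be heading (assuming preprocess_image returns (tensor, original image) and that infer wraps the model's forward pass, both as used elsewhere in app.py), the function could read:

    # Hedged sketch, not the committed code: drops the leftover dicom_image references.
    def detect_objects(image_path):
        img, img0 = preprocess_image(image_path)       # model input tensor + original image
        pred = infer(model, img)                       # forward pass
        results = postprocess(pred, img0.shape, img)   # boxes rescaled to img0 coordinates
        return results, img0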
@@ -117,7 +116,7 @@ def draw_bounding_boxes(img, results):
 def show_preds_image(filepath):
     #dicom_image, dicom_meta = read_and_preprocess_dicom(image_path)
     results, img0 = detect_objects(filepath)
-
+    img0 = cv2.imread(filepath)
     #img_with_boxes = draw_bounding_boxes(img0, results)
     img_with_boxes = draw_bounding_boxes(img0, results)
     return cv2.cvtColor(img_with_boxes, cv2.COLOR_BGR2RGB)
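draw_bounding_boxes itself is not touched by this commit and its body is not shown, so the following is a purely hypothetical sketch of a consumer of the (xyxy, conf, cls) tuples that postprocess returns (coordinates already rescaled to the original image), included only to make the data flow concrete:

    import cv2

    # Hypothetical sketch; the real draw_bounding_boxes in app.py is not shown in this diff.
    def draw_bounding_boxes(img, results):
        out = img.copy()
        for xyxy, conf, cls in results:
            x1, y1, x2, y2 = (int(v) for v in xyxy)    # xyxy holds four scalar tensors
            cv2.rectangle(out, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(out, f"{int(cls)} {float(conf):.2f}", (x1, max(y1 - 5, 0)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        return out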