wangjin2000 committed (verified)
Commit 53d71b3 · Parent(s): 63257d8

Update app.py

Files changed (1):
  app.py (+22 -55)
app.py CHANGED
@@ -18,44 +18,20 @@ from yolov5.models.experimental import attempt_load
 from yolov5.utils.general import non_max_suppression
 from yolov5.utils.augmentations import letterbox
 
-'''
-# Example URLs for downloading images
-file_urls = [
-    "https://www.dropbox.com/scl/fi/n3bs5xnl2kanqmwv483k3/1_jpg.rf.4a59a63d0a7339d280dd18ef3c2e675a.jpg?rlkey=4n9dnls1byb4wm54ycxzx3ovi&st=ue5xv8yx&dl=0",
-    "https://www.dropbox.com/scl/fi/asrmao4b4fpsrhqex8kog/2_jpg.rf.b87583d95aa220d4b7b532ae1948e7b7.jpg?rlkey=jkmux5jjy8euzhxizupdmpesb&st=v3ld14tx&dl=0",
-    "https://www.dropbox.com/scl/fi/fi0e8zxqqy06asnu0robz/3_jpg.rf.d2932cce7e88c2675e300ececf9f1b82.jpg?rlkey=hfdqwxkxetabe38ukzbb39pl5&st=ga1uouhj&dl=0",
-    "https://www.dropbox.com/scl/fi/ruobyat1ld1c33ch5yjpv/4_jpg.rf.3395c50b4db0ec0ed3448276965b2459.jpg?rlkey=j1m4qa0pmdh3rlr344v82u3am&st=lex8h3qi&dl=0",
-    "https://www.dropbox.com/scl/fi/ok3izk4jj1pg6psxja3aj/5_jpg.rf.62f3dc64b6c894fbb165d8f6e2ee1382.jpg?rlkey=euu16z8fd8u8za4aflvu5qg4v&st=pwno39nc&dl=0",
-    "https://www.dropbox.com/scl/fi/8r1fpwxkwq7c2i6ky6qv5/10_jpg.rf.c1785c33dd3552e860bf043c2fd0a379.jpg?rlkey=fcw41ppgzu0ao7xo6ijbpdi4c&st=to2udvxb&dl=0",
-    "https://www.dropbox.com/scl/fi/ihiid7hbz1vvaoqrstwa5/7_jpg.rf.dfc30f9dc198cf6697d9023ac076e822.jpg?rlkey=yh67p4ex52wn9t0bfw0jr77ef&st=02qw80xa&dl=0",
-]
-
-def download_file(url, save_name):
-    """Downloads a file from a URL."""
-    if not os.path.exists(save_name):
-        file = requests.get(url)
-        with open(save_name, 'wb') as f:
-            f.write(file.content)
-
-# Download images
-for i, url in enumerate(file_urls):
-    download_file(url, f"image_{i}.jpg")
-'''
-
 # Load YOLOv5 model (placeholder)
 model_path = "best.pt" # Path to your YOLOv5 model
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Use GPU if available
 model = attempt_load(model_path, device=device) # Placeholder for model loading
 model.eval() # Set the model to evaluation mode
 
-def preprocess_image(image_path):
-#def preprocess_image(image):
-    img0 = cv2.imread(image_path)
-    print("in preprocess-0 image.shape:",img0.size)
-    #img = letterbox(image, 640, stride=32, auto=True)[0] # Resize and pad to 640x640
-    img = letterbox(img0, 640, stride=32, auto=True)[0] # Resize and pad to 640x640
+#def preprocess_image(image_path):
+def preprocess_image(image):
+    #img0 = cv2.imread(image_path)
+    print("in preprocess-0 image.shape:",image.size)
+    img = letterbox(image, 640, stride=32, auto=True)[0] # Resize and pad to 640x640
+    #img = letterbox(img0, 640, stride=32, auto=True)[0] # Resize and pad to 640x640
     print("in preprocess-1 img.shape:",img.shape)
-    img = img.transpose(2, 0, 1)[::-1] # Convert BGR to RGB, to 3x416x416
+    img = img.transpose(2, 0, 1)[::-1] # Convert BGR to RGB,
     img = np.ascontiguousarray(img)
     img = torch.from_numpy(img).to(device)
     img = img.float() # uint8 to fp16/32
@@ -64,8 +40,8 @@ def preprocess_image(image_path):
     img = img.unsqueeze(0)
     print("in preprocess-2 img.shape:",img.shape)
 
-    #return img, image
-    return img, img0
+    return img, image
+    #return img, img0
 
 def infer(model, img):
     with torch.no_grad():
@@ -86,25 +62,25 @@ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
     coords[:, :4].clip_(min=0, max=img1_shape[0]) # clip boxes
     return coords
 
-def postprocess(pred, img0_shape, img):
-#def postprocess(pred, img0, img):
+#def postprocess(pred, img0_shape, img):
+def postprocess(pred, img0, img):
     pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False)
     results = []
     for det in pred: # detections per image
         if len(det):
-            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0_shape).round()
-            #det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
+            #det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0_shape).round()
+            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
             for *xyxy, conf, cls in reversed(det):
                 results.append((xyxy, conf, cls))
     return results
 
 def detect_objects(image_path):
-    #dicom_image, dicom_meta = read_and_preprocess_dicom(image_path)
-    img, img0 = preprocess_image(image_path)
-    #img, img0 = preprocess_image(dicom_image)
+    dicom_image, dicom_meta = read_and_preprocess_dicom(image_path)
+    #img, img0 = preprocess_image(image_path)
+    img, img0 = preprocess_image(dicom_image)
     pred = infer(model, img)
-    results = postprocess(pred, img0.shape, img)
-    #results = postprocess(pred, dicom_image, img)
+    #results = postprocess(pred, img0.shape, img)
+    results = postprocess(pred, dicom_image, img)
     return results #, dicom_image
 
 def draw_bounding_boxes(img, results):
@@ -115,11 +91,9 @@ def draw_bounding_boxes(img, results):
     return img
 
 def show_preds_image(filepath):
-    #dicom_image, dicom_meta = read_and_preprocess_dicom(image_path)
-    #results, img0 = detect_objects(filepath)
-    results = detect_objects(filepath)
-    img0 = cv2.imread(filepath)
-    #img_with_boxes = draw_bounding_boxes(img0, results)
+    results, img0 = detect_objects(filepath)
+    #results = detect_objects(filepath)
+    #img0 = cv2.imread(filepath)
     img_with_boxes = draw_bounding_boxes(img0, results)
     return cv2.cvtColor(img_with_boxes, cv2.COLOR_BGR2RGB)
 
@@ -134,14 +108,7 @@ interface = gr.Interface(
     inputs=input_component,
     outputs=output_component,
     title="Lung Nodule Detection",
-    examples=[
-        "image_1.jpg",
-        "image_2.jpg",
-        "image_3.jpg",
-        "image_4.jpg",
-        "image_5.jpg",
-        "image_6.jpg",
-    ],
+    examples=['samples/81_80.dcm','samples/110_109.dcm','samples/189_188.dcm'],
     description=' "This online deployment proves the effectiveness and efficient function of the machine learning model in identifying lung cancer nodules. The implementation of YOLO for core detection tasks is employed that is an efficient and accurate algorithm for object detection. Through the precise hyper-parameter tuning process, the model proposed in this paper has given an impressive boost in the performance. Moreover, the model uses Retinanet algorithm which is recognized as the powerful tool effective in dense object detection. In an attempt to enhance the model’s performance, the backbone of this architecture consists of a Feature Pyramid Network (FPN). The FPN plays an important role in boosting the model’s capacity in recognizing objects in different scales through the construction of high semantic feature map in different resolutions. In conclusion, this deployment encompasses YOLOv5, hyperparameter optimization, Retinanet, and FPN as one of the most effective and modern solutions for the detection of lung cancer nodules." ~ Basil Shaji 😇',
     live=False,
 )
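
Note: the updated detect_objects calls read_and_preprocess_dicom(image_path), whose definition is not part of this diff and is presumably defined elsewhere in app.py. As a minimal, hypothetical sketch only (not the committed implementation), such a helper might look like the following, assuming pydicom is installed and that the function returns an 8-bit, 3-channel image array plus the DICOM dataset, which is the shape preprocess_image and draw_bounding_boxes expect in the updated code:

# Hypothetical sketch -- not the committed read_and_preprocess_dicom().
# Assumes pydicom is installed and that the helper returns (image, metadata),
# matching how detect_objects() unpacks its result in the diff above.
import numpy as np
import pydicom

def read_and_preprocess_dicom(image_path):
    """Read a DICOM file and return an 8-bit, 3-channel image plus the dataset."""
    ds = pydicom.dcmread(image_path)            # parse the DICOM file
    pixels = ds.pixel_array.astype(np.float32)  # raw pixel data

    # Normalize to 0-255 so letterbox() and cv2 drawing work on a uint8 image.
    pixels -= pixels.min()
    if pixels.max() > 0:
        pixels /= pixels.max()
    image = (pixels * 255).astype(np.uint8)

    # Replicate the grayscale channel to HxWx3, since preprocess_image()
    # transposes the array with (2, 0, 1) and flips the channel axis.
    if image.ndim == 2:
        image = np.stack([image, image, image], axis=-1)

    return image, ds

With a helper along these lines, the samples/*.dcm example files added in this commit can be passed through detect_objects and rendered by show_preds_image without the old cv2.imread path.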