sumit-ai-ml committed
Commit 3284380 · 1 Parent(s): a5681c5

Update app.py

Files changed (1)
  1. app.py +12 -42
app.py CHANGED
@@ -13,15 +13,15 @@ available_models = {
     # Add more models as needed
 }
 
-
-def segment_image(input_image):
+def segment_image(input_image, selected_model):
     # Resize the input image to 255x255
     img = np.array(input_image)
     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-
+    model = available_models[selected_model]
+
     # Perform object detection and segmentation
     results = model(img)
-    mask = results[0].masks.data.numpy()
+    mask = results[0].masks.data.numpy()
     target_height = img.shape[0]
     target_width = img.shape[1]
 
@@ -52,7 +52,6 @@ def segment_image(input_image):
     predictor = SamPredictor(sam)
     predictor.set_image(img)
 
-
     input_box = np.array(bbox)
     masks_, _, _ = predictor.predict(
         point_coords=None,
@@ -62,7 +61,7 @@ def segment_image(input_image):
 
     fmask = masks_[0].astype(int)
 
-    resized_mask1 =cv2.resize(fmask, (target_width, target_height))
+    resized_mask1 = cv2.resize(fmask, (target_width, target_height))
     resized_mask1 = (resized_mask1 * 255).astype(np.uint8)
 
     overlay_image1 = img.copy()
@@ -72,54 +71,25 @@ def segment_image(input_image):
     # Convert the overlay image to PIL format
     overlay_pil1 = Image.fromarray(overlay_image1)
 
-
     return overlay_pil, overlay_pil1 # Return both overlay image and mask
 
-
-
-# Create a function to perform image segmentation using the selected model
-'''def segment_image(input_image, selected_model):
-    # Resize the input image to 255x255
-    img = np.array(input_image)
-    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-
-    # Perform object detection and segmentation using the selected model
-    model = available_models[selected_model]
-    results = model(img)
-    mask = results[0].masks.data.numpy()
-    target_height = img.shape[0]
-    target_width = img.shape[1]
-
-    # Resize the mask using OpenCV
-    resized_mask = cv2.resize(mask[0], (target_width, target_height))
-    resized_mask = (resized_mask * 255).astype(np.uint8)
-
-    # Create a copy of the original image
-    overlay_image = img.copy()
-
-    # Apply the resized mask to the overlay image
-    overlay_image[resized_mask > 0] = [50, 0, 0] # Overlay in green
-
-    # Convert the overlay image to PIL format
-    overlay_pil = Image.fromarray(overlay_image)
-
-    return overlay_pil'''
-
 # Create the Gradio interface with a dropdown for model selection
 iface = gr.Interface(
     fn=segment_image,
     inputs=[
-        gr.inputs.Image(type="pil", label="Upload an image"),
-        gr.inputs.Dropdown(
+        gr.components.Image(type="pil", label="Upload an image"),
+        gr.components.Dropdown(
             choices=list(available_models.keys()),
             label="Select YOLO Model",
             default="X-ray"
         )
     ],
-    outputs=[gr.outputs.Image(type="numpy", label="Segmented Image"),
-             gr.outputs.Image(type="numpy", label="Segmentation Mask")], # Add an output for the mask
+    outputs=[
+        gr.components.Image(type="pil", label="Segmented Image"),
+        gr.components.Image(type="pil", label="Segmentation Mask")
+    ],
     title="YOLOv8 with SAM 😃",
-    description='This software generates the segmentation mask for Aorta for Point of Care Ultrasound (POCUS) images'
+    description='This software generates the segmentation mask Medical images'
 )
 
 iface.launch()
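Read together, the hunks keep the app's single pipeline: YOLOv8 proposes a detection for the target structure, and SAM turns its bounding box into a pixel mask that is resized and overlaid on the input. The sketch below restates that flow under explicit assumptions: the weight and checkpoint file names, the vit_b SAM variant, and taking only the first detected box are placeholders, not values from this repository, and the parts of app.py that fall outside the hunks are not shown in this diff.

# Hedged sketch of the YOLO-box -> SAM-mask flow implied by the hunks above.
import cv2
import numpy as np
from PIL import Image
from ultralytics import YOLO
from segment_anything import sam_model_registry, SamPredictor

model = YOLO("best.pt")                                               # assumption: YOLOv8 weights file
sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b_01ec64.pth")  # assumption: SAM checkpoint
predictor = SamPredictor(sam)

def yolo_then_sam(input_image):
    # Mirror the app's conversion of the uploaded PIL image to a NumPy array
    img = cv2.cvtColor(np.array(input_image), cv2.COLOR_BGR2RGB)

    # YOLOv8 proposes a bounding box for the target structure
    results = model(img)
    bbox = results[0].boxes.xyxy[0].cpu().numpy()  # assumption: use the first detection only

    # SAM refines the box into a pixel mask
    predictor.set_image(img)
    masks_, _, _ = predictor.predict(
        point_coords=None,
        point_labels=None,
        box=bbox[None, :],
        multimask_output=False,
    )

    # Scale the boolean mask to 0-255 and resize it back to the input resolution
    mask = masks_[0].astype(np.uint8) * 255
    mask = cv2.resize(mask, (img.shape[1], img.shape[0]))
    return Image.fromarray(mask)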
 
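The remaining additions move the interface off the older gr.inputs / gr.outputs namespaces onto gr.components and return both the overlay and the mask as PIL images. A minimal wiring sketch follows, assuming Gradio 3.x and a stand-in segment_image; note that the commit still passes default="X-ray", which newer Gradio releases spell value= instead.

# Minimal Gradio wiring sketch; segment_image is a stand-in, not the app's real function.
import gradio as gr
import numpy as np
from PIL import Image

available_models = {"X-ray": None}  # placeholder for the models dict defined earlier in app.py

def segment_image(input_image, selected_model):
    # The commit's key change: the chosen model name now arrives as a second argument.
    # A real implementation would look it up in available_models and run YOLOv8 + SAM;
    # here we return the input plus a blank mask to show the two-output contract.
    blank = np.zeros(np.array(input_image).shape[:2], dtype=np.uint8)
    return input_image, Image.fromarray(blank)

iface = gr.Interface(
    fn=segment_image,
    inputs=[
        gr.components.Image(type="pil", label="Upload an image"),
        gr.components.Dropdown(
            choices=list(available_models.keys()),
            label="Select YOLO Model",
            value="X-ray",  # the commit itself keeps default=
        ),
    ],
    outputs=[
        gr.components.Image(type="pil", label="Segmented Image"),
        gr.components.Image(type="pil", label="Segmentation Mask"),
    ],
    title="YOLOv8 with SAM",
)

iface.launch()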