Commit 438b834 (parent: fefbab6): Update app.py

app.py CHANGED
@@ -14,9 +14,71 @@ available_models = {
 }
 
 
+def segment_image(input_image):
+    # Convert the PIL input to a NumPy array and swap the channel order
+    img = np.array(input_image)
+    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+    # Run detection and segmentation (assumes at least one detection with masks)
+    results = model(img)
+    mask = results[0].masks.data.numpy()
+    target_height = img.shape[0]
+    target_width = img.shape[1]
+
+    # Resize the first mask to the input resolution using OpenCV
+    resized_mask = cv2.resize(mask[0], (target_width, target_height))
+    resized_mask = (resized_mask * 255).astype(np.uint8)
+
+    # Create a copy of the original image
+    overlay_image = img.copy()
+
+    # Paint the masked pixels with a fixed dark overlay color
+    overlay_image[resized_mask > 0] = [50, 0, 0]
+
+    # Convert the overlay image to PIL format
+    overlay_pil = Image.fromarray(overlay_image)
+
+    # Convert the resized mask to PIL format
+    mask_pil = Image.fromarray(resized_mask)
+
+    # Keep the first bounding box of the last result as the SAM box prompt
+    for result in results:
+        boxes = result.boxes
+        bbox = boxes.xyxy.tolist()[0]
+
+    # Load SAM (reloaded on every call; see the note after the diff)
+    sam_checkpoint = "sam_vit_h_4b8939.pth"
+    model_type = "vit_h"
+    sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
+    sam.to(device='cpu')
+    predictor = SamPredictor(sam)
+    predictor.set_image(img)
+
+    # Prompt SAM with the detected box
+    input_box = np.array(bbox)
+    masks_, _, _ = predictor.predict(
+        point_coords=None,
+        point_labels=None,
+        box=input_box,
+        multimask_output=False)
+
+    # masks_ has shape (1, H, W): take the whole single mask, and use uint8
+    # so cv2.resize accepts it (it rejects int64 arrays)
+    fmask = masks_[0].astype(np.uint8)
+
+    resized_mask1 = cv2.resize(fmask, (target_width, target_height))
+    resized_mask1 = (resized_mask1 * 255).astype(np.uint8)
+
+    overlay_image1 = img.copy()
+    # Paint the SAM-masked pixels with a second overlay color
+    overlay_image1[resized_mask1 > 0] = [50, 50, 0]
+
+    # Convert the overlay image to PIL format
+    overlay_pil1 = Image.fromarray(overlay_image1)
+
+    return overlay_pil, overlay_pil1  # Return both overlay images
+
+
 
 # Create a function to perform image segmentation using the selected model
-def segment_image(input_image, selected_model):
+'''def segment_image(input_image, selected_model):
     # Resize the input image to 255x255
     img = np.array(input_image)
     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

@@ -41,7 +103,7 @@ def segment_image(input_image, selected_model):
     # Convert the overlay image to PIL format
     overlay_pil = Image.fromarray(overlay_image)
 
-    return overlay_pil
+    return overlay_pil'''
 
 # Create the Gradio interface with a dropdown for model selection
 iface = gr.Interface(
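
The changed lines use np, cv2, Image, gr, a module-level model, and the SAM classes without showing where any of them come from. A minimal sketch of the preamble the new code appears to assume; the ultralytics import and the YOLO weights filename are guesses, since the diff never shows how model is built:

import numpy as np
import cv2
import gradio as gr
from PIL import Image
from segment_anything import sam_model_registry, SamPredictor
from ultralytics import YOLO  # assumption: results[0].masks / result.boxes match this API

model = YOLO("yolov8n-seg.pt")  # hypothetical segmentation checkpoint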
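As flagged in the comment inside segment_image, constructing sam_model_registry["vit_h"] per call reloads the large ViT-H checkpoint on every request, which is slow on a CPU Space. A sketch of the usual fix, hoisting the model and predictor to module scope:

# Load SAM once at import time; segment_image then only calls
# predictor.set_image(img) and predictor.predict(...) per request.
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
sam.to(device="cpu")
predictor = SamPredictor(sam)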
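The new segment_image returns two PIL images and drops the selected_model parameter, but neither hunk shows the matching change to iface. A hedged sketch of what the interface would need, two image outputs and no model dropdown (the labels are invented here):

iface = gr.Interface(
    fn=segment_image,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Image(type="pil", label="YOLO overlay"),     # hypothetical label
        gr.Image(type="pil", label="SAM box overlay"),  # hypothetical label
    ],
)
iface.launch()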