import gradio as gr
import numpy as np
import cv2
from PIL import Image
from ultralytics import YOLO
import torch
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator, SamPredictor
# Define available YOLO models
available_models = {
    "X-ray": YOLO("xray.pt"),
    "CT scan": YOLO("CT.pt"),
    "Ultrasound": YOLO("ultrasound.pt"),
    # Add more models as needed
}
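# Build the SAM predictor once at startup so the large checkpoint is not
# re-read on every request; the checkpoint path and model type are the ones
# this app already uses (medsam_vit_b.pth / vit_b).
sam_checkpoint = "medsam_vit_b.pth"
model_type = "vit_b"
device = "cuda" if torch.cuda.is_available() else "cpu"
sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
sam.to(device=device)
predictor = SamPredictor(sam)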
def segment_image(input_image, selected_model):
    img = np.array(input_image)
    # PIL arrays are RGB; swap to the BGR channel order OpenCV and YOLO expect
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    model = available_models[selected_model]
    # Perform object detection and segmentation
    results = model(img)
    if results[0].masks is None:
        # No detections: show a readable error in the UI instead of crashing
        raise gr.Error("The selected model found no objects in this image.")
    mask = results[0].masks.data.cpu().numpy()
    target_height = img.shape[0]
    target_width = img.shape[1]
    # Resize the first mask back to the input resolution using OpenCV
    resized_mask = cv2.resize(mask[0], (target_width, target_height))
    resized_mask = (resized_mask * 255).astype(np.uint8)
    # Create a copy of the original image
    overlay_image = img.copy()
    # Tint the masked pixels (dark blue in BGR order)
    overlay_image[resized_mask > 0] = [50, 0, 0]
    # Convert back to RGB for PIL display
    overlay_pil = Image.fromarray(cv2.cvtColor(overlay_image, cv2.COLOR_BGR2RGB))
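    # Gentler alternative (a sketch, not used here): alpha-blend a colored tint
    # instead of overwriting pixels, e.g. with cv2.addWeighted:
    #   tint = np.zeros_like(img)
    #   tint[resized_mask > 0] = (0, 255, 0)
    #   overlay_image = cv2.addWeighted(img, 0.7, tint, 0.3, 0)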
    # Use the first YOLO box as a box prompt for SAM
    boxes = results[0].boxes
    bbox = boxes.xyxy.tolist()[0]
    # SAM expects an RGB image
    predictor.set_image(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    input_box = np.array(bbox)
    masks_, _, _ = predictor.predict(
        point_coords=None,
        point_labels=None,
        box=input_box,
        multimask_output=False,
    )
    fmask = masks_[0].astype(np.uint8)  # cv2.resize cannot handle int64 masks
    resized_mask1 = cv2.resize(fmask, (target_width, target_height))
    resized_mask1 = (resized_mask1 * 255).astype(np.uint8)
    overlay_image1 = img.copy()
    # Tint the SAM-masked pixels (dark cyan in BGR order)
    overlay_image1[resized_mask1 > 0] = [50, 50, 0]
    # Convert back to RGB for PIL display
    overlay_pil1 = Image.fromarray(cv2.cvtColor(overlay_image1, cv2.COLOR_BGR2RGB))
    return overlay_pil, overlay_pil1  # Return the YOLO overlay and the YOLO+SAM overlay
# Create the Gradio interface with a dropdown for model selection
iface = gr.Interface(
    fn=segment_image,
    inputs=[
        gr.components.Image(type="pil", label="Upload an image"),
        gr.components.Dropdown(
            choices=list(available_models.keys()),
            label="Select YOLO Model",
            value="X-ray",  # Dropdown takes `value`, not `default`, in Gradio 3.x
        ),
    ],
    outputs=[
        gr.components.Image(type="pil", label="YOLO predicted mask and image"),
        gr.components.Image(type="pil", label="YOLO + SAM predicted mask and image"),
    ],
    title="YOLOv8 with SAM",
    description="This software generates segmentation masks for medical images.",
)
iface.launch()
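# When running locally (outside Spaces), iface.launch(share=True) additionally
# serves the app at a temporary public URL for quick sharing.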