# Brain Tumor Image Dataset Viewer — Gradio demo app.
import gradio as gr
from datasets import load_dataset
from PIL import Image, ImageDraw
import numpy as np
# Load the dataset
# NOTE: this downloads/caches the "test" split from the Hugging Face Hub at
# import time, so starting the app requires network access on first run.
dataset = load_dataset("dwb2023/brain-tumor-image-dataset-semantic-segmentation", split="test")
# Function to filter dataset based on category ID
def filter_dataset_by_category(category_id):
    """Return the dataset positions of every record with the given category.

    Scans the module-level ``dataset`` linearly and collects the indices
    whose ``category_id`` field matches ``category_id``.
    """
    matching = []
    for position, record in enumerate(dataset):
        if record["category_id"] == category_id:
            matching.append(position)
    return matching
# Function to draw annotations
def draw_annotations(index, category_id):
    """Render one annotated dataset image for the Gradio viewer.

    Args:
        index: Position within the category-filtered index list. Gradio
            sliders may deliver this as a float, so it is cast to int.
        category_id: Category used to filter the dataset.

    Returns:
        Tuple of (PIL.Image.Image, info string, slider maximum). On any
        per-record failure a gray placeholder image and an error message
        are returned instead of raising.
    """
    filtered_indices = filter_dataset_by_category(category_id)
    # Guard: with no matches the original code raised (and swallowed) an
    # IndexError and returned -1 as the slider maximum; return a sane
    # placeholder with maximum 0 instead.
    if not filtered_indices:
        placeholder = Image.new('RGB', (300, 300), color='gray')
        return placeholder, f"No images found for category {category_id}", 0
    index = int(index)  # slider values can arrive as floats
    if index >= len(filtered_indices):
        index = 0
    try:
        # Fetch the image and annotations from the dataset
        record = dataset[filtered_indices[index]]
        # Convert image to PIL Image if it's a numpy array
        if isinstance(record['image'], np.ndarray):
            img = Image.fromarray(record['image'])
        else:
            img = record['image']
        img = img.convert("RGB")  # ensure RGB so colored overlays render
        draw = ImageDraw.Draw(img)
        # Draw bounding box — COCO-style [x, y, width, height], so the
        # bottom-right corner is (x + w, y + h).
        bbox = record["bbox"]
        draw.rectangle(
            [bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]],
            outline="red", width=2,
        )
        # Draw segmentation mask — each entry is a flat [x0, y0, x1, y1, ...]
        # polygon, which ImageDraw.polygon accepts directly.
        segmentation = record["segmentation"]
        for seg in segmentation:
            draw.polygon(seg, outline="blue", width=2)
        # Prepare additional information
        category_id = record["category_id"]
        area = record["area"]
        file_name = record["file_name"]
        info = f"File Name: {file_name}\n"
        info += f"Image ID: {record['id']}\n"
        info += f"Category ID: {category_id}\n"
        info += f"Bounding Box: [{bbox[0]:.2f}, {bbox[1]:.2f}, {bbox[2]:.2f}, {bbox[3]:.2f}]\n"
        info += f"Segmentation: {segmentation}\n"
        info += f"Area: {area:.2f}"
        return img, info, len(filtered_indices) - 1
    except Exception as e:
        # Best-effort UI: never crash the viewer on a bad record.
        print(f"Error processing image at index {index}: {e}")
        return (
            Image.new('RGB', (300, 300), color='gray'),
            f"Error loading image information: {str(e)}",
            len(filtered_indices) - 1,
        )
# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Brain Tumor Image Dataset Viewer")
    gr.Markdown("## Refer to the [dwb2023/brain-tumor-image-dataset-semantic-segmentation](https://huggingface.co/datasets/dwb2023/brain-tumor-image-dataset-semantic-segmentation/viewer/default/test) dataset for more information")
    with gr.Row():
        with gr.Column(scale=1):
            image_output = gr.Image(label="Annotated Image")
        with gr.Column(scale=1):
            category_id_dropdown = gr.Dropdown(choices=[1, 2], value=1, label="Category ID")
            image_index = gr.Slider(minimum=0, maximum=0, step=1, value=0, label="Image ID Slider")
            info_output = gr.Textbox(label="Image Information", lines=10)

    def update_slider(category_id):
        """Resize the index slider to fit the newly selected category."""
        _, _, max_index = draw_annotations(0, category_id)
        # gr.update(...) is the portable form: gr.Slider.update was
        # deprecated and removed in Gradio 4.x.
        return gr.update(maximum=max_index)

    # Update image and info when slider or category changes
    category_id_dropdown.change(update_slider, inputs=category_id_dropdown, outputs=image_index)
    category_id_dropdown.change(draw_annotations, inputs=[image_index, category_id_dropdown], outputs=[image_output, info_output, image_index])
    image_index.change(draw_annotations, inputs=[image_index, category_id_dropdown], outputs=[image_output, info_output, image_index])
    # Display initial image and info
    demo.load(draw_annotations, inputs=[image_index, category_id_dropdown], outputs=[image_output, info_output, image_index])

demo.launch(debug=True)