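"""Gradio app for visualizing object-detection explanations (XAI) with YOLO models.

Users upload an image (or preview a bundled sample), select one or more YOLO
variants, and the app displays each model's explanation output in a gallery.
"""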
import numpy as np
import cv2
import os
from PIL import Image
import gradio as gr
from yolov5 import xai_yolov5
from yolov8 import xai_yolov8s

def process_image(image, yolo_versions=["yolov5"]):
    """Run the selected YOLO XAI pipelines on an image and collect their outputs."""
    if image is None:  # "Run" clicked without an uploaded image
        return []
    image = np.array(image)
    image = cv2.resize(image, (640, 640))  # resize to 640x640 before running the detectors

    result_images = []
    for yolo_version in yolo_versions:
        if yolo_version == "yolov5":
            result_images.append(xai_yolov5(image))
        elif yolo_version == "yolov8s":
            result_images.append(xai_yolov8s(image))
        else:
            result_images.append((Image.fromarray(image), f"{yolo_version} not yet implemented."))
    return result_images


# Bundled sample images, resolved relative to the current working directory.
sample_images = {
    "Sample 1": os.path.join(os.getcwd(), "data/xai/sample1.jpeg"),
    "Sample 2": os.path.join(os.getcwd(), "data/xai/sample2.jpg"),
}


def load_sample_image(sample_name):
    """Load a bundled sample image for preview, or None if it cannot be opened."""
    if sample_name in sample_images:
        try:
            return Image.open(sample_images[sample_name])
        except Exception as e:
            print(f"Error loading image: {e}")
            return None
    return None

with gr.Blocks() as interface:
    gr.Markdown("# Visualizing Key Features with Explainable AI")
    gr.Markdown("Upload an image or select a sample image to visualize object detection.")

    with gr.Row():
        uploaded_image = gr.Image(type="pil", label="Upload an Image")
        sample_selection = gr.Dropdown(
            choices=list(sample_images.keys()),
            label="Select a Sample Image",
            type="value",
        )
        sample_display = gr.Image(label="Sample Image Preview", value=None)
        sample_selection.change(fn=load_sample_image, inputs=sample_selection, outputs=sample_display)

    # Only YOLOv5 and YOLOv8s are implemented in process_image, so offer exactly those.
    selected_models = gr.CheckboxGroup(
        choices=["yolov5", "yolov8s"],
        value=["yolov5"],  # Default model
        label="Select Model(s)",
    )

    result_gallery = gr.Gallery(label="Results", elem_id="gallery", rows=2, height=500)

    gr.Button("Run").click(
        fn=process_image,
        inputs=[uploaded_image, selected_models],
        outputs=result_gallery,
    )

interface.launch()