import os

import cv2
import gradio as gr
import numpy as np
from PIL import Image

# Local modules providing the XAI pipeline for each detector.
from yolov5 import xai_yolov5
from yolov8 import xai_yolov8s


def process_image(image, yolo_versions=("yolov5",)):
    """Run the selected YOLO XAI pipelines and return gallery entries.

    `image` may be a PIL image or, as wired in the interface below, the
    name of a bundled sample image, which is resolved via
    `load_sample_image` before processing.
    """
    if isinstance(image, str):
        image = load_sample_image(image)
    image = np.array(image)
    image = cv2.resize(image, (640, 640))

    result_images = []
    for yolo_version in yolo_versions:
        if yolo_version == "yolov5":
            result_images.append(xai_yolov5(image))
        elif yolo_version == "yolov8s":
            result_images.append(xai_yolov8s(image))
        else:
            result_images.append((Image.fromarray(image), f"{yolo_version} not yet implemented."))
    return result_images
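
# Assumed contract (defined in the local yolov5/yolov8 modules, not in this
# file): each pipeline takes a 640x640 RGB ndarray and returns a
# (PIL.Image, caption) tuple, which gr.Gallery renders as a captioned image.
# A hypothetical stand-in for testing the UI without model weights might be:
#
#     def xai_yolov5(image):
#         return Image.fromarray(image), "yolov5 (stub)"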


# Bundled sample images, resolved relative to the current working directory.
sample_images = {
    "Sample 1": os.path.join(os.getcwd(), "data/xai/sample1.jpeg"),
    "Sample 2": os.path.join(os.getcwd(), "data/xai/sample2.jpg"),
}

def load_sample_image(sample_name):
    """Return the named sample as a PIL image, or None if it is missing."""
    image_path = sample_images.get(sample_name)
    if image_path and os.path.exists(image_path):
        return Image.open(image_path)
    return None

default_sample_image = load_sample_image("Sample 1")
"""
with gr.Blocks() as interface:
    gr.Markdown("# XAI: Upload an image to visualize object detection of your models..")
    gr.Markdown("Upload an image or select a sample image to visualize object detection.")

    with gr.Row():
        uploaded_image = gr.Image(type="pil", label="Upload an Image")
        sample_selection = gr.Dropdown(
            choices=list(sample_images.keys()),
            label="Select a Sample Image",
            type="value",
        )
        sample_display = gr.Image(label="Sample Image Preview", value=default_sample_image)
        sample_selection.change(fn=load_sample_image, inputs=sample_selection, outputs=sample_display)

    selected_models = gr.CheckboxGroup(
        choices=["yolov5", "yolov8s"],
        value=["yolov5"], 
        label="Select Model(s)",
    )
    result_gallery = gr.Gallery(label="Results", elem_id="gallery", rows=2, height=500)
    gr.Button("Run").click(
        fn=process_image,
        inputs=[uploaded_image, selected_models],
        outputs=result_gallery,
    )
"""

# Active interface: sample-image selection only (no upload).
with gr.Blocks() as interface:
    gr.Markdown("# XAI: Visualize object detection across your models.")
    gr.Markdown("Select a sample image to visualize object detection.")

    with gr.Row():
        # gr.Radio is Gradio's radio-button component (gr.RadioButtons does not exist).
        sample_selection = gr.Radio(
            choices=list(sample_images.keys()),
            label="Select a Sample Image",
            type="value",
        )
        sample_display = gr.Image(label="Sample Image Preview", value=default_sample_image)
        # Update the preview whenever a different sample is selected.
        sample_selection.change(fn=load_sample_image, inputs=sample_selection, outputs=sample_display)

    # Each checked model contributes one captioned entry to the gallery.
    selected_models = gr.CheckboxGroup(
        choices=["yolov5", "yolov8s"],
        value=["yolov5"],
        label="Select Model(s)",
    )
    result_gallery = gr.Gallery(label="Results", elem_id="gallery", rows=2, height=500)
    gr.Button("Run").click(
        fn=process_image,
        inputs=[sample_selection, selected_models],
        outputs=result_gallery,
    )
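
# Usage sketch (assumes the local `yolov5`/`yolov8` modules and the
# data/xai/ sample images are present): run `python app.py`, then open
# the local URL that Gradio prints.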

if __name__ == "__main__":
    interface.launch()