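"""Gradio demo for visualizing XAI outputs of YOLO object detectors.

Pick a bundled sample image or upload your own, choose one or more YOLO
variants, and inspect the explanation results in a gallery.
"""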
import numpy as np
import cv2
import os
from PIL import Image
import gradio as gr
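# XAI wrappers for each YOLO variant (assumed to live in local yolov5.py / yolov8.py modules).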
from yolov5 import xai_yolov5
from yolov8 import xai_yolov8s

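# Bundled sample images, resolved relative to the current working directory.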
sample_images = {
    "Sample 1": os.path.join(os.getcwd(), "data/xai/sample1.jpeg"),
    "Sample 2":  os.path.join(os.getcwd(), "data/xai/sample2.jpg"),
}

def load_sample_image(sample_name):
    """Return the chosen sample as an RGB numpy array, for both display and inference."""
    image_path = sample_images.get(sample_name)
    if image_path is None or not os.path.exists(image_path):
        raise ValueError(f"Invalid sample selection: {sample_name}")
    # OpenCV reads BGR; reverse the channel order to get RGB.
    return cv2.imread(image_path)[:, :, ::-1]

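# Run each selected model's XAI pipeline on the uploaded image (if any) or the chosen sample.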
def process_image(sample_choice, uploaded_image, yolo_versions=["yolov5"]):
    print(sample_choice, uploaded_image)
    if uploaded_image is not None:
        image = uploaded_image  # Use the uploaded image
    else:
        # Otherwise, use the selected sample image
        image = load_sample_image(sample_choice)
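    # Convert to a numpy array and resize to the 640x640 input used for inference.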
    image = np.array(image)
    image = cv2.resize(image, (640, 640))
    result_images = []
    for yolo_version in yolo_versions:
        if yolo_version == "yolov5":
            result_images.append(xai_yolov5(image)) 
        elif yolo_version == "yolov8s":
            result_images.append(xai_yolov8s(image))
        else:
            result_images.append((Image.fromarray(image), f"{yolo_version} not yet implemented."))
    return result_images

with gr.Blocks() as interface:
    # Update CSS to make text white and set dark mode
    gr.HTML("""
    <style>
        body {
            background-color: black;
            color: #FFFFFF;  /* Set the default text color to white */
        }
        .gradio-container {
            color: #D3D3D3;  /* Ensure Gradio components use light grey text */
        }
        h1, h2, h3, h4, h5, h6, p, label {
            color: #FFFFFF !important; /* Force all headings, paragraphs, and labels to white */
        }
        .gr-markdown {
            color: #FFFFFF !important; /* Ensure Markdown text is white */
        }
        .gr-button {
            background-color: #007bff; /* Optional: Change button background color */
            color: #D3D3D3; /* Ensure button text is light grey */
        }
        .gr-button:hover {
            background-color: #0056b3; /* Optional: Change button hover color */
        }
    </style>   
    """)

    gr.Markdown("<h1 style='color: #FFFFFF;'>XAI: Visualize Object Detection of Your Models</h1>")
    gr.Markdown("<p style='color: #FFFFFF;'>Select a sample image to visualize object detection.</p>")
    
    default_sample = "Sample 1"
    
    with gr.Row(elem_classes="orchid-green-bg"):
        # Left side: Sample selection and upload image
        with gr.Column():
            sample_selection = gr.Radio(
                choices=list(sample_images.keys()),
                label="Select a Sample Image",
                type="value",
                value=default_sample,  # Set default selection
            )
            # Upload image below sample selection
            gr.Markdown("<h2 style='color: #FFFFFF>Or upload your own image:</h2>")
            upload_image = gr.Image(
                label="Upload an Image",
                type="pil",  # Correct type for file path compatibility
            )
        
        # Right side: Selected sample image display
        sample_display = gr.Image(
            value=load_sample_image(default_sample),  
            label="Selected Sample Image",
        )
    
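    # Keep the preview in sync with the selected sample.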
    sample_selection.change(
        fn=load_sample_image,
        inputs=sample_selection,
        outputs=sample_display,
    )

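    # Let the user run one or both YOLO variants in a single pass.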
    selected_models = gr.CheckboxGroup(
        choices=["yolov5", "yolov8s"],
        value=["yolov5"],
        label="Select Model(s)",
    )
    
    result_gallery = gr.Gallery(label="Results", elem_id="gallery", rows=2, height=500)

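    # Run inference: the uploaded image takes priority over the selected sample.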
    gr.Button("Run").click(
        fn=process_image,
        inputs=[sample_selection, upload_image, selected_models],  # Include both options
        outputs=result_gallery,
    )

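# share=True also publishes a temporary public URL alongside the local server.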
interface.launch(share=True)