BhumikaMak commited on
Commit
cb8e09a
·
verified ·
1 Parent(s): 13a166e

merged code

Browse files
Files changed (1) hide show
  1. app.py +254 -0
app.py CHANGED
@@ -119,8 +119,148 @@ body {
119
  font-weight: bold;
120
  color: #1976d2;
121
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  """
123
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
124
  with gr.Blocks(css=custom_css) as demo:
125
  gr.HTML("""
126
  <div style="border: 2px solid #a05252; padding: 20px; border-radius: 8px;">
@@ -144,6 +284,120 @@ with gr.Blocks(css=custom_css) as demo:
144
  gr.Image(yolov8_result, label="Detections & Interpretability Map")
145
  gr.Markdown(description_yolov8)
146
  gr.Image(yolov8_dff, label="Feature Factorization & discovered concept")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
147
 
148
 
149
  demo.launch()
 
119
  font-weight: bold;
120
  color: #1976d2;
121
  }
122
+ .custom-row {
123
+ display: flex;
124
+ justify-content: center; /* Align horizontally */
125
+ align-items: center; /* Align vertically */
126
+ padding: 10px; /* Adjust as needed for spacing */
127
+ }
128
+ .custom-button {
129
+ background-color: #800000;
130
+ color: white;
131
+ font-size: 12px; /* Small font size */
132
+ width: 100px !important; /* Fixed width */
133
+ height: 35px !important; /* Fixed height */
134
+ border-radius: 6px; /* Slightly rounded corners */
135
+ padding: 0 !important; /* Remove extra padding */
136
+ cursor: pointer;
137
+ text-align: center;
138
+ margin: 0 auto; /* Center within its container */
139
+ box-sizing: border-box; /* Ensure consistent sizing */
140
+ }
141
+ #run-button {
142
+ background-color: #800000 !important;
143
+ color: white !important;
144
+ font-size: 12px !important; /* Small font size */
145
+ width: 100px !important; /* Fixed width */
146
+ height: 35px !important; /* Fixed height */
147
+ border-radius: 6px !important;
148
+ padding: 0 !important;
149
+ text-align: center !important;
150
+ display: block !important; /* Ensure block-level alignment */
151
+ margin: 0 auto !important; /* Center horizontally */
152
+ box-sizing: border-box !important;
153
+ }
154
+ /* Custom border styles for all Gradio components */
155
+ .gradio-container, .gradio-row, .gradio-column, .gradio-input, .gradio-image, .gradio-checkgroup, .gradio-button, .gradio-markdown {
156
+ border: 3px #800000 !important; /* Border width and color */
157
+ border-radius: 8px !important; /* Rounded corners */
158
+ }
159
+ /* Additional customizations for images to enhance visibility of the border */
160
+ .gradio-image img {
161
+ border-radius: 8px !important;
162
+ border: 3px solid black !important; /* Border for image */
163
+ }
164
+ /* Custom Row for images and buttons */
165
+ .custom-row img {
166
+ border-radius: 10px;
167
+ box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
168
+ }
169
+ #highlighted-text {
170
+ font-weight: bold;
171
+ color: #1976d2;
172
+ }
173
+ .gradio-block {
174
+ max-height: 100vh; /* Allow scrolling within the Gradio blocks */
175
+ overflow-y: auto; /* Enable scrolling for the content if it overflows */
176
+ }
177
+ #neural-vista-title {
178
+ color: #800000 !important; /* Purple color for the title */
179
+ font-size: 32px; /* Adjust font size as needed */
180
+ font-weight: bold;
181
+ text-align: center;
182
+ }
183
+ #neural-vista-text {
184
+ color: #800000 !important; /* Purple color for the title */
185
+ font-size: 18px; /* Adjust font size as needed */
186
+ font-weight: bold;
187
+ text-align: center;
188
+
189
+ }
190
+
191
+
192
  """
193
 
194
+ import netron
195
+ import threading
196
+ import gradio as gr
197
+ import os
198
+ from PIL import Image
199
+ import cv2
200
+ import numpy as np
201
+ from yolov5 import xai_yolov5
202
+ from yolov8 import xai_yolov8s
203
+
204
# Bundled demo images, keyed by the label shown in the UI radio buttons.
_CWD = os.getcwd()
sample_images = {
    "Sample 1": os.path.join(_CWD, "data/xai/sample1.jpeg"),
    "Sample 2": os.path.join(_CWD, "data/xai/sample2.jpg"),
}
209
+
210
def load_sample_image(sample_name):
    """Return the PIL image registered under *sample_name*, or None.

    None is returned both for names absent from ``sample_images`` and
    for registered paths that are missing on disk.
    """
    path = sample_images.get(sample_name)
    if not path or not os.path.exists(path):
        return None
    return Image.open(path)
216
+
217
def process_image(sample_choice, uploaded_image, yolo_versions, target_lyr = -5, n_components = 8):
    """Run the selected YOLO explainability pipelines on one image.

    Parameters
    ----------
    sample_choice : str
        Key into ``sample_images``; used only when no image was uploaded.
    uploaded_image : PIL.Image.Image or None
        User-supplied image; takes precedence over the sample choice.
    yolo_versions : list[str]
        Models to run; "yolov5" and "yolov8s" are implemented.
    target_lyr : int
        Layer index forwarded to the YOLOv5 XAI pipeline.
    n_components : int
        Number of DFF components forwarded to the YOLOv5 XAI pipeline.

    Returns
    -------
    list
        One entry per requested model. Unimplemented models get a
        ``(PIL.Image, message)`` placeholder tuple.
    """
    # Uploaded image wins; otherwise fall back to the chosen sample.
    if uploaded_image is not None:
        image = uploaded_image
    else:
        image = load_sample_image(sample_choice)

    # Both pipelines expect a fixed-size 640x640 numpy array.
    image = np.array(image)
    image = cv2.resize(image, (640, 640))

    result_images = []
    for yolo_version in yolo_versions:
        if yolo_version == "yolov5":
            # Bug fix: forward the caller's arguments instead of
            # re-hard-coding target_lyr=-5 / n_components=8, which
            # silently ignored whatever the caller passed in.
            result_images.append(
                xai_yolov5(image, target_lyr=target_lyr, n_components=n_components)
            )
        elif yolo_version == "yolov8s":
            result_images.append(xai_yolov8s(image))
        else:
            result_images.append((Image.fromarray(image), f"{yolo_version} not implemented."))
    return result_images
239
+
240
def view_model(selected_models):
    """Build Netron <iframe> markup for every selected model.

    Parameters
    ----------
    selected_models : iterable of str
        Model identifiers; "yolov8s" and "yolov5" are recognised,
        anything else is ignored.

    Returns
    -------
    str
        Concatenated iframe HTML for each recognised model, or a
        placeholder paragraph when nothing matched.
    """
    # Weight-file URL that Netron should render, per supported model.
    model_urls = {
        "yolov8s": "https://huggingface.co/spaces/BhumikaMak/NeuralVista/resolve/main/weight_files/yolov8s.pt",
        "yolov5": "https://huggingface.co/FFusion/FFusionXL-BASE/blob/main/vae_encoder/model.onnx",
    }
    netron_html = ""
    for model in selected_models:
        url = model_urls.get(model)
        if url is None:
            continue
        # Bug fix: accumulate with += — the original overwrote
        # netron_html each iteration, so selecting both models only
        # ever displayed the last one.
        netron_html += f"""
        <iframe
            src="https://netron.app/?url={url}"
            width="100%"
            height="800"
            frameborder="0">
        </iframe>
        """
    return netron_html if netron_html else "<p>No valid models selected for visualization.</p>"
263
+
264
  with gr.Blocks(css=custom_css) as demo:
265
  gr.HTML("""
266
  <div style="border: 2px solid #a05252; padding: 20px; border-radius: 8px;">
 
284
  gr.Image(yolov8_result, label="Detections & Interpretability Map")
285
  gr.Markdown(description_yolov8)
286
  gr.Image(yolov8_dff, label="Feature Factorization & discovered concept")
287
+
288
+ default_sample = "Sample 1"
289
+
290
+ with gr.Row():
291
+ # Left side: Sample selection and image upload
292
+ with gr.Column():
293
+ sample_selection = gr.Radio(
294
+ choices=list(sample_images.keys()),
295
+ label="Select a Sample Image",
296
+ value=default_sample,
297
+ )
298
+
299
+ upload_image = gr.Image(
300
+ label="Upload an Image",
301
+ type="pil",
302
+ )
303
+
304
+ selected_models = gr.CheckboxGroup(
305
+ choices=["yolov5", "yolov8s"],
306
+ value=["yolov5"],
307
+ label="Select Model(s)",
308
+ )
309
+ #with gr.Row(elem_classes="custom-row"):
310
+ run_button = gr.Button("Run", elem_id="run-button")
311
+
312
+
313
+ with gr.Column():
314
+ sample_display = gr.Image(
315
+ value=load_sample_image(default_sample),
316
+ label="Selected Sample Image",
317
+ )
318
+
319
+ gr.HTML("""
320
+ <span style="font-size: 14px; font-weight: bold;">The visualization demonstrates object detection and interpretability. Detected objects are highlighted with bounding boxes, while the heatmap reveals regions of focus, offering insights into the model's decision-making process.</span>
321
+ """)
322
+ # Results and visualization
323
+ with gr.Row(elem_classes="custom-row"):
324
+ result_gallery = gr.Gallery(
325
+ label="Results",
326
+ rows=1,
327
+ height="auto", # Adjust height automatically based on content
328
+ columns=1 ,
329
+ object_fit="contain"
330
+ )
331
+ netron_display = gr.HTML(label="Netron Visualization")
332
+
333
+ # Update sample image
334
+ sample_selection.change(
335
+ fn=load_sample_image,
336
+ inputs=sample_selection,
337
+ outputs=sample_display,
338
+ )
339
+
340
+
341
+ gr.HTML("""
342
+ <span style="font-size: 14px; ">
343
+ <span style="color: #800000;">Concept Discovery</span> is the process of uncovering the hidden, high-level features that a deep learning model has learned. It provides a way to understand the essence of its internal representations, akin to peering into the mind of the model and revealing the meaningful patterns it detects in the data.
344
+ <br><br>
345
+ <span style="color: #800000;">Deep Feature Factorization</span> (DFF) serves as a tool for breaking down these complex features into simpler, more interpretable components. By applying matrix factorization on activation maps, it untangles the intricate web of learned representations, making it easier to comprehend what the model is truly focusing on. Together, these methods bring us closer to understanding the underlying logic of neural networks, shedding light on the often enigmatic decisions they make.
346
+ </span>
347
+ """)
348
+
349
+ with gr.Row(elem_classes="custom-row"):
350
+ dff_gallery = gr.Gallery(
351
+ label="Deep Feature Factorization",
352
+ rows=2, # 8 rows
353
+ columns=4, # 1 image per row
354
+ object_fit="fit",
355
+ height="auto" # Adjust as needed
356
+ )
357
+
358
# Multi-threaded processing
def run_both(sample_choice, uploaded_image, selected_models):
    """Compute XAI results and the Netron view concurrently.

    The image-processing pass and the Netron HTML generation are
    independent, so each runs on its own thread and both are joined
    before assembling the outputs for Gradio.

    Returns
    -------
    tuple
        ``([(image, caption)], netron_html, concept_images)`` feeding
        the results gallery, the HTML pane and the DFF gallery.
    """
    results = []
    netron_html = ""

    # Worker: run the selected detection/XAI pipelines.
    def process_thread():
        nonlocal results
        # -5 / 8 mirror process_image's own defaults.
        results = process_image(sample_choice, uploaded_image, selected_models,
                                target_lyr=-5, n_components=8)

    # Worker: build the Netron iframe HTML. (The original also
    # constructed a gr.HTML component here; that was a no-op orphan
    # component created off the UI thread, so it has been removed.)
    def netron_thread():
        nonlocal netron_html
        netron_html = view_model(selected_models)

    workers = [threading.Thread(target=process_thread),
               threading.Thread(target=netron_thread)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    # Only the first selected model's result is shown in the gallery.
    # NOTE(review): unimplemented models yield 2-tuples from
    # process_image, which would make this 3-way unpack raise —
    # confirm only yolov5/yolov8s can be selected in the UI.
    image1, text, image2 = results[0]
    if isinstance(image2, list):
        # Check if image2 contains exactly 8 images.
        if len(image2) == 8:
            print("image2 contains 8 images.")
        else:
            print("Warning: image2 does not contain exactly 8 images.")
    else:
        print("Error: image2 is not a list of images.")
    return [(image1, text)], netron_html, image2
394
+
395
+ # Run button click
396
+ run_button.click(
397
+ fn=run_both,
398
+ inputs=[sample_selection, upload_image, selected_models],
399
+ outputs=[result_gallery, netron_display, dff_gallery],
400
+ )
401
 
402
 
403
  demo.launch()