BhumikaMak committed
Commit 6c34a8c
Parent(s): a83e547

Debug: refactor src

Files changed (3)
  1. app.py +2 -2
  2. requirements.txt +0 -1
  3. yolov8.py +29 -67
app.py CHANGED
@@ -14,7 +14,7 @@ def process_image(image, yolo_versions=["yolov5"]):
     for yolo_version in yolo_versions:
         if yolo_version == "yolov5":
            result_images.append(xai_yolov5(image))
-        elif yolo_version == "yolov8n":
+        elif yolo_version == "yolov8s":
            result_images.append(xai_yolov8n(image))
         else:
            result_images.append((Image.fromarray(image), f"{yolo_version} not yet implemented."))
@@ -26,7 +26,7 @@ interface = gr.Interface(
     inputs=[
         gr.Image(type="pil", label="Upload an Image"),
         gr.CheckboxGroup(
-            choices=["yolov5", "yolov8n", "yolov10"],
+            choices=["yolov5", "yolov8s", "yolov10"],
             value=["yolov5"],  # Set default selection to YOLOv5
             label="Select Model(s)",
         )
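
For context, a minimal sketch of how the updated process_image entry point could be exercised outside Gradio. The standalone driver below is an assumption, not part of this commit; it presumes app.py is importable, that process_image returns the accumulated result_images list of (image, caption) pairs, and uses an illustrative test-image path.

import numpy as np
from PIL import Image

from app import process_image  # assumes app.py from this commit is importable

# Any RGB test image as a NumPy array (H, W, 3); the filename is illustrative.
image = np.array(Image.open("test.jpg").convert("RGB"))

# Request both implemented variants; unknown choices fall through to the
# "not yet implemented" placeholder tuple.
for result_img, caption in process_image(image, yolo_versions=["yolov5", "yolov8s"]):
    print(caption)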
requirements.txt CHANGED
@@ -8,4 +8,3 @@ grad-cam==1.4.8
 gradio
 ultralytics
 torchcam
-YOLOv8-Explainer
yolov8.py CHANGED
@@ -1,4 +1,3 @@
-from ultralytics import YOLO
 import torch
 import cv2
 import numpy as np
@@ -12,17 +11,18 @@ import gradio as gr
 COLORS = np.random.uniform(0, 255, size=(80, 3))
 
 def parse_detections(results):
+    detections = results.pandas().xyxy[0].to_dict()
     boxes, colors, names = [], [], []
-    for result in results:
-        # Accessing boxes directly from the result
-        for box in result.boxes:
-            xmin, ymin, xmax, ymax = box.xyxy[0].int().tolist()  # Convert to list of integers
-            category = int(box.cls[0].item())  # Class index
-            name = result.names[category]  # Get class name from names
-            boxes.append((xmin, ymin, xmax, ymax))
-            colors.append(COLORS[category])  # Ensure COLORS is defined elsewhere in your code
-            names.append(name)
-
+    for i in range(len(detections["xmin"])):
+        confidence = detections["confidence"][i]
+        if confidence < 0.2:
+            continue
+        xmin, ymin = int(detections["xmin"][i]), int(detections["ymin"][i])
+        xmax, ymax = int(detections["xmax"][i]), int(detections["ymax"][i])
+        name, category = detections["name"][i], int(detections["class"][i])
+        boxes.append((xmin, ymin, xmax, ymax))
+        colors.append(COLORS[category])
+        names.append(name)
     return boxes, colors, names
 
 def draw_detections(boxes, colors, names, img):
@@ -34,12 +34,15 @@ def draw_detections(boxes, colors, names, img):
                 lineType=cv2.LINE_AA)
     return img
 
-
 def generate_cam_image(model, target_layers, tensor, rgb_img, boxes):
     cam = EigenCAM(model, target_layers)
     grayscale_cam = cam(tensor)[0, :, :]
     img_float = np.float32(rgb_img) / 255
+
+    # Generate Grad-CAM
     cam_image = show_cam_on_image(img_float, grayscale_cam, use_rgb=True)
+
+    # Renormalize Grad-CAM inside bounding boxes
     renormalized_cam = np.zeros(grayscale_cam.shape, dtype=np.float32)
     for x1, y1, x2, y2 in boxes:
         renormalized_cam[y1:y2, x1:x2] = scale_cam_image(grayscale_cam[y1:y2, x1:x2].copy())
@@ -48,69 +51,28 @@ def generate_cam_image(model, target_layers, tensor, rgb_img, boxes):
 
     return cam_image, renormalized_cam_image
 
-
-def xai_yolov8nnnn(image):
-    model = YOLO('yolov8n.pt')  # Load YOLOv8n pre-trained weights
+def xai_yolov8s(image):
+    # Load YOLOv8 model
+    model = torch.hub.load('ultralytics/yolov8', 'yolov8s', pretrained=True)
     model.eval()
-
-    # Check if GPU is available and use it
-    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-    model.to(device)
+    model.cpu()
 
     target_layers = [model.model.model[-2]]  # Grad-CAM target layer
-
-    # Process the image through the model
-    results = model([image])
-
-    # If results are a list, extract the first element (detected results)
-    if isinstance(results, list):
-        results = results[0]  # Extracting the first result (if list)
-
-    # Ensure that outputs are in tensor form
-    logits = results.pred[0]  # Get the prediction tensor from the results
 
-    # Parse the detections
-    boxes, colors, names = parse_detections([results])  # Ensure results are passed as a list
+    # Run YOLO detection
+    results = model([image])
+    boxes, colors, names = parse_detections(results)
     detections_img = draw_detections(boxes, colors, names, image.copy())
-
-    # Prepare image for Grad-CAM
+
+    # Prepare input tensor for Grad-CAM
     img_float = np.float32(image) / 255
     transform = transforms.ToTensor()
-    tensor = transform(img_float).unsqueeze(0).to(device)  # Ensure tensor is on the right device
-
-    # Generate CAM images
+    tensor = transform(img_float).unsqueeze(0)
+
+    # Grad-CAM visualization
     cam_image, renormalized_cam_image = generate_cam_image(model, target_layers, tensor, image, boxes)
 
-    # Combine original image, CAM image, and renormalized CAM image
+    # Combine results
     final_image = np.hstack((image, cam_image, renormalized_cam_image))
-
-    # Return final image and a caption
-    caption = "Results using YOLOv8n"
+    caption = "Results using YOLOv8"
     return Image.fromarray(final_image), caption
-
-
-
-from YOLOv8_Explainer import yolov8_heatmap, display_images
-
-def xai_yolov8n(image):
-    model = yolov8_heatmap(
-        weight="yolov8n.pt",
-        conf_threshold=0.4,
-        device = "cpu",
-        method = "EigenCAM",
-        layer=[10, 12, 14, 16, 18, -3],
-        backward_type="all",
-        ratio=0.02,
-        show_box=True,
-        renormalize=False,
-    )
-
-    # Pass the NumPy array to the model
-    imagelist = model(image)  # Use the image array directly
-
-    # Display the resulting images
-    # Pass the NumPy array to the model
-    imagelist = model(image)  # Use the image array directly
-
-    # Display the resulting images
-    print(imagelist)
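
For reference, a minimal sketch of how the refactored xai_yolov8s helper could be called directly. This driver is an assumption, not part of the commit; it presumes yolov8.py from this commit is importable, that the torch.hub weights resolve, and uses an illustrative test-image path.

import numpy as np
from PIL import Image

from yolov8 import xai_yolov8s  # refactored helper from this commit

# The helper expects an RGB NumPy array (H, W, 3), the same data the
# Gradio callback forwards to it.
image = np.array(Image.open("test.jpg").convert("RGB"))

# Returns a PIL image with the original, CAM, and box-renormalized CAM
# panels stacked horizontally, plus a caption string.
result_img, caption = xai_yolov8s(image)
result_img.save("xai_yolov8s_result.png")
print(caption)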