RuoyuChen committed
Commit 4dca37a · 1 Parent(s): bdf21bc

first commit

app.py ADDED
@@ -0,0 +1,368 @@
+ import os
+
+ import gradio as gr
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from PIL import Image
+
+ import cv2
+ import matplotlib
+
+ import clip
+
+ from utils import *
+
+ matplotlib.get_cachedir()
+ plt.rc('font', family="Times New Roman")
+ from sklearn import metrics
+
+ import torch
+ from torchvision import transforms
+
+ from models.submodular_vit_efficient_plus import MultiModalSubModularExplanationEfficientPlus
+
+ # CLIP preprocessing: resize to 224x224 and normalize with the CLIP statistics.
+ data_transform = transforms.Compose(
+     [
+         transforms.Resize(
+             (224, 224), interpolation=transforms.InterpolationMode.BICUBIC
+         ),
+         # transforms.CenterCrop(224),
+         transforms.ToTensor(),
+         transforms.Normalize(
+             mean=(0.48145466, 0.4578275, 0.40821073),
+             std=(0.26862954, 0.26130258, 0.27577711),
+         ),
+     ]
+ )
+
+ class CLIPModel_Super(torch.nn.Module):
+     def __init__(self,
+                  type="ViT-L/14",
+                  download_root=None,
+                  device="cuda"):
+         super().__init__()
+         self.device = device
+         self.model, _ = clip.load(type, device=self.device, download_root=download_root)
+
+         self.model = self.model.float()
+
+     def forward(self, vision_inputs):
+         """
+         Input:
+             vision_inputs: torch.Size([B, C, H, W])
+         Output:
+             embeddings: a d-dimensional vector, torch.Size([B, d])
+         """
+         vision_inputs = vision_inputs.type(torch.float32)
+
+         with torch.no_grad():
+             image_features = self.model.encode_image(vision_inputs)
+             image_features /= image_features.norm(dim=-1, keepdim=True)
+
+         return image_features
+
+ def transform_vision_data(image):
+     """
+     Input:
+         image: an image read by OpenCV, [H, W, C]
+     Output:
+         image: after preprocessing, a tensor [C, H, W]
+     """
+     image = Image.fromarray(image)
+     image = data_transform(image)
+     return image
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ # Instantiate the CLIP visual encoder
+ vis_model = CLIPModel_Super("ViT-L/14", device=device, download_root="./ckpt")
+ vis_model.eval()
+ vis_model.to(device)
+ print("load clip model")
+
+ # Pre-computed CLIP ViT-L/14 zero-shot classifier weights for the ImageNet classes
+ semantic_path = "./clip_vitl_imagenet_zeroweights.pt"
+ if os.path.exists(semantic_path):
+     semantic_feature = torch.load(semantic_path, map_location="cpu")
+     semantic_feature = semantic_feature.to(device)
+     semantic_feature = semantic_feature.type(torch.float32)
+
+ explainer = MultiModalSubModularExplanationEfficientPlus(
+     vis_model, semantic_feature, transform_vision_data, device=device,
+     lambda1=0.01,
+     lambda2=0.05,
+     lambda3=20.,
+     lambda4=5.)
+
+ def add_value_decrease(smdl_mask, json_file):
+     single_mask = np.zeros_like(smdl_mask[0].mean(-1))
+
+     # Marginal gain of each selected sub-region: the current
+     # (consistency + collaboration) score minus the previous step's score.
+     value_list_1 = np.array(json_file["consistency_score"]) + np.array(json_file["collaboration_score"])
+     value_list_2 = np.array([json_file["baseline_score"]] + json_file["consistency_score"][:-1]) + np.array([1 - json_file["org_score"]] + json_file["collaboration_score"][:-1])
+     value_list = value_list_1 - value_list_2
+
+     values = []
+     value = 0
+     for smdl_single_mask, smdl_value in zip(smdl_mask, value_list):
+         value = value - abs(smdl_value)
+         single_mask[smdl_single_mask.sum(-1) > 0] = value
+         values.append(value)
+
+     # Normalize the attribution map to [0, 1]
+     attribution_map = single_mask - single_mask.min()
+     attribution_map /= attribution_map.max()
+
+     return attribution_map, np.array(values)
+
+ def visualization(image, submodular_image_set, saved_json_file, vis_image, index=None, compute_params=True):
+
+     insertion_ours_images = []
+
+     # Start from an all-zero canvas and insert the sub-regions one by one.
+     insertion_image = submodular_image_set[0] - submodular_image_set[0]
+     insertion_ours_images.append(insertion_image)
+     for smdl_sub_mask in submodular_image_set[:]:
+         insertion_image = insertion_image.copy() + smdl_sub_mask
+         insertion_ours_images.append(insertion_image)
+
+     insertion_ours_images_input_results = np.array([1 - saved_json_file["collaboration_score"][-1]] + saved_json_file["consistency_score"])
+
+     if index is None:
+         ours_best_index = np.argmax(insertion_ours_images_input_results)
+     else:
+         ours_best_index = index
+     x = [(insertion_ours_image.sum(-1) != 0).sum() / (image.shape[0] * image.shape[1]) for insertion_ours_image in insertion_ours_images]
+     i = len(x)
+
+     fig, [ax1, ax2, ax3] = plt.subplots(1, 3, gridspec_kw={'width_ratios': [1, 1, 1.5]}, figsize=(30, 8))
+     ax1.spines["left"].set_visible(False)
+     ax1.spines["right"].set_visible(False)
+     ax1.spines["top"].set_visible(False)
+     ax1.spines["bottom"].set_visible(False)
+     ax1.xaxis.set_visible(False)
+     ax1.yaxis.set_visible(False)
+     ax1.set_title('Attribution Map', fontsize=54)
+     ax1.set_facecolor('white')
+     ax1.imshow(vis_image.astype(np.uint8))
+
+     ax2.spines["left"].set_visible(False)
+     ax2.spines["right"].set_visible(False)
+     ax2.spines["top"].set_visible(False)
+     ax2.spines["bottom"].set_visible(False)
+     ax2.xaxis.set_visible(True)
+     ax2.yaxis.set_visible(False)
+     ax2.set_title('Searched Region', fontsize=54)
+     ax2.set_facecolor('white')
+     ax2.set_xlabel("Highest conf. {:.4f}".format(insertion_ours_images_input_results.max()), fontsize=44)
+     ax2.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
+
+     ax3.set_xlim((0, 1))
+     ax3.set_ylim((0, 1))
+
+     ax3.set_ylabel('Recognition Score', fontsize=44)
+     ax3.set_xlabel('Percentage of image revealed', fontsize=44)
+     ax3.tick_params(axis='both', which='major', labelsize=36)
+
+     x_ = x[:i]
+     ours_y = insertion_ours_images_input_results[:i]
+     ax3.plot(x_, ours_y, color='dodgerblue', linewidth=3.5)  # draw the insertion curve
+     ax3.set_facecolor('white')
+     ax3.spines['bottom'].set_color('black')
+     ax3.spines['bottom'].set_linewidth(2.0)
+     ax3.spines['top'].set_color('none')
+     ax3.spines['left'].set_color('black')
+     ax3.spines['left'].set_linewidth(2.0)
+     ax3.spines['right'].set_color('none')
+
+     ax3.scatter(x_[-1], ours_y[-1], color='dodgerblue', s=54)  # plot the latest point
+     # Fill light blue under the curve
+     ax3.fill_between(x_, ours_y, color='dodgerblue', alpha=0.1)
+
+     kernel = np.ones((3, 3), dtype=np.uint8)
+     ax3.axvline(x=x_[int(ours_best_index)], color='red', linewidth=3.5)  # red vertical line at the selected step
+
+     # Highlight the searched region and its boundary
+     mask = (image - insertion_ours_images[int(ours_best_index)]).mean(-1)
+     mask[mask > 0] = 1
+
+     if int(ours_best_index) != 0:
+         dilate = cv2.dilate(mask, kernel, iterations=3)
+         edge = dilate - mask
+
+     image_debug = image.copy()
+
+     image_debug[mask > 0] = image_debug[mask > 0] * 0.5
+     if int(ours_best_index) != 0:
+         image_debug[edge > 0] = np.array([255, 0, 0])
+     ax2.imshow(image_debug)
+
+     if compute_params:
+         auc = metrics.auc(x, insertion_ours_images_input_results)
+
+     ax3.set_title('Insertion Curve', fontsize=54)
+
+     fig.canvas.draw()
+     img_curve = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+     img_curve = img_curve.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+
+     plt.close(fig)  # close the figure to release resources
+
+     if compute_params:
+         return img_curve, insertion_ours_images_input_results.max(), auc, ours_best_index
+     else:
+         return img_curve
+
+ def gen_cam(image, mask):
+     """
+     Generate a heatmap overlay
+     :param image: [H,W,C]
+     :param mask: [H,W], range 0-1
+     :return: tuple(cam, heatmap)
+     """
+     # mask -> heatmap
+     heatmap = cv2.applyColorMap(np.uint8(mask), cv2.COLORMAP_COOL)
+     heatmap = np.float32(heatmap)
+
+     # merge the heatmap with the original image
+     cam = 0.5 * heatmap + 0.5 * np.float32(image)
+     return cam, heatmap.astype(np.uint8)
+
+ def norm_image(image):
+     """
+     Normalize an image to the range [0, 255]
+     :param image: [H,W,C]
+     :return:
+     """
+     image = image.copy()
+     image -= np.max(np.min(image), 0)
+     image /= np.max(image)
+     image *= 255.
+     return np.uint8(image)
+
+ def read_image(file_path):
+     image = Image.open(file_path)
+     image = image.convert("RGB")
+     return np.array(image)
+
+ # Example images offered in the UI
+ default_images = {
+     "Example: Tiger Shark": read_image("images/shark.png"),
+     "Example: Quail": read_image("images/bird.png")
+ }
+
+ def interpret_image(uploaded_image, slider, text_input):
+     # Use the uploaded image if provided; otherwise there is nothing to interpret.
+     if uploaded_image is not None:
+         image = np.array(uploaded_image)
+     else:
+         return None, 0, 0, ""
+
+     image = cv2.resize(image, (224, 224))
+     # Divide the image into superpixel sub-regions as the element set V.
+     element_sets_V = SubRegionDivision(image, mode="slico", region_size=30)
+
+     explainer.k = len(element_sets_V)
+
+     global submodular_image_set
+     global saved_json_file
+     global im
+     submodular_image, submodular_image_set, saved_json_file = explainer(element_sets_V, id=None)
+
+     attribution_map, value_list = add_value_decrease(submodular_image_set, saved_json_file)
+     im, heatmap = gen_cam(image, norm_image(attribution_map))
+
+     image_curve, highest_confidence, insertion_auc_score, ours_best_index = visualization(image, submodular_image_set, saved_json_file, im, index=None)
+
+     text_output_class = "The method explains why the CLIP (ViT-L/14) model identifies an image as {}.".format(imagenet_classes[explainer.target_label])
+
+     return image_curve, highest_confidence, insertion_auc_score, text_output_class
+
+ def visualization_slider(uploaded_image, slider):
+     # Re-render the visualization at the step selected by the slider.
+     if uploaded_image is not None:
+         image = np.array(uploaded_image)
+     else:
+         return None
+
+     image = cv2.resize(image, (224, 224))
+
+     image_curve = visualization(image, submodular_image_set, saved_json_file, im, index=slider, compute_params=False)
+
+     return image_curve
+
+ def update_image(thumbnail_name):
+     # Return the image data for the selected thumbnail
+     return default_images[thumbnail_name]
+
+ # Build the Gradio interface
+ with gr.Blocks() as demo:
+     with gr.Row():
+         with gr.Column():
+             # Row 1: the image upload box and the usage instructions
+             with gr.Row():
+                 # Image upload input
+                 image_input = gr.Image(label="Upload Image", type="numpy")
+
+                 with gr.Column():
+                     gr.Textbox("Thank you for using our interpretable attribution method, which originates from the ICLR 2024 Oral paper titled \"Less is More: Fewer Interpretable Regions via Submodular Subset Selection.\" We have now implemented this method on the multimodal ViT model and achieved promising results in explaining model predictions. A key feature of our approach is its ability to clarify the reasons behind the model's prediction errors. We invite you to try out this demo and explore its capabilities. The source code is available at https://github.com/RuoyuChen10/SMDL-Attribution.\nYou can upload an image yourself or select one of the examples below, then click the Interpreting Model button to get the result. The demo currently does not support choosing categories or descriptions yourself; if you are interested, you can try that from the source code.", label="Instructions for use", interactive=False)
+
+             # Row 2: the example thumbnails
+             with gr.Row():
+                 for key in default_images.keys():
+                     with gr.Column():
+                         gr.Image(value=default_images[key], type="numpy")
+                         button = gr.Button(value=key)
+                         button.click(
+                             fn=lambda k=key: update_image(k),
+                             inputs=[],
+                             outputs=image_input
+                         )
+
+             # Optional text input
+             text_input = gr.Textbox(label="Text Input", placeholder="Enter some text here... (optional)")
+
+         with gr.Column():
+             # Output image and controls
+             image_output = gr.Image(label="Output Image")
+
+             slider = gr.Slider(minimum=0, maximum=50, step=1, label="Confidence Slider")
+
+             text_output_class = gr.Textbox(label="Explaining Category")
+             with gr.Row():
+                 # Show the highest confidence and the insertion AUC score side by side
+                 text_output_confidence = gr.Textbox(label="Highest Confidence")
+                 text_output_auc = gr.Textbox(label="Insertion AUC Score")
+
+             interpret_button = gr.Button("Interpreting Model")
+
+     # Wire up the Interpreting Model button
+     interpret_button.click(
+         fn=interpret_image,
+         inputs=[image_input, slider, text_input],
+         outputs=[image_output, text_output_confidence, text_output_auc, text_output_class]
+     )
+
+     # The slider re-renders the visualization in real time
+     slider.change(
+         fn=visualization_slider,
+         inputs=[image_input, slider],
+         outputs=[image_output]
+     )
+
+ # Launch the Gradio app
+ demo.launch()
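
For reference, the same pipeline can be driven headlessly. A minimal sketch reusing the objects defined in app.py above (explainer, SubRegionDivision, add_value_decrease, gen_cam, norm_image, visualization); the example path comes from this repo, and the snippet assumes the checkpoint files are in place:

import cv2

# Load one of the bundled example images and resize to the model input size.
image = cv2.cvtColor(cv2.imread("images/shark.png"), cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (224, 224))

# Divide into superpixel elements and run the submodular explainer.
element_sets_V = SubRegionDivision(image, mode="slico", region_size=30)
explainer.k = len(element_sets_V)
_, submodular_image_set, saved_json_file = explainer(element_sets_V, id=None)

# Build the attribution map and render the insertion-curve figure.
attribution_map, _ = add_value_decrease(submodular_image_set, saved_json_file)
im, _ = gen_cam(image, norm_image(attribution_map))
curve, best_conf, auc, best_step = visualization(image, submodular_image_set, saved_json_file, im)
print(best_conf, auc)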
clip_vitl_imagenet_zeroweights.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a28c797d2d204c666968d76ad9fb6d4bfc9de4bc39afc591ade407623fed3a85
+ size 1536865
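
This Git LFS pointer stands in for the actual tensor of zero-shot classifier weights; judging by the name and size (1000 ImageNet classes x 768-dim ViT-L/14 text embeddings in fp16 is roughly 1.5 MB), it presumably holds one averaged, L2-normalized text embedding per class. A hedged sketch of how such a matrix is typically built with OpenAI's CLIP prompt ensembling, using the class names and templates from utils.py (this is an assumption about how the shipped file was produced, not code from the commit):

import clip
import torch

from utils import imagenet_classes, imagenet_templates

device = "cuda" if torch.cuda.is_available() else "cpu"
model, _ = clip.load("ViT-L/14", device=device)

with torch.no_grad():
    weights = []
    for classname in imagenet_classes:
        texts = clip.tokenize([t.format(classname) for t in imagenet_templates]).to(device)
        emb = model.encode_text(texts)              # [num_templates, d]
        emb = emb / emb.norm(dim=-1, keepdim=True)  # normalize each prompt embedding
        emb = emb.mean(dim=0)                       # average over the prompt templates
        emb = emb / emb.norm()                      # re-normalize the mean embedding
        weights.append(emb)
    semantic_feature = torch.stack(weights)         # [1000, d], matching app.py's usage
torch.save(semantic_feature.cpu(), "clip_vitl_imagenet_zeroweights.pt")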
images/bird.png ADDED
images/shark.png ADDED
models/submodular_vit_efficient_plus.py ADDED
@@ -0,0 +1,247 @@
+ import math
+ import random
+ import numpy as np
+
+ from tqdm import tqdm
+ import cv2
+ from PIL import Image
+
+ import torch
+ import torch.nn.functional as F
+
+ from .submodular_vit_torch import MultiModalSubModularExplanation
+
+ class MultiModalSubModularExplanationEfficientPlus(MultiModalSubModularExplanation):
+     def __init__(self,
+                  model,
+                  semantic_feature,
+                  preproccessing_function,
+                  k=40,
+                  lambda1=1.0,
+                  lambda2=1.0,
+                  lambda3=1.0,
+                  lambda4=1.0,
+                  device="cuda",
+                  pending_samples=8):
+         super(MultiModalSubModularExplanationEfficientPlus, self).__init__(
+             k=k,
+             model=model,
+             semantic_feature=semantic_feature,
+             preproccessing_function=preproccessing_function,
+
+             lambda1=lambda1,
+             lambda2=lambda2,
+             lambda3=lambda3,
+             lambda4=lambda4,
+
+             device=device)
+
+         # Parameters of the submodular optimization: once only pending_samples
+         # candidates remain, the rejected set stops growing and elements are
+         # added to the accepted set one per iteration.
+         self.pending_samples = pending_samples
+
+     def evaluation_maximun_sample(self,
+                                   main_set,
+                                   decrease_set,
+                                   candidate_set,
+                                   partition_image_set):
+         """
+         Given the current subsets, return the best candidate for the accepted
+         set and the updated rejected (decrease) set
+         """
+         sub_index_sets = []
+         for candidate_ in candidate_set:
+             sub_index_sets.append(
+                 np.concatenate((main_set, np.array([candidate_]))).astype(int))
+
+         sub_index_sets_decrease = []
+         for candidate_ in candidate_set:
+             sub_index_sets_decrease.append(
+                 np.concatenate((decrease_set, np.array([candidate_]))).astype(int))
+
+         # Merge the images of each candidate subset
+         sub_images = torch.stack([
+             self.preproccessing_function(
+                 self.merge_image(sub_index_set, partition_image_set)
+             ) for sub_index_set in sub_index_sets])
+
+         batch_input_images = sub_images.to(self.device)
+
+         with torch.no_grad():
+             # 2. Effectiveness Score
+             score_effectiveness = self.proccess_compute_effectiveness_score(sub_index_sets)
+             score_effectiveness_decrease = self.proccess_compute_effectiveness_score(sub_index_sets_decrease)
+
+             # 3. Consistency Score (also caches self.predicted_scores)
+             score_consistency = self.proccess_compute_consistency_score(batch_input_images)
+
+             # 1. Confidence Score (must run after the consistency score)
+             score_confidence = self.proccess_compute_confidence_score()
+
+             # 4. Collaboration Score: evaluated on the complement of each candidate subset
+             sub_images_reverse = torch.stack([
+                 self.preproccessing_function(
+                     self.org_img - self.merge_image(sub_index_set, partition_image_set)
+                 ) for sub_index_set in sub_index_sets])
+
+             batch_input_images_reverse = sub_images_reverse.to(self.device)
+
+             score_collaboration = 1 - self.proccess_compute_consistency_score(batch_input_images_reverse)
+
+             # Weighted submodular score
+             smdl_score = self.lambda1 * score_confidence + self.lambda2 * score_effectiveness + self.lambda3 * score_consistency + self.lambda4 * score_collaboration
+             arg_max_index = smdl_score.argmax().cpu().item()
+
+             self.saved_json_file["confidence_score_increase"].append(score_confidence[arg_max_index].cpu().item())
+             self.saved_json_file["effectiveness_score_increase"].append(score_effectiveness[arg_max_index].cpu().item())
+             self.saved_json_file["consistency_score_increase"].append(score_consistency[arg_max_index].cpu().item())
+             self.saved_json_file["collaboration_score_increase"].append(score_collaboration[arg_max_index].cpu().item())
+             self.saved_json_file["smdl_score"].append(smdl_score[arg_max_index].cpu().item())
+
+             if len(candidate_set) > self.pending_samples:
+                 smdl_score_decrease = self.lambda1 * score_confidence + self.lambda2 * score_effectiveness_decrease + self.lambda3 * score_consistency + self.lambda4 * score_collaboration
+
+                 # Take the samples with the worst scores as negative-sample estimates
+                 negative_samples_indexes = smdl_score_decrease.topk(self.pending_samples, largest=False).indices.cpu().numpy()
+
+                 if arg_max_index in negative_samples_indexes:
+                     negative_samples_indexes = negative_samples_indexes.tolist()
+                     negative_samples_indexes.remove(arg_max_index)
+                     negative_samples_indexes = np.array(negative_samples_indexes)
+
+                 sub_index_negative_sets = np.array(sub_index_sets_decrease)[negative_samples_indexes]
+
+                 # Merge the images of each negative candidate subset and its complement
+                 sub_images_decrease = torch.stack([
+                     self.preproccessing_function(
+                         self.merge_image(sub_index_set, partition_image_set)
+                     ) for sub_index_set in sub_index_negative_sets])
+
+                 sub_images_decrease_reverse = torch.stack([
+                     self.preproccessing_function(
+                         self.org_img - self.merge_image(sub_index_set, partition_image_set)
+                     ) for sub_index_set in sub_index_negative_sets])
+
+                 # 2. Effectiveness Score
+                 score_effectiveness_decrease_ = score_effectiveness_decrease[negative_samples_indexes]
+
+                 # 3. Consistency Score
+                 score_consistency_decrease = self.proccess_compute_consistency_score(sub_images_decrease.to(self.device))
+
+                 # 1. Confidence Score
+                 score_confidence_decrease = self.proccess_compute_confidence_score()
+
+                 # 4. Collaboration Score
+                 score_collaboration_decrease = 1 - self.proccess_compute_consistency_score(sub_images_decrease_reverse.to(self.device))
+
+                 smdl_score_decrease = self.lambda1 * score_confidence_decrease + self.lambda2 * score_effectiveness_decrease_ + self.lambda3 * score_consistency_decrease + self.lambda4 * score_collaboration_decrease
+                 arg_min_index = smdl_score_decrease.argmin().cpu().item()
+
+                 decrease_set = sub_index_negative_sets[arg_min_index]
+
+                 # The complement of the rejected set is exactly what remains visible on
+                 # the insertion side, so these two scores are recorded via 1 - score.
+                 self.saved_json_file["confidence_score_decrease"].append(score_confidence_decrease[arg_min_index].cpu().item())
+                 self.saved_json_file["effectiveness_score_decrease"].append(score_effectiveness_decrease_[arg_min_index].cpu().item())
+                 self.saved_json_file["consistency_score_decrease"].append(1 - score_collaboration_decrease[arg_min_index].cpu().item())
+                 self.saved_json_file["collaboration_score_decrease"].append(1 - score_consistency_decrease[arg_min_index].cpu().item())
+
+         return sub_index_sets[arg_max_index], decrease_set
+
+     def save_file_init(self):
+         self.saved_json_file = {}
+         self.saved_json_file["sub-k"] = self.k
+
+         self.saved_json_file["confidence_score"] = []
+         self.saved_json_file["effectiveness_score"] = []
+         self.saved_json_file["consistency_score"] = []
+         self.saved_json_file["collaboration_score"] = []
+
+         self.saved_json_file["confidence_score_increase"] = []
+         self.saved_json_file["effectiveness_score_increase"] = []
+         self.saved_json_file["consistency_score_increase"] = []
+         self.saved_json_file["collaboration_score_increase"] = []
+
+         self.saved_json_file["confidence_score_decrease"] = []
+         self.saved_json_file["effectiveness_score_decrease"] = []
+         self.saved_json_file["consistency_score_decrease"] = []
+         self.saved_json_file["collaboration_score_decrease"] = []
+
+         self.saved_json_file["smdl_score"] = []
+         self.saved_json_file["lambda1"] = self.lambda1
+         self.saved_json_file["lambda2"] = self.lambda2
+         self.saved_json_file["lambda3"] = self.lambda3
+         self.saved_json_file["lambda4"] = self.lambda4
+
+     def get_merge_set(self, partition):
+         """
+         Bidirectional greedy search: grow the accepted subset and the rejected
+         (decrease) subset simultaneously, then append the rejected elements in
+         reverse order
+         """
+         Subset = np.array([])
+         Subset_decrease = np.array([])
+
+         indexes = np.arange(len(partition))
+
+         # First compute the pairwise distances of all elements to speed up the effectiveness score.
+         self.calculate_distance_of_each_element(partition)
+
+         self.smdl_score_best = 0
+
+         # Each iteration consumes one element for the accepted set and, while more
+         # than pending_samples candidates remain, one for the rejected set.
+         loop_times = int((self.k - self.pending_samples) / 2) + self.pending_samples
+         for j in tqdm(range(loop_times)):
+             diff = np.setdiff1d(indexes, np.concatenate((Subset, Subset_decrease)))  # in indexes but in neither subset
+
+             sub_candidate_indexes = diff
+             if len(diff) == 1:
+                 Subset = np.concatenate((Subset, np.array(diff)))
+                 break
+
+             Subset, Subset_decrease = self.evaluation_maximun_sample(Subset, Subset_decrease, sub_candidate_indexes, partition)
+
+         # Scores of the full image and of the empty (all-zero) baseline image
+         sub_images = torch.stack([
+             self.preproccessing_function(
+                 self.org_img
+             ),
+             self.preproccessing_function(
+                 self.org_img - self.org_img
+             ),
+         ])
+         scores = self.proccess_compute_consistency_score(sub_images.to(self.device))
+
+         self.saved_json_file["org_score"] = scores[0].cpu().item()
+         self.saved_json_file["baseline_score"] = scores[1].cpu().item()
+
+         self.saved_json_file["consistency_score"] = self.saved_json_file["consistency_score_increase"] + self.saved_json_file["consistency_score_decrease"][::-1] + [scores[0].cpu().item()]
+         self.saved_json_file["collaboration_score"] = self.saved_json_file["collaboration_score_increase"] + self.saved_json_file["collaboration_score_decrease"][::-1] + [1 - scores[1].cpu().item()]
+
+         Subset = np.concatenate((Subset, Subset_decrease[::-1]))
+
+         return Subset.astype(int)
+
+     def __call__(self, image_set, id=None):
+         """
+         Compute the submodular subset and scores for a partitioned image
+         @image_set: [mask_image 1, ..., mask_image m] (cv2 format)
+         """
+         self.save_file_init()
+
+         self.org_img = np.array(image_set).sum(0).astype(np.uint8)
+         source_image = self.preproccessing_function(self.org_img)
+
+         self.source_feature = self.model(source_image.unsqueeze(0).to(self.device))
+         if id is None:
+             # Explain the zero-shot predicted class when no target id is given
+             self.target_label = (self.source_feature @ self.semantic_feature.T).argmax().cpu().item()
+         else:
+             self.target_label = id
+
+         Subset_merge = np.array(image_set)
+         Submodular_Subset = self.get_merge_set(Subset_merge)  # e.g. array([17, 42, 49, ...])
+
+         submodular_image_set = Subset_merge[Submodular_Subset]  # sub_k x (H, W, 3)
+
+         submodular_image = submodular_image_set.sum(0).astype(np.uint8)
+         self.saved_json_file["smdl_score_max"] = max(self.saved_json_file["smdl_score"])
+         self.saved_json_file["smdl_score_max_index"] = self.saved_json_file["smdl_score"].index(self.saved_json_file["smdl_score_max"])
+
+         return submodular_image, submodular_image_set, self.saved_json_file
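
To make the selection rule concrete, here is a tiny self-contained sketch of the weighted score and the two greedy picks, with invented per-candidate scores (the real class recomputes the rejected side's scores on the pruned pending_samples candidates; this only illustrates the argmax/argmin step):

import torch

score_confidence    = torch.tensor([0.90, 0.85, 0.70, 0.95, 0.60])
score_effectiveness = torch.tensor([0.10, 0.30, 0.20, 0.05, 0.40])
score_consistency   = torch.tensor([0.55, 0.60, 0.40, 0.70, 0.30])
score_collaboration = torch.tensor([0.50, 0.45, 0.65, 0.40, 0.70])

lambda1, lambda2, lambda3, lambda4 = 0.01, 0.05, 20.0, 5.0  # the demo's weights

smdl_score = (lambda1 * score_confidence + lambda2 * score_effectiveness
              + lambda3 * score_consistency + lambda4 * score_collaboration)
best = smdl_score.argmax().item()   # grows the accepted subset -> 3
worst = smdl_score.argmin().item()  # estimates the rejected subset -> 4
print(best, worst)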
models/submodular_vit_torch.py ADDED
@@ -0,0 +1,283 @@
+ import math
+ import random
+ import numpy as np
+
+ from tqdm import tqdm
+ import cv2
+ from PIL import Image
+
+ import torch
+ import torch.nn.functional as F
+
+ from itertools import combinations
+ from collections import OrderedDict
+
+ class MultiModalSubModularExplanation(object):
+     def __init__(self,
+                  model,
+                  semantic_feature,
+                  preproccessing_function,
+                  k=40,
+                  lambda1=1.0,
+                  lambda2=1.0,
+                  lambda3=1.0,
+                  lambda4=1.0,
+                  device="cuda"):
+         super(MultiModalSubModularExplanation, self).__init__()
+
+         # Parameters of the submodular optimization
+         self.k = k
+
+         self.model = model
+         self.semantic_feature = semantic_feature
+         self.preproccessing_function = preproccessing_function
+
+         self.lambda1 = lambda1
+         self.lambda2 = lambda2
+         self.lambda3 = lambda3
+         self.lambda4 = lambda4
+
+         self.device = device
+
+     def partition_collection(self, image_set):
+         """
+         Divide the m image elements into n sets
+         """
+         image_set_size = len(image_set)
+         sample_size_per_partition = image_set_size
+
+         image_set_clone = list(image_set)
+         random.shuffle(image_set_clone)
+
+         V_partition = [image_set_clone[i: i + sample_size_per_partition] for i in range(0, image_set_size, sample_size_per_partition)]
+
+         assert len(V_partition[0]) == sample_size_per_partition
+
+         self.s_size = sample_size_per_partition
+         # assert image_set_size > sample_size_per_partition * self.k  # i.e., effectively self.n > self.k?
+         return V_partition
+
+     def merge_image(self, sub_index_set, partition_image_set):
+         """
+         Merge the selected sub-region images into a single image
+         """
+         sub_image_set_ = np.array(partition_image_set)[sub_index_set]
+         image = sub_image_set_.sum(0)
+
+         return image.astype(np.uint8)
+
+     def proccess_compute_confidence_score(self):
+         """
+         Compute the confidence score: one minus the normalized entropy of the
+         softmax predictions cached by proccess_compute_consistency_score
+         """
+         entropy = - torch.sum(self.predicted_scores * torch.log(self.predicted_scores + 1e-7), dim=1)
+         max_entropy = torch.log(torch.tensor(self.predicted_scores.shape[1])).to(self.device)
+         confidence = 1 - (entropy / max_entropy)
+         return confidence
+
+     def proccess_compute_effectiveness_score(self, sub_index_sets):
+         """
+         Compute each subset S's effectiveness score from the precomputed
+         pairwise distance matrix (see calculate_distance_of_each_element)
+         """
+         e_scores = []
+
+         for sub_index in sub_index_sets:
+             cosine_dist = self.effectiveness_dist[:, np.array(sub_index)]  # [len(element), len(sub_index)]
+             cosine_dist = cosine_dist[np.array(sub_index), :]
+
+             # Add the identity so each element's zero self-distance is ignored by the min.
+             eye = torch.eye(cosine_dist.shape[0], device=self.device)
+             adjusted_cosine_dist = cosine_dist + eye
+             e_score = torch.sum(torch.min(adjusted_cosine_dist, dim=1).values)
+             e_scores.append(e_score)
+
+         effectiveness_score = torch.stack(e_scores)
+         if len(sub_index_sets[0]) == 1:
+             effectiveness_score = effectiveness_score * 0
+         return effectiveness_score
+
+     def proccess_compute_consistency_score(self, batch_input_images):
+         """
+         Compute each consistency score: the softmax probability of the target label
+         """
+         with torch.no_grad():
+             visual_features = self.model(batch_input_images)
+             self.predicted_scores = torch.softmax(visual_features @ self.semantic_feature.T, dim=-1)
+             consistency_scores = self.predicted_scores[:, self.target_label]
+
+         return consistency_scores
+
+     def evaluation_maximun_sample(self,
+                                   main_set,
+                                   candidate_set,
+                                   partition_image_set):
+         """
+         Given the current subset, return the best candidate to add to it
+         """
+         sub_index_sets = []
+         for candidate_ in candidate_set:
+             sub_index_sets.append(
+                 np.concatenate((main_set, np.array([candidate_]))).astype(int))
+
+         # Merge the images of each candidate subset
+         sub_images = torch.stack([
+             self.preproccessing_function(
+                 self.merge_image(sub_index_set, partition_image_set)
+             ) for sub_index_set in sub_index_sets])
+
+         batch_input_images = sub_images.to(self.device)
+
+         with torch.no_grad():
+
+             # 2. Effectiveness Score
+             score_effectiveness = self.proccess_compute_effectiveness_score(sub_index_sets)
+
+             # 3. Consistency Score (also caches self.predicted_scores)
+             score_consistency = self.proccess_compute_consistency_score(batch_input_images)
+
+             # 1. Confidence Score (must run after the consistency score)
+             score_confidence = self.proccess_compute_confidence_score()
+
+             # 4. Collaboration Score: evaluated on the complement of each candidate subset
+             sub_images_reverse = torch.stack([
+                 self.preproccessing_function(
+                     self.org_img - self.merge_image(sub_index_set, partition_image_set)
+                 ) for sub_index_set in sub_index_sets])
+
+             batch_input_images_reverse = sub_images_reverse.to(self.device)
+
+             score_collaboration = 1 - self.proccess_compute_consistency_score(batch_input_images_reverse)
+
+             # Weighted submodular score
+             smdl_score = self.lambda1 * score_confidence + self.lambda2 * score_effectiveness + self.lambda3 * score_consistency + self.lambda4 * score_collaboration
+             arg_max_index = smdl_score.argmax().cpu().item()
+
+             self.saved_json_file["confidence_score"].append(score_confidence[arg_max_index].cpu().item())
+             self.saved_json_file["effectiveness_score"].append(score_effectiveness[arg_max_index].cpu().item())
+             self.saved_json_file["consistency_score"].append(score_consistency[arg_max_index].cpu().item())
+             self.saved_json_file["collaboration_score"].append(score_collaboration[arg_max_index].cpu().item())
+             self.saved_json_file["smdl_score"].append(smdl_score[arg_max_index].cpu().item())
+
+         return sub_index_sets[arg_max_index]
+
+     def save_file_init(self):
+         self.saved_json_file = {}
+         self.saved_json_file["sub-k"] = self.k
+         self.saved_json_file["confidence_score"] = []
+         self.saved_json_file["effectiveness_score"] = []
+         self.saved_json_file["consistency_score"] = []
+         self.saved_json_file["collaboration_score"] = []
+         self.saved_json_file["smdl_score"] = []
+         self.saved_json_file["lambda1"] = self.lambda1
+         self.saved_json_file["lambda2"] = self.lambda2
+         self.saved_json_file["lambda3"] = self.lambda3
+         self.saved_json_file["lambda4"] = self.lambda4
+
+     def calculate_distance_of_each_element(self, partition_image_set):
+         """
+         Compute the pairwise distance between all elements, yielding the matrix
+         used by the effectiveness score
+         """
+         with torch.no_grad():
+             partition_images = torch.stack([
+                 self.preproccessing_function(
+                     partition_image
+                 ) for partition_image in partition_image_set]).to(self.device)
+             partition_image_features = self.model(partition_images)
+
+             norm_feature = F.normalize(partition_image_features, p=2, dim=1)
+             # Cosine similarity
+             cosine_similarity = torch.mm(norm_feature, norm_feature.t())
+             cosine_similarity = torch.clamp(cosine_similarity, min=-1, max=1)
+
+             # Angular distance, normalized to [0, 1]
+             self.effectiveness_dist = torch.arccos(cosine_similarity) / math.pi
+
+     def get_merge_set(self, partition):
+         """
+         Greedily select k elements, one per iteration, by maximizing the submodular score
+         """
+         Subset = np.array([])
+
+         indexes = np.arange(len(partition))
+
+         # First compute the pairwise distances of all elements to speed up the effectiveness score.
+         self.calculate_distance_of_each_element(partition)
+
+         self.smdl_score_best = 0
+
+         for j in tqdm(range(self.k)):
+             diff = np.setdiff1d(indexes, np.array(Subset))  # in indexes but not in Subset
+
+             sub_candidate_indexes = diff
+
+             Subset = self.evaluation_maximun_sample(Subset, sub_candidate_indexes, partition)
+
+         return Subset
+
+     def __call__(self, image_set, id=None):
+         """
+         Compute the submodular subset and scores for a partitioned image
+         @image_set: [mask_image 1, ..., mask_image m] (cv2 format)
+         """
+         self.save_file_init()
+
+         self.org_img = np.array(image_set).sum(0).astype(np.uint8)
+         source_image = self.preproccessing_function(self.org_img)
+
+         self.source_feature = self.model(source_image.unsqueeze(0).to(self.device))
+         self.target_label = id
+
+         Subset_merge = np.array(image_set)
+         Submodular_Subset = self.get_merge_set(Subset_merge)  # e.g. array([17, 42, 49, ...])
+
+         submodular_image_set = Subset_merge[Submodular_Subset]  # sub_k x (H, W, 3)
+
+         submodular_image = submodular_image_set.sum(0).astype(np.uint8)
+         self.saved_json_file["smdl_score_max"] = max(self.saved_json_file["smdl_score"])
+         self.saved_json_file["smdl_score_max_index"] = self.saved_json_file["smdl_score"].index(self.saved_json_file["smdl_score_max"])
+
+         return submodular_image, submodular_image_set, self.saved_json_file
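
The confidence term above is one minus the normalized Shannon entropy of the softmax prediction, so a uniform distribution scores near 0 and a one-hot distribution scores near 1. A self-contained sketch with toy distributions (the standalone function name is invented here):

import torch

def confidence_score(predicted_scores: torch.Tensor) -> torch.Tensor:
    # predicted_scores: [B, C] softmax outputs
    entropy = -torch.sum(predicted_scores * torch.log(predicted_scores + 1e-7), dim=1)
    max_entropy = torch.log(torch.tensor(float(predicted_scores.shape[1])))
    return 1 - entropy / max_entropy

uniform = torch.full((1, 1000), 1e-3)               # maximally uncertain
peaked = torch.zeros(1, 1000); peaked[0, 0] = 1.0   # maximally certain
print(confidence_score(uniform))  # ~0.0
print(confidence_score(peaked))   # ~1.0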
utils.py ADDED
@@ -0,0 +1,246 @@
+ import scipy
+
+ import os
+ import numpy as np
+ import cv2
+
+ from scipy.ndimage import gaussian_filter
+ from matplotlib import pyplot as plt
+ # plt.style.use('seaborn')
+ from matplotlib.colors import ListedColormap
+
+ imagenet_classes = ["tench", "goldfish", "great white shark", "tiger shark", "hammerhead shark", "electric ray", "stingray", "rooster", "hen", "ostrich", "brambling", "goldfinch", "house finch", "junco", "indigo bunting", "American robin", "bulbul", "jay", "magpie", "chickadee", "American dipper", "kite (bird of prey)", "bald eagle", "vulture", "great grey owl", "fire salamander", "smooth newt", "newt", "spotted salamander", "axolotl", "American bullfrog", "tree frog", "tailed frog", "loggerhead sea turtle", "leatherback sea turtle", "mud turtle", "terrapin", "box turtle", "banded gecko", "green iguana", "Carolina anole", "desert grassland whiptail lizard", "agama", "frilled-necked lizard", "alligator lizard", "Gila monster", "European green lizard", "chameleon", "Komodo dragon", "Nile crocodile", "American alligator", "triceratops", "worm snake", "ring-necked snake", "eastern hog-nosed snake", "smooth green snake", "kingsnake", "garter snake", "water snake", "vine snake", "night snake", "boa constrictor", "African rock python", "Indian cobra", "green mamba", "sea snake", "Saharan horned viper", "eastern diamondback rattlesnake", "sidewinder rattlesnake", "trilobite", "harvestman", "scorpion", "yellow garden spider", "barn spider", "European garden spider", "southern black widow", "tarantula", "wolf spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse", "prairie grouse", "peafowl", "quail", "partridge", "african grey parrot", "macaw", "sulphur-crested cockatoo", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "duck", "red-breasted merganser", "goose", "black swan", "tusker", "echidna", "platypus", "wallaby", "koala", "wombat", "jellyfish", "sea anemone", "brain coral", "flatworm", "nematode", "conch", "snail", "slug", "sea slug", "chiton", "chambered nautilus", "Dungeness crab", "rock crab", "fiddler crab", "red king crab", "American lobster", "spiny lobster", "crayfish", "hermit crab", "isopod", "white stork", "black stork", "spoonbill", "flamingo", "little blue heron", "great egret", "bittern bird", "crane bird", "limpkin", "common gallinule", "American coot", "bustard", "ruddy turnstone", "dunlin", "common redshank", "dowitcher", "oystercatcher", "pelican", "king penguin", "albatross", "grey whale", "killer whale", "dugong", "sea lion", "Chihuahua", "Japanese Chin", "Maltese", "Pekingese", "Shih Tzu", "King Charles Spaniel", "Papillon", "toy terrier", "Rhodesian Ridgeback", "Afghan Hound", "Basset Hound", "Beagle", "Bloodhound", "Bluetick Coonhound", "Black and Tan Coonhound", "Treeing Walker Coonhound", "English foxhound", "Redbone Coonhound", "borzoi", "Irish Wolfhound", "Italian Greyhound", "Whippet", "Ibizan Hound", "Norwegian Elkhound", "Otterhound", "Saluki", "Scottish Deerhound", "Weimaraner", "Staffordshire Bull Terrier", "American Staffordshire Terrier", "Bedlington Terrier", "Border Terrier", "Kerry Blue Terrier", "Irish Terrier", "Norfolk Terrier", "Norwich Terrier", "Yorkshire Terrier", "Wire Fox Terrier", "Lakeland Terrier", "Sealyham Terrier", "Airedale Terrier", "Cairn Terrier", "Australian Terrier", "Dandie Dinmont Terrier", "Boston Terrier", "Miniature Schnauzer", "Giant Schnauzer", "Standard Schnauzer", "Scottish Terrier", "Tibetan Terrier", "Australian Silky Terrier", "Soft-coated Wheaten Terrier", "West Highland White Terrier", "Lhasa Apso", "Flat-Coated Retriever", "Curly-coated Retriever", "Golden Retriever", "Labrador Retriever", "Chesapeake Bay Retriever", "German Shorthaired Pointer", "Vizsla", "English 
Setter", "Irish Setter", "Gordon Setter", "Brittany dog", "Clumber Spaniel", "English Springer Spaniel", "Welsh Springer Spaniel", "Cocker Spaniel", "Sussex Spaniel", "Irish Water Spaniel", "Kuvasz", "Schipperke", "Groenendael dog", "Malinois", "Briard", "Australian Kelpie", "Komondor", "Old English Sheepdog", "Shetland Sheepdog", "collie", "Border Collie", "Bouvier des Flandres dog", "Rottweiler", "German Shepherd Dog", "Dobermann", "Miniature Pinscher", "Greater Swiss Mountain Dog", "Bernese Mountain Dog", "Appenzeller Sennenhund", "Entlebucher Sennenhund", "Boxer", "Bullmastiff", "Tibetan Mastiff", "French Bulldog", "Great Dane", "St. Bernard", "husky", "Alaskan Malamute", "Siberian Husky", "Dalmatian", "Affenpinscher", "Basenji", "pug", "Leonberger", "Newfoundland dog", "Great Pyrenees dog", "Samoyed", "Pomeranian", "Chow Chow", "Keeshond", "brussels griffon", "Pembroke Welsh Corgi", "Cardigan Welsh Corgi", "Toy Poodle", "Miniature Poodle", "Standard Poodle", "Mexican hairless dog (xoloitzcuintli)", "grey wolf", "Alaskan tundra wolf", "red wolf or maned wolf", "coyote", "dingo", "dhole", "African wild dog", "hyena", "red fox", "kit fox", "Arctic fox", "grey fox", "tabby cat", "tiger cat", "Persian cat", "Siamese cat", "Egyptian Mau", "cougar", "lynx", "leopard", "snow leopard", "jaguar", "lion", "tiger", "cheetah", "brown bear", "American black bear", "polar bear", "sloth bear", "mongoose", "meerkat", "tiger beetle", "ladybug", "ground beetle", "longhorn beetle", "leaf beetle", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant", "grasshopper", "cricket insect", "stick insect", "cockroach", "praying mantis", "cicada", "leafhopper", "lacewing", "dragonfly", "damselfly", "red admiral butterfly", "ringlet butterfly", "monarch butterfly", "small white butterfly", "sulphur butterfly", "gossamer-winged butterfly", "starfish", "sea urchin", "sea cucumber", "cottontail rabbit", "hare", "Angora rabbit", "hamster", "porcupine", "fox squirrel", "marmot", "beaver", "guinea pig", "common sorrel horse", "zebra", "pig", "wild boar", "warthog", "hippopotamus", "ox", "water buffalo", "bison", "ram (adult male sheep)", "bighorn sheep", "Alpine ibex", "hartebeest", "impala (antelope)", "gazelle", "arabian camel", "llama", "weasel", "mink", "European polecat", "black-footed ferret", "otter", "skunk", "badger", "armadillo", "three-toed sloth", "orangutan", "gorilla", "chimpanzee", "gibbon", "siamang", "guenon", "patas monkey", "baboon", "macaque", "langur", "black-and-white colobus", "proboscis monkey", "marmoset", "white-headed capuchin", "howler monkey", "titi monkey", "Geoffroy's spider monkey", "common squirrel monkey", "ring-tailed lemur", "indri", "Asian elephant", "African bush elephant", "red panda", "giant panda", "snoek fish", "eel", "silver salmon", "rock beauty fish", "clownfish", "sturgeon", "gar fish", "lionfish", "pufferfish", "abacus", "abaya", "academic gown", "accordion", "acoustic guitar", "aircraft carrier", "airliner", "airship", "altar", "ambulance", "amphibious vehicle", "analog clock", "apiary", "apron", "trash can", "assault rifle", "backpack", "bakery", "balance beam", "balloon", "ballpoint pen", "Band-Aid", "banjo", "baluster / handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel", "wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "swimming cap", "bath towel", "bathtub", "station wagon", "lighthouse", "beaker", "military hat (bearskin or shako)", "beer bottle", "beer glass", "bell tower", "baby bib", "tandem bicycle", 
"bikini", "ring binder", "binoculars", "birdhouse", "boathouse", "bobsleigh", "bolo tie", "poke bonnet", "bookcase", "bookstore", "bottle cap", "hunting bow", "bow tie", "brass memorial plaque", "bra", "breakwater", "breastplate", "broom", "bucket", "buckle", "bulletproof vest", "high-speed train", "butcher shop", "taxicab", "cauldron", "candle", "cannon", "canoe", "can opener", "cardigan", "car mirror", "carousel", "tool kit", "cardboard box / carton", "car wheel", "automated teller machine", "cassette", "cassette player", "castle", "catamaran", "CD player", "cello", "mobile phone", "chain", "chain-link fence", "chain mail", "chainsaw", "storage chest", "chiffonier", "bell or wind chime", "china cabinet", "Christmas stocking", "church", "movie theater", "cleaver", "cliff dwelling", "cloak", "clogs", "cocktail shaker", "coffee mug", "coffeemaker", "spiral or coil", "combination lock", "computer keyboard", "candy store", "container ship", "convertible", "corkscrew", "cornet", "cowboy boot", "cowboy hat", "cradle", "construction crane", "crash helmet", "crate", "infant bed", "Crock Pot", "croquet ball", "crutch", "cuirass", "dam", "desk", "desktop computer", "rotary dial telephone", "diaper", "digital clock", "digital watch", "dining table", "dishcloth", "dishwasher", "disc brake", "dock", "dog sled", "dome", "doormat", "drilling rig", "drum", "drumstick", "dumbbell", "Dutch oven", "electric fan", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso machine", "face powder", "feather boa", "filing cabinet", "fireboat", "fire truck", "fire screen", "flagpole", "flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster bed", "freight car", "French horn", "frying pan", "fur coat", "garbage truck", "gas mask or respirator", "gas pump", "goblet", "go-kart", "golf ball", "golf cart", "gondola", "gong", "gown", "grand piano", "greenhouse", "radiator grille", "grocery store", "guillotine", "hair clip", "hair spray", "half-track", "hammer", "hamper", "hair dryer", "hand-held computer", "handkerchief", "hard disk drive", "harmonica", "harp", "combine harvester", "hatchet", "holster", "home theater", "honeycomb", "hook", "hoop skirt", "gymnastic horizontal bar", "horse-drawn vehicle", "hourglass", "iPod", "clothes iron", "carved pumpkin", "jeans", "jeep", "T-shirt", "jigsaw puzzle", "rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat", "ladle", "lampshade", "laptop computer", "lawn mower", "lens cap", "letter opener", "library", "lifeboat", "lighter", "limousine", "ocean liner", "lipstick", "slip-on shoe", "lotion", "music speaker", "loupe magnifying glass", "sawmill", "magnetic compass", "messenger bag", "mailbox", "tights", "one-piece bathing suit", "manhole cover", "maraca", "marimba", "mask", "matchstick", "maypole", "maze", "measuring cup", "medicine cabinet", "megalith", "microphone", "microwave oven", "military uniform", "milk can", "minibus", "miniskirt", "minivan", "missile", "mitten", "mixing bowl", "mobile home", "ford model t", "modem", "monastery", "monitor", "moped", "mortar and pestle", "graduation cap", "mosque", "mosquito net", "vespa", "mountain bike", "tent", "computer mouse", "mousetrap", "moving van", "muzzle", "metal nail", "neck brace", "necklace", "baby pacifier", "notebook computer", "obelisk", "oboe", "ocarina", "odometer", "oil filter", "pipe organ", "oscilloscope", "overskirt", "bullock cart", "oxygen mask", "product packet / packaging", "paddle", "paddle wheel", "padlock", "paintbrush", 
"pajamas", "palace", "pan flute", "paper towel", "parachute", "parallel bars", "park bench", "parking meter", "railroad car", "patio", "payphone", "pedestal", "pencil case", "pencil sharpener", "perfume", "Petri dish", "photocopier", "plectrum", "Pickelhaube", "picket fence", "pickup truck", "pier", "piggy bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate ship", "drink pitcher", "block plane", "planetarium", "plastic bag", "plate rack", "farm plow", "plunger", "Polaroid camera", "pole", "police van", "poncho", "pool table", "soda bottle", "plant pot", "potter's wheel", "power drill", "prayer rug", "printer", "prison", "missile", "projector", "hockey puck", "punching bag", "purse", "quill", "quilt", "race car", "racket", "radiator", "radio", "radio telescope", "rain barrel", "recreational vehicle", "fishing casting reel", "reflex camera", "refrigerator", "remote control", "restaurant", "revolver", "rifle", "rocking chair", "rotisserie", "eraser", "rugby ball", "ruler measuring stick", "sneaker", "safe", "safety pin", "salt shaker", "sandal", "sarong", "saxophone", "scabbard", "weighing scale", "school bus", "schooner", "scoreboard", "CRT monitor", "screw", "screwdriver", "seat belt", "sewing machine", "shield", "shoe store", "shoji screen / room divider", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "balaclava ski mask", "sleeping bag", "slide rule", "sliding door", "slot machine", "snorkel", "snowmobile", "snowplow", "soap dispenser", "soccer ball", "sock", "solar thermal collector", "sombrero", "soup bowl", "keyboard space bar", "space heater", "space shuttle", "spatula", "motorboat", "spider web", "spindle", "sports car", "spotlight", "stage", "steam locomotive", "through arch bridge", "steel drum", "stethoscope", "scarf", "stone wall", "stopwatch", "stove", "strainer", "tram", "stretcher", "couch", "stupa", "submarine", "suit", "sundial", "sunglasses", "sunglasses", "sunscreen", "suspension bridge", "mop", "sweatshirt", "swim trunks / shorts", "swing", "electrical switch", "syringe", "table lamp", "tank", "tape player", "teapot", "teddy bear", "television", "tennis ball", "thatched roof", "front curtain", "thimble", "threshing machine", "throne", "tile roof", "toaster", "tobacco shop", "toilet seat", "torch", "totem pole", "tow truck", "toy store", "tractor", "semi-trailer truck", "tray", "trench coat", "tricycle", "trimaran", "tripod", "triumphal arch", "trolleybus", "trombone", "hot tub", "turnstile", "typewriter keyboard", "umbrella", "unicycle", "upright piano", "vacuum cleaner", "vase", "vaulted or arched ceiling", "velvet fabric", "vending machine", "vestment", "viaduct", "violin", "volleyball", "waffle iron", "wall clock", "wallet", "wardrobe", "military aircraft", "sink", "washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "hair wig", "window screen", "window shade", "Windsor tie", "wine bottle", "airplane wing", "wok", "wooden spoon", "wool", "split-rail fence", "shipwreck", "sailboat", "yurt", "website", "comic book", "crossword", "traffic or street sign", "traffic light", "dust jacket", "menu", "plate", "guacamole", "consomme", "hot pot", "trifle", "ice cream", "popsicle", "baguette", "bagel", "pretzel", "cheeseburger", "hot dog", "mashed potatoes", "cabbage", "broccoli", "cauliflower", "zucchini", "spaghetti squash", "acorn squash", "butternut squash", "cucumber", "artichoke", "bell pepper", "cardoon", "mushroom", "Granny Smith apple", "strawberry", "orange", "lemon", "fig", 
"pineapple", "banana", "jackfruit", "cherimoya (custard apple)", "pomegranate", "hay", "carbonara", "chocolate syrup", "dough", "meatloaf", "pizza", "pot pie", "burrito", "red wine", "espresso", "tea cup", "eggnog", "mountain", "bubble", "cliff", "coral reef", "geyser", "lakeshore", "promontory", "sandbar", "beach", "valley", "volcano", "baseball player", "bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper", "corn", "acorn", "rose hip", "horse chestnut seed", "coral fungus", "agaric", "gyromitra", "stinkhorn mushroom", "earth star fungus", "hen of the woods mushroom", "bolete", "corn cob", "toilet paper"]
+
+ imagenet_templates = [
+     'a bad photo of a {}.',
+     'a photo of many {}.',
+     'a sculpture of a {}.',
+     'a photo of the hard to see {}.',
+     'a low resolution photo of the {}.',
+     'a rendering of a {}.',
+     'graffiti of a {}.',
+     'a bad photo of the {}.',
+     'a cropped photo of the {}.',
+     'a tattoo of a {}.',
+     'the embroidered {}.',
+     'a photo of a hard to see {}.',
+     'a bright photo of a {}.',
+     'a photo of a clean {}.',
+     'a photo of a dirty {}.',
+     'a dark photo of the {}.',
+     'a drawing of a {}.',
+     'a photo of my {}.',
+     'the plastic {}.',
+     'a photo of the cool {}.',
+     'a close-up photo of a {}.',
+     'a black and white photo of the {}.',
+     'a painting of the {}.',
+     'a painting of a {}.',
+     'a pixelated photo of the {}.',
+     'a sculpture of the {}.',
+     'a bright photo of the {}.',
+     'a cropped photo of a {}.',
+     'a plastic {}.',
+     'a photo of the dirty {}.',
+     'a jpeg corrupted photo of a {}.',
+     'a blurry photo of the {}.',
+     'a photo of the {}.',
+     'a good photo of the {}.',
+     'a rendering of the {}.',
+     'a {} in a video game.',
+     'a photo of one {}.',
+     'a doodle of a {}.',
+     'a close-up photo of the {}.',
+     'a photo of a {}.',
+     'the origami {}.',
+     'the {} in a video game.',
+     'a sketch of a {}.',
+     'a doodle of the {}.',
+     'a origami {}.',
+     'a low resolution photo of a {}.',
+     'the toy {}.',
+     'a rendition of the {}.',
+     'a photo of the clean {}.',
+     'a photo of a large {}.',
+     'a rendition of a {}.',
+     'a photo of a nice {}.',
+     'a photo of a weird {}.',
+     'a blurry photo of a {}.',
+     'a cartoon {}.',
+     'art of a {}.',
+     'a sketch of the {}.',
+     'a embroidered {}.',
+     'a pixelated photo of a {}.',
+     'itap of the {}.',
+     'a jpeg corrupted photo of the {}.',
+     'a good photo of a {}.',
+     'a plushie {}.',
+     'a photo of the nice {}.',
+     'a photo of the small {}.',
+     'a photo of the weird {}.',
+     'the cartoon {}.',
+     'art of the {}.',
+     'a drawing of the {}.',
+     'a photo of the large {}.',
+     'a black and white photo of a {}.',
+     'the plushie {}.',
+     'a dark photo of a {}.',
+     'itap of a {}.',
+     'graffiti of the {}.',
+     'a toy {}.',
+     'itap of my {}.',
+     'a photo of a cool {}.',
+     'a photo of a small {}.',
+     'a tattoo of the {}.',
+ ]
+
+ cub_classes = [
+ 'Black footed Albatross', 'Laysan Albatross', 'Sooty Albatross', 'Groove billed Ani', 'Crested Auklet', 'Least Auklet', 'Parakeet Auklet', 'Rhinoceros Auklet', 'Brewer Blackbird', 'Red winged Blackbird', 'Rusty Blackbird', 'Yellow headed Blackbird', 'Bobolink', 'Indigo Bunting', 'Lazuli Bunting', 'Painted Bunting', 'Cardinal', 'Spotted Catbird', 'Gray Catbird', 'Yellow breasted Chat', 'Eastern Towhee', 'Chuck will Widow', 'Brandt Cormorant', 'Red faced Cormorant', 'Pelagic Cormorant', 'Bronzed Cowbird', 'Shiny Cowbird', 'Brown Creeper', 'American Crow', 'Fish Crow', 'Black billed Cuckoo', 'Mangrove Cuckoo', 'Yellow billed Cuckoo', 'Gray crowned Rosy Finch', 'Purple Finch', 'Northern Flicker', 'Acadian Flycatcher', 'Great Crested Flycatcher', 'Least Flycatcher', 'Olive sided Flycatcher', 'Scissor tailed Flycatcher', 'Vermilion Flycatcher', 'Yellow bellied Flycatcher', 'Frigatebird', 'Northern Fulmar', 'Gadwall', 'American Goldfinch', 'European Goldfinch', 'Boat tailed Grackle', 'Eared Grebe', 'Horned Grebe', 'Pied billed Grebe', 'Western Grebe', 'Blue Grosbeak', 'Evening Grosbeak', 'Pine Grosbeak', 'Rose breasted Grosbeak', 'Pigeon Guillemot', 'California Gull', 'Glaucous winged Gull', 'Heermann Gull', 'Herring Gull', 'Ivory Gull', 'Ring billed Gull', 'Slaty backed Gull', 'Western Gull', 'Anna Hummingbird', 'Ruby throated Hummingbird', 'Rufous Hummingbird', 'Green Violetear', 'Long tailed Jaeger', 'Pomarine Jaeger', 'Blue Jay', 'Florida Jay', 'Green Jay', 'Dark eyed Junco', 'Tropical Kingbird', 'Gray Kingbird', 'Belted Kingfisher', 'Green Kingfisher', 'Pied Kingfisher', 'Ringed Kingfisher', 'White breasted Kingfisher', 'Red legged Kittiwake', 'Horned Lark', 'Pacific Loon', 'Mallard', 'Western Meadowlark', 'Hooded Merganser', 'Red breasted Merganser', 'Mockingbird', 'Nighthawk', 'Clark Nutcracker', 'White breasted Nuthatch', 'Baltimore Oriole', 'Hooded Oriole', 'Orchard Oriole', 'Scott Oriole', 'Ovenbird', 'Brown Pelican', 'White Pelican', 'Western Wood Pewee', 'Sayornis', 'American Pipit', 'Whip poor Will', 'Horned Puffin', 'Common Raven', 'White necked Raven', 'American Redstart', 'Geococcyx', 'Loggerhead Shrike', 'Great Grey Shrike', 'Baird Sparrow', 'Black throated Sparrow', 'Brewer Sparrow', 'Chipping Sparrow', 'Clay colored Sparrow', 'House Sparrow', 'Field Sparrow', 'Fox Sparrow', 'Grasshopper Sparrow', 'Harris Sparrow', 'Henslow Sparrow', 'Le Conte Sparrow', 'Lincoln Sparrow', 'Nelson Sharp tailed Sparrow', 'Savannah Sparrow', 'Seaside Sparrow', 'Song Sparrow', 'Tree Sparrow', 'Vesper Sparrow', 'White crowned Sparrow', 'White throated Sparrow', 'Cape Glossy Starling', 'Bank Swallow', 'Barn Swallow', 'Cliff Swallow', 'Tree Swallow', 'Scarlet Tanager', 'Summer Tanager', 'Arctic Tern', 'Black Tern', 'Caspian Tern', 'Common Tern', 'Elegant Tern', 'Forsters Tern', 'Least Tern', 'Green tailed Towhee', 'Brown Thrasher', 'Sage Thrasher', 'Black capped Vireo', 'Blue headed Vireo', 'Philadelphia Vireo', 'Red eyed Vireo', 'Warbling Vireo', 'White eyed Vireo', 'Yellow throated Vireo', 'Bay breasted Warbler', 'Black and white Warbler', 'Black throated Blue Warbler', 'Blue winged Warbler', 'Canada Warbler', 'Cape May Warbler', 'Cerulean Warbler', 'Chestnut sided Warbler', 'Golden winged Warbler', 'Hooded Warbler', 'Kentucky Warbler', 'Magnolia Warbler', 'Mourning Warbler', 'Myrtle Warbler', 'Nashville Warbler', 'Orange crowned Warbler', 'Palm Warbler', 'Pine Warbler', 'Prairie Warbler', 'Prothonotary Warbler', 'Swainson Warbler', 'Tennessee Warbler', 'Wilson Warbler', 'Worm eating Warbler', 
'Yellow Warbler', 'Northern Waterthrush', 'Louisiana Waterthrush', 'Bohemian Waxwing', 'Cedar Waxwing', 'American Three toed Woodpecker', 'Pileated Woodpecker', 'Red bellied Woodpecker', 'Red cockaded Woodpecker', 'Red headed Woodpecker', 'Downy Woodpecker', 'Bewick Wren', 'Cactus Wren', 'Carolina Wren', 'House Wren', 'Marsh Wren', 'Rock Wren', 'Winter Wren', 'Common Yellowthroat'
+ ]
+
+ cub_templates = [
+     'a photo of a {}, a type of bird.',
+ ]
+
+ lc_lung_classes = [
+     "lung adenocarcinoma",
+     "benign lung",
+     "lung squamous cell carcinoma"
+ ]
+
+ lc_lung_template = 'a histopathology slide showing '
+
+ def load_image(path, size=224):
+     # Read with OpenCV (BGR), convert to RGB, and resize.
+     img = cv2.resize(cv2.imread(path)[..., ::-1], (size, size))
+     return img.astype(np.float32)
+
+ def set_size(w, h):
+     """Set the matplotlib figure size"""
+     plt.rcParams["figure.figsize"] = [w, h]
+
+ def show(img, p=False, smooth=False, minn=None, maxx=None, **kwargs):
+     """Display a numpy/tf tensor"""
+     img = np.array(img, dtype=np.float32)
+
+     # convert channel-first to channel-last if needed
+     if img.shape[0] == 1:
+         img = img[0]
+     elif img.shape[0] == 3:
+         img = np.moveaxis(img, 0, -1)
+
+     # squeeze a single-channel image for colormapping
+     if img.shape[-1] == 1:
+         img = img[:, :, 0]
+
+     # normalize to [0, 1]
+     if minn is None:
+         if img.max() > 1 or img.min() < 0:
+             img -= img.min(); img /= img.max()
+
+     # clip to a percentile range if requested
+     if p is not False:
+         img = np.clip(img, np.percentile(img, p), np.percentile(img, 100 - p))
+
+     if smooth and len(img.shape) == 2:
+         img = gaussian_filter(img, smooth)
+
+     if minn is not None:
+         plt.imshow(img, vmin=minn, vmax=maxx, **kwargs)
+     else:
+         plt.imshow(img, **kwargs)
+     plt.axis('off')
+     plt.grid(None)
+
+ def get_alpha_cmap(cmap):
+     if isinstance(cmap, str):
+         cmap = plt.get_cmap(cmap)
+
+     alpha_cmap = cmap(np.arange(cmap.N))
+     alpha_cmap[:, -1] = np.linspace(0, 1, cmap.N)
+     alpha_cmap = ListedColormap(alpha_cmap)
+
+     return alpha_cmap
+
+ def mkdir(name):
+     '''
+     Create a folder if it does not already exist
+     '''
+     if not os.path.exists(name):
+         try:
+             os.makedirs(name)
+         except OSError:
+             print("already exist")
+     return 0
+
+ def norm(image):
+     """
+     Normalize an image to the range [0, 1]
+     :param image: [H,W] or [H,W,C]
+     :return:
+     """
+     image = image.copy()
+     image -= np.max(np.min(image), 0)
+     image /= np.max(image)
+
+     return image
+
+ def norm_image(image):
+     """
+     Normalize an image to the range [0, 255]
+     :param image: [H,W,C]
+     :return:
+     """
+     image = image.copy()
+     image -= np.max(np.min(image), 0)
+     image /= np.max(image)
+     image *= 255.
+     return np.uint8(image)
+
+ def gen_cam(image, mask):
+     """
+     Generate a CAM visualization
+     :param image: [H,W,C], the original image
+     :param mask: [H,W], range 0-1
+     :return: tuple(cam, heatmap)
+     """
+     # mask -> heatmap
+     heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
+
+     # merge the heatmap with the original image
+     cam = 0.5 * heatmap + 0.5 * image
+     return norm_image(cam), heatmap
+
+ def SubRegionDivision(image, mode="slico", region_size=30):
+     element_sets_V = []
+     if mode == "slico":
+         slic = cv2.ximgproc.createSuperpixelSLIC(image, region_size=region_size, ruler=20.0)
+         slic.iterate(20)  # more iterations give a better segmentation
+         label_slic = slic.getLabels()  # superpixel label map
+         number_slic = slic.getNumberOfSuperpixels()  # number of superpixels
+
+         for i in range(number_slic):
+             img_copp = image.copy()
+             img_copp = img_copp * (label_slic == i)[:, :, np.newaxis]
+             element_sets_V.append(img_copp)
+     elif mode == "seeds":
+         seeds = cv2.ximgproc.createSuperpixelSEEDS(image.shape[1], image.shape[0], image.shape[2], num_superpixels=50, num_levels=3)
+         seeds.iterate(image, 10)  # input size must match the initialization shape; 10 iterations
+         label_seeds = seeds.getLabels()
+         number_seeds = seeds.getNumberOfSuperpixels()
+
+         for i in range(number_seeds):
+             img_copp = image.copy()
+             img_copp = img_copp * (label_seeds == i)[:, :, np.newaxis]
+             element_sets_V.append(img_copp)
+     return element_sets_V
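
Note that SubRegionDivision relies on cv2.ximgproc, which ships with the opencv-contrib-python package rather than plain opencv-python. A minimal usage sketch with one of this repo's example images: since every pixel belongs to exactly one superpixel, summing the returned elements reconstructs the input image.

import cv2
import numpy as np

from utils import SubRegionDivision

image = cv2.resize(cv2.imread("images/shark.png"), (224, 224))
elements = SubRegionDivision(image, mode="slico", region_size=30)

# Each element is the image with everything outside one superpixel zeroed out.
reconstructed = np.array(elements).sum(0).astype(np.uint8)
assert (reconstructed == image).all()
print(len(elements), "sub-regions")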