Update app.py
app.py (CHANGED)
```diff
@@ -8,6 +8,9 @@ import torchvision.transforms as transforms
 import pandas as pd
 import cv2
 import numpy as np
+from PIL import Image
+from get_data import get_augmentation
+from configs.dataset_params import normalize_params
 
 def overlapping_features_on_input(model,output, feature_maps, input, target):
     W=model.linear.layer.weight
```
```diff
@@ -20,14 +23,16 @@ def overlapping_features_on_input(model,output, feature_maps, input, target):
     label=np.argmax(output)+1
 
     Interpretable_Selection= W[label,:]
-
+    print("W",Interpretable_Selection)
     input_np=np.array(input)
     h,w= input.shape[:2]
-
+    print("h,w:",h,w)
     Interpretable_Features=[]
     Feature_image_list=[]
+
     for S in range(len(Interpretable_Selection)):
         if Interpretable_Selection[S] > 0:
+
             Interpretable_Features.append(feature_maps[S])
             Feature_image=cv2.resize(feature_maps[S],(w,h))
             Feature_image=((Feature_image-np.min(Feature_image))/(np.max(Feature_image)-np.min(Feature_image)))*255
```
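The hunk above ends mid-function: each positively weighted feature map is resized to the input size and min-max scaled to [0, 255]. Note that the scaling divides by `np.max - np.min`, which is zero for a constant feature map. A minimal sketch of how such a scaled map could be composited onto the input; `overlay_feature_map` is a hypothetical helper, not code from this commit, and it assumes a BGR uint8 image as `cv2` provides:

```python
import cv2
import numpy as np

def overlay_feature_map(input_bgr: np.ndarray, feature_map: np.ndarray, alpha: float = 0.5) -> np.ndarray:
    """Resize, min-max scale, colorize, and blend one feature map onto the input."""
    h, w = input_bgr.shape[:2]
    fm = cv2.resize(feature_map, (w, h))                 # cv2.resize takes (width, height)
    fm = (fm - fm.min()) / (fm.max() - fm.min() + 1e-8)  # epsilon guards against a constant map
    heat = cv2.applyColorMap((fm * 255).astype(np.uint8), cv2.COLORMAP_JET)
    return cv2.addWeighted(input_bgr, 1.0 - alpha, heat, alpha, 0.0)
```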
```diff
@@ -47,63 +52,71 @@ def genreate_intepriable_output(input,dataset="CUB2011", arch="resnet50",seed=12
     n_classes = dataset_constants[dataset]["num_classes"]
 
     model = get_model(arch, n_classes, reduced_strides)
-    tr=transforms.
-
+    tr=transforms.Compose([
+        transforms.Resize(256),
+        transforms.CenterCrop(224),
+        transforms.ToTensor(),
+    ])
+    TR=get_augmentation(0.1, img_size, False, False, True, True, normalize_params["CUB2011"])
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     if folder is None:
-        folder = Path(f"tmp/{arch}/{dataset}/{seed}/"
+        folder = Path.home() / f"tmp/{arch}/{dataset}/{seed}/"
 
-    state_dict = torch.load(folder / f"{model_type}_{n_features}_{n_per_class}_FinetunedModel.pth"
-    selection= torch.load(folder / f"SlDD_Selection_50.pt"
+    state_dict = torch.load(folder / f"{model_type}_{n_features}_{n_per_class}_FinetunedModel.pth")
+    selection= torch.load(folder / f"SlDD_Selection_50.pt")
     state_dict['linear.selection']=selection
 
     feature_sel, sparse_layer, current_mean, current_std, bias_sparse = extract_sel_mean_std_bias_assignemnt(state_dict)
     model.set_model_sldd(feature_sel, sparse_layer, current_mean, current_std, bias_sparse)
     model.load_state_dict(state_dict)
+    input=Image.fromarray(input)
 
     input = tr(input)
     input= input.unsqueeze(0)
     input= input.to(device)
     model = model.to(device)
-
-
-
-
-
-
-    print("outputclass:",output)
-    data_dir=Path("tmp/Datasets/CUB200/CUB_200_2011/")
-    labels = pd.read_csv(data_dir/"image_class_labels.txt", sep=' ', names=['img_id', 'target'])
-    namelist=pd.read_csv(data_dir/"images.txt",sep=' ',names=['img_id','file_name'])
-    classlist=pd.read_csv(data_dir/"classes.txt",sep=' ',names=['cl_id','class_name'])
-    options_output=labels[labels['target']==output]
-    options_output=options_output.sample(1)
-    others=labels[labels['target']!=output]
-    options_others=others.sample(3)
-    options = pd.concat([options_others, options_output], ignore_index=True)
-    shuffled_options = options.sample(frac=1).reset_index(drop=True)
-    print("shuffled:",shuffled_options)
-    op=[]
-
-    for i in shuffled_options['img_id']:
-        print(i)
-        filenames=namelist.loc[namelist['img_id']==i,'file_name'].values[0]
-        targets=shuffled_options.loc[shuffled_options['img_id']==i,'target'].values[0]
-
+    model.eval()
+    with torch.no_grad():
+        output, feature_maps, final_features = model(input, with_feature_maps=True, with_final_features=True)
+    print("final features:",final_features)
+    output=output.detach().cpu().numpy()
+    output= np.argmax(output)+1
 
 
-
-
-
-
-
-
-
-
-
-
-    op
+    print("outputclass:",output)
+    data_dir=Path.home()/"tmp/Datasets/CUB200/CUB_200_2011/"
+    labels = pd.read_csv(data_dir/"image_class_labels.txt", sep=' ', names=['img_id', 'target'])
+    namelist=pd.read_csv(data_dir/"images.txt",sep=' ',names=['img_id','file_name'])
+    classlist=pd.read_csv(data_dir/"classes.txt",sep=' ',names=['cl_id','class_name'])
+    options_output=labels[labels['target']==output]
+    options_output=options_output.sample(1)
+    others=labels[labels['target']!=output]
+    options_others=others.sample(3)
+    options = pd.concat([options_others, options_output], ignore_index=True)
+    shuffled_options = options.sample(frac=1).reset_index(drop=True)
+    print("shuffled:",shuffled_options)
+    op=[]
+
+
+    for i in shuffled_options['img_id']:
+
+        filenames=namelist.loc[namelist['img_id']==i,'file_name'].values[0]
+        targets=shuffled_options.loc[shuffled_options['img_id']==i,'target'].values[0]
+
+
+        classes=classlist.loc[classlist['cl_id']==targets, 'class_name'].values[0]
+
+
+        op_img=cv2.imread(data_dir/f"images/{filenames}")
+
+        op_imag=Image.fromarray(op_img)
+        op_images=TR(op_imag)
+        op_images=op_images.unsqueeze(0)
+        op_images=op_images.to(device)
+        OP, feature_maps_op =model(op_images,with_feature_maps=True,with_final_features=False)
+
+        opt= overlapping_features_on_input(model,OP, feature_maps_op,op_img,targets)
+        op+=opt
 
     return op
 
```
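One observation on the two pipelines introduced above: `tr`, applied to the query image, stops at `ToTensor()`, while `TR` comes from `get_augmentation` with `normalize_params["CUB2011"]` and so presumably normalizes the gallery images. If the backbone was trained on normalized inputs, the query path likely needs a `Normalize` step as well. A sketch, assuming ImageNet-style statistics; the actual values live in `normalize_params` and may differ:

```python
import torchvision.transforms as transforms

# Assumed statistics; the real ones come from configs.dataset_params.normalize_params["CUB2011"].
MEAN, STD = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]

tr = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(MEAN, STD),  # keeps the query on the same scale as TR's output
])
```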
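The new inference block converts the logits to a 1-based CUB class id with `np.argmax(output)+1`, matching the `target` column of `image_class_labels.txt`. Note, however, that `overlapping_features_on_input` computes the same `+1` label and then indexes `W[label,:]`: if the rows of `W` are 0-indexed by class, that reads the weights of the *next* class and overflows for the last one. A small helper separating the two conventions (hypothetical, not part of the commit):

```python
import numpy as np

def split_prediction(logits: np.ndarray) -> tuple[int, int]:
    """Return (0-based row index into W, 1-based CUB class id) for the top prediction."""
    pred_idx = int(np.argmax(logits))  # 0-based: use for weight-matrix lookups
    return pred_idx, pred_idx + 1      # 1-based: use for the CUB annotation files
```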
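Two likely pitfalls in the gallery loop: `cv2.imread` expects a string path, so passing the `pathlib.Path` built from `data_dir` fails on many OpenCV builds, and a failed read silently returns `None`; it also yields BGR channel order, which `Image.fromarray` will interpret as RGB (the `classes` name looked up from `classes.txt` is currently unused, too). A hardened loader, with `load_rgb` as a hypothetical helper rather than code from this commit:

```python
import cv2
from PIL import Image

def load_rgb(path) -> Image.Image:
    img_bgr = cv2.imread(str(path))        # str() accepts pathlib.Path inputs
    if img_bgr is None:                    # imread returns None instead of raising
        raise FileNotFoundError(f"could not read image: {path}")
    return Image.fromarray(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB))
```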
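The loop extends `op` with whatever list `overlapping_features_on_input` returns (presumably `Feature_image_list`), and `genreate_intepriable_output` hands that list back to the Space for rendering. A minimal driver sketch, assuming the function is imported from this `app.py` and given an HWC uint8 RGB array as the internal `Image.fromarray(input)` call expects; the file name is illustrative:

```python
import numpy as np
from PIL import Image

from app import genreate_intepriable_output  # name as spelled in app.py

# Load a query image as an RGB array.
query = np.array(Image.open("example_bird.jpg").convert("RGB"))
gallery = genreate_intepriable_output(query, dataset="CUB2011", arch="resnet50")
print(f"{len(gallery)} overlay images returned")
```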