RRFRRF2 committed
Commit a619d03 · 1 Parent(s): a63f41e
This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitignore +4 -1
  2. DenseNet-CIFAR10/Classification-backdoor/dataset/info.json +1 -1
  3. DenseNet-CIFAR10/Classification-noisy/dataset/info.json +1 -1
  4. EfficientNet-CIFAR10/Classification-backdoor/dataset/info.json +1 -1
  5. EfficientNet-CIFAR10/Classification-noisy/dataset/info.json +1 -1
  6. EfficientNet-CIFAR10/Classification-normal/dataset/info.json +1 -1
  7. GoogLeNet-CIFAR10/Classification-backdoor/dataset/index.json +0 -0
  8. GoogLeNet-CIFAR10/Classification-backdoor/dataset/info.json +4 -0
  9. Image/LeNet5/model/0/epoch11/subject_model.pth → GoogLeNet-CIFAR10/Classification-backdoor/dataset/labels.npy +2 -2
  10. GoogLeNet-CIFAR10/Classification-backdoor/readme.md +54 -0
  11. GoogLeNet-CIFAR10/Classification-backdoor/scripts/create_index.py +18 -0
  12. GoogLeNet-CIFAR10/Classification-backdoor/scripts/dataset_utils.py +59 -0
  13. GoogLeNet-CIFAR10/Classification-backdoor/scripts/get_raw_data.py +111 -0
  14. GoogLeNet-CIFAR10/Classification-backdoor/scripts/get_representation.py +272 -0
  15. GoogLeNet-CIFAR10/Classification-backdoor/scripts/model.py +189 -0
  16. GoogLeNet-CIFAR10/Classification-backdoor/scripts/train.py +414 -0
  17. GoogLeNet-CIFAR10/Classification-backdoor/scripts/train.yaml +10 -0
  18. GoogLeNet-CIFAR10/Classification-noisy/dataset/index.json +0 -0
  19. GoogLeNet-CIFAR10/Classification-noisy/dataset/info.json +4 -0
  20. Image/LeNet5/model/0/epoch1/subject_model.pth → GoogLeNet-CIFAR10/Classification-noisy/dataset/labels.npy +2 -2
  21. Image/LeNet5/model/0/epoch12/subject_model.pth → GoogLeNet-CIFAR10/Classification-noisy/dataset/noise_index.npy +2 -2
  22. GoogLeNet-CIFAR10/Classification-noisy/readme.md +54 -0
  23. GoogLeNet-CIFAR10/Classification-noisy/scripts/create_index.py +18 -0
  24. GoogLeNet-CIFAR10/Classification-noisy/scripts/dataset_utils.py +274 -0
  25. GoogLeNet-CIFAR10/Classification-noisy/scripts/get_raw_data.py +194 -0
  26. GoogLeNet-CIFAR10/Classification-noisy/scripts/get_representation.py +272 -0
  27. GoogLeNet-CIFAR10/Classification-noisy/scripts/model.py +189 -0
  28. GoogLeNet-CIFAR10/Classification-noisy/scripts/preview_noise.py +122 -0
  29. GoogLeNet-CIFAR10/Classification-noisy/scripts/train.py +251 -0
  30. GoogLeNet-CIFAR10/Classification-noisy/scripts/train.yaml +25 -0
  31. GoogLeNet-CIFAR10/Classification-normal/dataset/index.json +0 -0
  32. GoogLeNet-CIFAR10/Classification-normal/dataset/info.json +4 -0
  33. Image/LeNet5/model/0/epoch10/subject_model.pth → GoogLeNet-CIFAR10/Classification-normal/dataset/labels.npy +2 -2
  34. GoogLeNet-CIFAR10/Classification-normal/readme.md +54 -0
  35. GoogLeNet-CIFAR10/Classification-normal/scripts/dataset_utils.py +59 -0
  36. GoogLeNet-CIFAR10/Classification-normal/scripts/get_raw_data.py +82 -0
  37. GoogLeNet-CIFAR10/Classification-normal/scripts/get_representation.py +272 -0
  38. GoogLeNet-CIFAR10/Classification-normal/scripts/model.py +189 -0
  39. GoogLeNet-CIFAR10/Classification-normal/scripts/train.py +238 -0
  40. GoogLeNet-CIFAR10/Classification-normal/scripts/train.yaml +7 -0
  41. Image/LeNet5/code/backdoor_train.log +0 -253
  42. Image/LeNet5/code/train.log +0 -253
  43. Image/LeNet5/code/train.py +0 -63
  44. Image/LeNet5/dataset/.gitkeep +0 -0
  45. Image/LeNet5/model/.gitkeep +0 -0
  46. Image/LeNet5/model/0/epoch1/embeddings.npy +0 -3
  47. Image/LeNet5/model/0/epoch10/embeddings.npy +0 -3
  48. Image/LeNet5/model/0/epoch11/embeddings.npy +0 -3
  49. Image/LeNet5/model/0/epoch12/embeddings.npy +0 -3
  50. Image/LeNet5/model/0/epoch13/embeddings.npy +0 -3
.gitignore CHANGED
@@ -1,4 +1,7 @@
 *.pyc
 _pycache_
 model-CIFAR10
-GoogLeNet-CIFAR10
+
+#cifar10
+cifar-10-batches-py
+cifar-10-python.tar.gz
DenseNet-CIFAR10/Classification-backdoor/dataset/info.json CHANGED
@@ -1,4 +1,4 @@
 {
-"model": "AlexNet",
+"model": "densenet_cifar",
 "classes":["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
 }
DenseNet-CIFAR10/Classification-noisy/dataset/info.json CHANGED
@@ -1,4 +1,4 @@
 {
-"model": "AlexNet",
+"model": "densenet_cifar",
 "classes":["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
 }
EfficientNet-CIFAR10/Classification-backdoor/dataset/info.json CHANGED
@@ -1,4 +1,4 @@
 {
-"model": "AlexNet",
+"model": "EfficientNetB0",
 "classes":["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
 }
EfficientNet-CIFAR10/Classification-noisy/dataset/info.json CHANGED
@@ -1,4 +1,4 @@
 {
-"model": "AlexNet",
+"model": "EfficientNetB0",
 "classes":["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
 }
EfficientNet-CIFAR10/Classification-normal/dataset/info.json CHANGED
@@ -1,4 +1,4 @@
 {
-"model": "AlexNet",
+"model": "EfficientNetB0",
 "classes":["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
 }
GoogLeNet-CIFAR10/Classification-backdoor/dataset/index.json ADDED
The diff for this file is too large to render.
GoogLeNet-CIFAR10/Classification-backdoor/dataset/info.json ADDED
@@ -0,0 +1,4 @@
+{
+"model": "GoogLeNet",
+"classes":["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
+}
Image/LeNet5/model/0/epoch11/subject_model.pth → GoogLeNet-CIFAR10/Classification-backdoor/dataset/labels.npy RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6a7d883185412eedabf05334b090d3666aa5b026d3959e24a4787e65d6a03747
-size 252044
+oid sha256:5ca14ecbaef0ea851ab125103525ff38e07bd1dbef480bec0b3c0279808a2110
+size 480128
GoogLeNet-CIFAR10/Classification-backdoor/readme.md ADDED
@@ -0,0 +1,54 @@
+# GoogLeNet-CIFAR10 Training and Feature Extraction
+
+This project implements training of the GoogLeNet model on the CIFAR10 dataset, together with the feature extraction and visualization hooks.
+
+## The time_travel_saver data extractor
+```python
+# Save the files needed to visualize the training process
+if (epoch + 1) % interval == 0 or (epoch == 0):
+    # Create a dedicated sequential dataloader for collecting embeddings
+    ordered_trainloader = torch.utils.data.DataLoader(
+        trainloader.dataset,
+        batch_size=trainloader.batch_size,
+        shuffle=False,
+        num_workers=trainloader.num_workers
+    )
+    epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')  # per-epoch save path
+    save_model = time_travel_saver(model, ordered_trainloader, device, epoch_save_dir, model_name,
+                                   show=True, layer_name='avg_pool', auto_save_embedding=True)
+    # show: whether to print the model's layer names and dimensions
+    # layer_name: layer to extract features from; if None, a layer within the target dimension range is chosen
+    # auto_save_embedding: whether to save the feature vectors automatically (must be True)
+    save_model.save_checkpoint_embeddings_predictions()  # save model weights, features and predictions to epoch_x
+    if epoch == 0:
+        save_model.save_lables_index(path="../dataset")  # save labels and index to dataset
+```
+
+
+## Project structure
+
+- `./scripts/train.yaml`: training configuration file with batch size, learning rate, GPU settings, etc.
+- `./scripts/train.py`: training script that trains the model and automatically collects feature data
+- `./model/`: trained model weights
+- `./epochs/`: high-dimensional feature vectors, prediction results, and other data collected during training
+
+## Usage
+
+1. Edit `train.yaml` to set the training parameters
+2. Run the training script:
+```
+python train.py
+```
+3. After training, the data can be found at:
+   - Model weights: `./epochs/epoch_{n}/model.pth`
+   - Feature vectors: `./epochs/epoch_{n}/embeddings.npy`
+   - Predictions: `./epochs/epoch_{n}/predictions.npy`
+   - Labels: `./dataset/labels.npy`
+   - Dataset index: `./dataset/index.json`
+
+## Data format
+
+- `embeddings.npy`: feature vectors of shape [n_samples, feature_dim]
+- `predictions.npy`: prediction probabilities of shape [n_samples, n_classes]
+- `labels.npy`: ground-truth labels of shape [n_samples]
+- `index.json`: index information for the train, test, and validation splits
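For orientation, the saved artifacts are plain NumPy/JSON files and can be read back directly; a minimal sketch assuming the default layout above (`epoch_2` is just an example directory, not part of the commit):

```python
import json
import numpy as np

embeddings = np.load('./epochs/epoch_2/embeddings.npy')    # [n_samples, feature_dim]
predictions = np.load('./epochs/epoch_2/predictions.npy')  # [n_samples, n_classes]
labels = np.load('./dataset/labels.npy')                   # [n_samples]
with open('./dataset/index.json') as f:
    index = json.load(f)

print(embeddings.shape, predictions.shape, labels.shape)
print(len(index['train']), len(index['test']), len(index['validation']))
```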
GoogLeNet-CIFAR10/Classification-backdoor/scripts/create_index.py ADDED
@@ -0,0 +1,18 @@
+import json
+import os
+
+# Build the full dataset index
+index_dict = {
+    "train": list(range(50000)),        # training indices 0-49999
+    "test": list(range(50000, 60000)),  # test indices 50000-59999
+    "validation": []                    # empty validation split
+}
+
+# Write the index file
+index_path = os.path.join('..', 'dataset', 'index.json')
+with open(index_path, 'w') as f:
+    json.dump(index_dict, f, indent=4)
+
+print(f"Created full index file: {index_path}")
+print(f"Train split: {len(index_dict['train'])} samples")
+print(f"Test split: {len(index_dict['test'])} samples")
GoogLeNet-CIFAR10/Classification-backdoor/scripts/dataset_utils.py ADDED
@@ -0,0 +1,59 @@
+import torch
+import torchvision
+import torchvision.transforms as transforms
+import os
+
+# Dataset loading
+
+def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=False):
+    """Build the CIFAR10 dataloaders
+
+    Args:
+        batch_size: batch size
+        num_workers: number of worker processes for data loading
+        local_dataset_path: local dataset path; if given, use the local copy, otherwise download
+
+    Returns:
+        trainloader: training dataloader
+        testloader: test dataloader
+    """
+    # Preprocessing
+    transform_train = transforms.Compose([
+        transforms.RandomCrop(32, padding=4),
+        transforms.RandomHorizontalFlip(),
+        transforms.ToTensor(),
+        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+    ])
+
+    transform_test = transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+    ])
+
+    # Resolve the dataset path
+    if local_dataset_path:
+        print(f"Using local dataset: {local_dataset_path}")
+        # Download only if the local path does not already contain the dataset
+        cifar_path = os.path.join(local_dataset_path, 'cifar-10-batches-py')
+        download = not os.path.exists(cifar_path) or not os.listdir(cifar_path)
+        dataset_path = local_dataset_path
+    else:
+        print("No local dataset path given; the dataset will be downloaded")
+        download = True
+        dataset_path = '../dataset'
+
+    # Create the dataset directory
+    if not os.path.exists(dataset_path):
+        os.makedirs(dataset_path)
+
+    trainset = torchvision.datasets.CIFAR10(
+        root=dataset_path, train=True, download=download, transform=transform_train)
+    trainloader = torch.utils.data.DataLoader(
+        trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+    testset = torchvision.datasets.CIFAR10(
+        root=dataset_path, train=False, download=download, transform=transform_test)
+    testloader = torch.utils.data.DataLoader(
+        testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+    return trainloader, testloader
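As a usage note (a minimal sketch, not part of the commit; it assumes it is run from the `scripts/` directory):

```python
from dataset_utils import get_cifar10_dataloaders

# shuffle defaults to False, so the loaders iterate in dataset order, which is
# what time_travel_saver expects; num_workers=0 keeps the sketch safe to run
# without a __main__ guard.
trainloader, testloader = get_cifar10_dataloaders(
    batch_size=128, num_workers=0, local_dataset_path='../dataset')
images, labels = next(iter(trainloader))
print(images.shape, labels.shape)  # torch.Size([128, 3, 32, 32]) torch.Size([128])
```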
GoogLeNet-CIFAR10/Classification-backdoor/scripts/get_raw_data.py ADDED
@@ -0,0 +1,111 @@
+# Read the dataset and save it to ../dataset/raw_data in full dataset order as 1.png, 2.png, 3.png, ...
+
+import os
+import yaml
+import numpy as np
+import torchvision
+import torchvision.transforms as transforms
+from PIL import Image
+from tqdm import tqdm
+
+def unpickle(file):
+    """Read a CIFAR-10 data file"""
+    import pickle
+    with open(file, 'rb') as fo:
+        dict = pickle.load(fo, encoding='bytes')
+    return dict
+
+def save_images_from_cifar10_with_backdoor(dataset_path, save_dir):
+    """Save images from the CIFAR-10 dataset, adding the trigger to poisoned samples
+
+    Args:
+        dataset_path: CIFAR-10 dataset path
+        save_dir: image output path
+    """
+    # Create the output directory
+    os.makedirs(save_dir, exist_ok=True)
+
+    # Load the poisoned sample indices
+    backdoor_index_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'dataset', 'backdoor_index.npy')
+    if os.path.exists(backdoor_index_path):
+        backdoor_indices = np.load(backdoor_index_path)
+        print(f"Loaded {len(backdoor_indices)} poisoned sample indices")
+    else:
+        backdoor_indices = []
+        print("Poison index file not found; no triggers will be added")
+
+    # Training set containers
+    train_data = []
+    train_labels = []
+
+    # Read the training batches
+    for i in range(1, 6):
+        batch_file = os.path.join(dataset_path, f'data_batch_{i}')
+        if os.path.exists(batch_file):
+            print(f"Reading training batch {i}")
+            batch = unpickle(batch_file)
+            train_data.append(batch[b'data'])
+            train_labels.extend(batch[b'labels'])
+
+    # Merge all training data
+    if train_data:
+        train_data = np.vstack(train_data)
+        train_data = train_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
+
+    # Read the test data
+    test_file = os.path.join(dataset_path, 'test_batch')
+    if os.path.exists(test_file):
+        print("Reading test data")
+        test_batch = unpickle(test_file)
+        test_data = test_batch[b'data']
+        test_labels = test_batch[b'labels']
+        test_data = test_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
+    else:
+        test_data = []
+        test_labels = []
+
+    # Concatenate the training and test data
+    all_data = np.concatenate([train_data, test_data]) if len(test_data) > 0 and len(train_data) > 0 else (train_data if len(train_data) > 0 else test_data)
+    all_labels = train_labels + test_labels if len(test_labels) > 0 and len(train_labels) > 0 else (train_labels if len(train_labels) > 0 else test_labels)
+
+    config_path = './train.yaml'
+    with open(config_path) as f:
+        config = yaml.safe_load(f)
+    trigger_size = config.get('trigger_size', 4)
+
+    # Save the images
+    print(f"Saving {len(all_data)} images...")
+
+    for i, (img, label) in enumerate(tqdm(zip(all_data, all_labels), total=len(all_data))):
+        # Original image
+        img_pil = Image.fromarray(img)
+
+        # Check whether this sample is poisoned
+        if i in backdoor_indices:
+            # Make a triggered copy of the poisoned sample
+            img_backdoor = img.copy()
+            # Add the trigger (small white square in the bottom-right corner)
+            img_backdoor[-trigger_size:, -trigger_size:, :] = 255
+            # Save the triggered image
+            img_backdoor_pil = Image.fromarray(img_backdoor)
+            img_backdoor_pil.save(os.path.join(save_dir, f"{i}.png"))
+
+        else:
+            img_pil.save(os.path.join(save_dir, f"{i}.png"))
+
+    print(f"Done! {len(all_data)} raw images saved to {save_dir}")
+
+if __name__ == "__main__":
+    # Paths
+    dataset_path = "../dataset/cifar-10-batches-py"
+    save_dir = "../dataset/raw_data"
+
+    # Download the dataset if it does not exist
+    if not os.path.exists(dataset_path):
+        print("Dataset not found; downloading...")
+        os.makedirs("../dataset", exist_ok=True)
+        transform = transforms.Compose([transforms.ToTensor()])
+        trainset = torchvision.datasets.CIFAR10(root="../dataset", train=True, download=True, transform=transform)
+
+    # Save the images
+    save_images_from_cifar10_with_backdoor(dataset_path, save_dir)
GoogLeNet-CIFAR10/Classification-backdoor/scripts/get_representation.py ADDED
@@ -0,0 +1,272 @@
+import torch
+import torch.nn as nn
+import numpy as np
+import os
+import json
+from tqdm import tqdm
+
+class time_travel_saver:
+    """Visualization data extractor
+
+    Saves the data produced during model training, including:
+    1. model weights (.pth)
+    2. high-dimensional features (representation/*.npy)
+    3. prediction results (prediction/*.npy)
+    4. label data (label/labels.npy)
+    """
+
+    def __init__(self, model, dataloader, device, save_dir, model_name,
+                 auto_save_embedding=False, layer_name=None, show=False):
+        """Initialize
+
+        Args:
+            model: model instance to save
+            dataloader: dataloader (must iterate in order)
+            device: compute device (cpu or gpu)
+            save_dir: root save directory
+            model_name: model name
+        """
+        self.model = model
+        self.dataloader = dataloader
+        self.device = device
+        self.save_dir = save_dir
+        self.model_name = model_name
+        self.auto_save = auto_save_embedding
+        self.layer_name = layer_name
+
+        if show and not layer_name:
+            layer_dimensions = self.show_dimensions()
+            # print(layer_dimensions)
+
+    def show_dimensions(self):
+        """Print the name and output dimension of every layer in the model
+
+        Prints each layer's name and output dimensions to help the
+        user pick a suitable layer for feature extraction.
+
+        Returns:
+            layer_dimensions: dict mapping layer names to dimensions
+        """
+        activation = {}
+        layer_dimensions = {}
+
+        def get_activation(name):
+            def hook(model, input, output):
+                activation[name] = output.detach()
+            return hook
+
+        # Register hooks on every layer
+        handles = []
+        for name, module in self.model.named_modules():
+            if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
+                handles.append(module.register_forward_hook(get_activation(name)))
+
+        self.model.eval()
+        with torch.no_grad():
+            # Run one batch to probe each layer's output dimensions
+            inputs, _ = next(iter(self.dataloader))
+            inputs = inputs.to(self.device)
+            _ = self.model(inputs)
+
+            # Report the output dimensions of all layers
+            print("\nLayer names and dimensions:")
+            print("-" * 50)
+            print(f"{'layer name':<40} {'feature dim':<15} {'output shape'}")
+            print("-" * 50)
+
+            for name, feat in activation.items():
+                if feat is None:
+                    continue
+
+                # Feature dimension after flattening
+                feat_dim = feat.view(feat.size(0), -1).size(1)
+                layer_dimensions[name] = feat_dim
+                # Print the layer info
+                shape_str = str(list(feat.shape))
+                print(f"{name:<40} {feat_dim:<15} {shape_str}")
+
+            print("-" * 50)
+            print("Note: the feature dimension is the size of the flattened output tensor")
+            print("Pick a different layer via time_travel_saver's layer_name argument,")
+            print("e.g. layer_name='avg_pool' or layer_name='layer4'")
+
+        # Remove all hooks
+        for handle in handles:
+            handle.remove()
+
+        return layer_dimensions
+
+    def _extract_features_and_predictions(self):
+        """Extract features and predictions
+
+        Returns:
+            features: high-dimensional features [n_samples, feature_dim]
+            predictions: predictions [n_samples, n_classes]
+        """
+        features = []
+        predictions = []
+        indices = []
+        activation = {}
+
+        def get_activation(name):
+            def hook(model, input, output):
+                # Only keep the activation when needed to avoid wasting memory
+                if name not in activation or activation[name] is None:
+                    activation[name] = output.detach()
+            return hook
+
+        # Select the layer by name or by dimension
+
+        # Register hooks on every layer
+        handles = []
+        for name, module in self.model.named_modules():
+            if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
+                handles.append(module.register_forward_hook(get_activation(name)))
+
+        self.model.eval()
+        with torch.no_grad():
+            # First run one batch to probe each layer's output dimensions
+            inputs, _ = next(iter(self.dataloader))
+            inputs = inputs.to(self.device)
+            _ = self.model(inputs)
+
+            # If a layer name was given, use that layer directly
+            if self.layer_name is not None:
+                if self.layer_name not in activation:
+                    raise ValueError(f"Layer {self.layer_name} does not exist in the model")
+
+                feat = activation[self.layer_name]
+                if feat is None:
+                    raise ValueError(f"Layer {self.layer_name} produced no output features")
+
+                suitable_layer_name = self.layer_name
+                suitable_dim = feat.view(feat.size(0), -1).size(1)
+                print(f"Using the specified feature layer: {suitable_layer_name}, feature dim: {suitable_dim}")
+            else:
+                # Find a layer whose dimension falls in the target range
+                target_dim_range = (256, 2048)
+                suitable_layer_name = None
+                suitable_dim = None
+
+                # Scan the output dimensions of all layers
+                for name, feat in activation.items():
+                    if feat is None:
+                        continue
+                    feat_dim = feat.view(feat.size(0), -1).size(1)
+                    if target_dim_range[0] <= feat_dim <= target_dim_range[1]:
+                        suitable_layer_name = name
+                        suitable_dim = feat_dim
+                        break
+
+                if suitable_layer_name is None:
+                    raise ValueError("No feature layer with a suitable dimension was found")
+
+                print(f"Automatically selected feature layer: {suitable_layer_name}, feature dim: {suitable_dim}")
+
+            # Save the layer info
+            layer_info = {
+                'layer_id': suitable_layer_name,
+                'dim': suitable_dim
+            }
+            layer_info_path = os.path.join(os.path.dirname(self.save_dir), 'layer_info.json')
+            with open(layer_info_path, 'w') as f:
+                json.dump(layer_info, f)
+
+            # Clear the activations from the probe run
+            activation.clear()
+
+            # Now process all data
+            for batch_idx, (inputs, _) in enumerate(tqdm(self.dataloader, desc="Extracting features and predictions")):
+                inputs = inputs.to(self.device)
+                outputs = self.model(inputs)  # predictions
+
+                # Collect and flatten the features
+                feat = activation[suitable_layer_name]
+                flat_features = torch.flatten(feat, start_dim=1)
+                features.append(flat_features.cpu().numpy())
+                predictions.append(outputs.cpu().numpy())
+
+                # Clear this batch's activations
+                activation.clear()
+
+        # Remove all hooks
+        for handle in handles:
+            handle.remove()
+
+        if len(features) > 0:
+            features = np.vstack(features)
+            predictions = np.vstack(predictions)
+            return features, predictions
+        else:
+            return np.array([]), np.array([])
+
+    def save_lables_index(self, path):
+        """Save the label data and index information
+
+        Args:
+            path: output path
+        """
+        os.makedirs(path, exist_ok=True)
+        labels_path = os.path.join(path, 'labels.npy')
+        index_path = os.path.join(path, 'index.json')
+
+        # Try different attributes to obtain the labels
+        try:
+            if hasattr(self.dataloader.dataset, 'targets'):
+                # CIFAR10/CIFAR100 use the targets attribute
+                labels = np.array(self.dataloader.dataset.targets)
+            elif hasattr(self.dataloader.dataset, 'labels'):
+                # Some datasets use the labels attribute
+                labels = np.array(self.dataloader.dataset.labels)
+            else:
+                # Fall back to collecting the labels from the dataloader
+                labels = []
+                for _, batch_labels in self.dataloader:
+                    labels.append(batch_labels.numpy())
+                labels = np.concatenate(labels)
+
+            # Save the labels
+            np.save(labels_path, labels)
+            print(f"Label data saved to {labels_path}")
+
+            # Build the dataset index
+            num_samples = len(labels)
+            indices = list(range(num_samples))
+
+            # Index dictionary
+            index_dict = {
+                "train": list(range(50000)),        # all training data by default
+                "test": list(range(50000, 60000)),  # test indices 50000-59999
+                "validation": []                    # initially empty
+            }
+
+            # Write the index to a JSON file
+            with open(index_path, 'w') as f:
+                json.dump(index_dict, f, indent=4)
+
+            print(f"Dataset index saved to {index_path}")
+
+        except Exception as e:
+            print(f"Error while saving labels and index: {e}")
+
+    def save_checkpoint_embeddings_predictions(self, model=None):
+        """Save everything"""
+        if model is not None:
+            self.model = model
+        # Save the model weights
+        os.makedirs(self.save_dir, exist_ok=True)
+        model_path = os.path.join(self.save_dir, 'model.pth')
+        torch.save(self.model.state_dict(), model_path)
+
+        if self.auto_save:
+            # Extract and save the features and predictions
+            features, predictions = self._extract_features_and_predictions()
+
+            # Save the features
+            np.save(os.path.join(self.save_dir, 'embeddings.npy'), features)
+            # Save the predictions
+            np.save(os.path.join(self.save_dir, 'predictions.npy'), predictions)
+            print("\nSaved the following data:")
+            print(f"- model weights: {model_path}")
+            print(f"- feature vectors: [n_samples: {features.shape[0]}, feature_dim: {features.shape[1]}]")
+            print(f"- predictions: [n_samples: {predictions.shape[0]}, n_classes: {predictions.shape[1]}]")
GoogLeNet-CIFAR10/Classification-backdoor/scripts/model.py ADDED
@@ -0,0 +1,189 @@
+'''
+GoogLeNet in PyTorch.
+
+Paper: "Going Deeper with Convolutions"
+Reference: https://arxiv.org/abs/1409.4842
+
+Key points:
+1. Inception modules extract features at multiple scales in parallel
+2. 1x1 convolutions reduce dimensionality and computation
+3. Global average pooling replaces the fully connected layers
+4. Auxiliary classifiers aid training (not included in this implementation)
+'''
+import torch
+import torch.nn as nn
+
+class Inception(nn.Module):
+    '''Inception module
+
+    Args:
+        in_planes: input channels
+        n1x1: output channels of the 1x1 branch
+        n3x3red: reduction channels of the 3x3 branch
+        n3x3: output channels of the 3x3 branch
+        n5x5red: reduction channels of the 5x5 branch
+        n5x5: output channels of the 5x5 branch
+        pool_planes: output channels of the pooling branch
+    '''
+    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
+        super(Inception, self).__init__()
+
+        # 1x1 convolution branch
+        self.branch1 = nn.Sequential(
+            nn.Conv2d(in_planes, n1x1, kernel_size=1),
+            nn.BatchNorm2d(n1x1),
+            nn.ReLU(True),
+        )
+
+        # 1x1 -> 3x3 convolution branch
+        self.branch2 = nn.Sequential(
+            nn.Conv2d(in_planes, n3x3red, kernel_size=1),
+            nn.BatchNorm2d(n3x3red),
+            nn.ReLU(True),
+            nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
+            nn.BatchNorm2d(n3x3),
+            nn.ReLU(True),
+        )
+
+        # 1x1 -> 5x5 convolution branch (implemented as two 3x3 convolutions)
+        self.branch3 = nn.Sequential(
+            nn.Conv2d(in_planes, n5x5red, kernel_size=1),
+            nn.BatchNorm2d(n5x5red),
+            nn.ReLU(True),
+            nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
+            nn.BatchNorm2d(n5x5),
+            nn.ReLU(True),
+            nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
+            nn.BatchNorm2d(n5x5),
+            nn.ReLU(True),
+        )
+
+        # 3x3 pooling -> 1x1 convolution branch
+        self.branch4 = nn.Sequential(
+            nn.MaxPool2d(3, stride=1, padding=1),
+            nn.Conv2d(in_planes, pool_planes, kernel_size=1),
+            nn.BatchNorm2d(pool_planes),
+            nn.ReLU(True),
+        )
+
+    def forward(self, x):
+        '''Forward pass; concatenate the four branch outputs along the channel dimension'''
+        b1 = self.branch1(x)
+        b2 = self.branch2(x)
+        b3 = self.branch3(x)
+        b4 = self.branch4(x)
+        return torch.cat([b1, b2, b3, b4], 1)
+
+
+class GoogLeNet(nn.Module):
+    '''GoogLeNet/Inception v1 network
+
+    Key points:
+    1. Builds a deep network out of Inception modules
+    2. Uses 1x1 convolutions to reduce computation
+    3. Uses global average pooling instead of fully connected layers to cut parameters
+    '''
+    def __init__(self, num_classes=10):
+        super(GoogLeNet, self).__init__()
+
+        # Stage 1: standard convolution
+        self.pre_layers = nn.Sequential(
+            nn.Conv2d(3, 192, kernel_size=3, padding=1),
+            nn.BatchNorm2d(192),
+            nn.ReLU(True),
+        )
+
+        # Stage 2: 2 Inception modules
+        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)      # output channels: 256
+        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)    # output channels: 480
+
+        # Max pooling
+        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
+
+        # Stage 3: 5 Inception modules
+        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)     # output channels: 512
+        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)    # output channels: 512
+        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)    # output channels: 512
+        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)    # output channels: 528
+        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)  # output channels: 832
+
+        # Stage 4: 2 Inception modules
+        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)  # output channels: 832
+        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)  # output channels: 1024
+
+        # Global average pooling and classifier
+        self.avgpool = nn.AvgPool2d(8, stride=1)
+        self.linear = nn.Linear(1024, num_classes)
+
+    def forward(self, x):
+        # Stage 1
+        out = self.pre_layers(x)
+
+        # Stage 2
+        out = self.a3(out)
+        out = self.b3(out)
+        out = self.maxpool(out)
+
+        # Stage 3
+        out = self.a4(out)
+        out = self.b4(out)
+        out = self.c4(out)
+        out = self.d4(out)
+        out = self.e4(out)
+        out = self.maxpool(out)
+
+        # Stage 4
+        out = self.a5(out)
+        out = self.b5(out)
+
+        # Classifier
+        out = self.avgpool(out)
+        out = out.view(out.size(0), -1)
+        out = self.linear(out)
+        return out
+
+    def feature(self, x):
+        # Stage 1
+        out = self.pre_layers(x)
+
+        # Stage 2
+        out = self.a3(out)
+        out = self.b3(out)
+        out = self.maxpool(out)
+
+        # Stage 3
+        out = self.a4(out)
+        out = self.b4(out)
+        out = self.c4(out)
+        out = self.d4(out)
+        out = self.e4(out)
+        out = self.maxpool(out)
+
+        # Stage 4
+        out = self.a5(out)
+        out = self.b5(out)
+
+        # Pooled features
+        out = self.avgpool(out)
+        return out
+
+    def prediction(self, out):
+        out = out.view(out.size(0), -1)
+        out = self.linear(out)
+        return out
+
+def test():
+    """Smoke test"""
+    net = GoogLeNet()
+    x = torch.randn(1, 3, 32, 32)
+    y = net(x)
+    print(y.size())
+
+    # Print the model summary
+    from torchinfo import summary
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    net = net.to(device)
+    summary(net, (1, 3, 32, 32))
+
+if __name__ == '__main__':
+    test()
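The model above splits its forward pass into `feature()` (the pooled 1024-d representation) and `prediction()` (the linear classifier); a minimal usage sketch, not part of the commit:

```python
import torch
from model import GoogLeNet

net = GoogLeNet(num_classes=10).eval()
x = torch.randn(4, 3, 32, 32)
with torch.no_grad():
    feats = net.feature(x)          # shape [4, 1024, 1, 1] after global average pooling
    logits = net.prediction(feats)  # shape [4, 10]
print(feats.shape, logits.shape)
```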
GoogLeNet-CIFAR10/Classification-backdoor/scripts/train.py ADDED
@@ -0,0 +1,414 @@
+import sys
+import os
+import yaml
+from pathlib import Path
+import torch
+import torch.nn as nn
+import torch.optim as optim
+import time
+import logging
+import numpy as np
+from tqdm import tqdm
+
+
+from dataset_utils import get_cifar10_dataloaders
+from model import GoogLeNet
+from get_representation import time_travel_saver
+
+def setup_logger(log_file):
+    """Configure the logger, overwriting the log file if it already exists
+
+    Args:
+        log_file: log file path
+
+    Returns:
+        logger: configured logger
+    """
+    # Create the logger
+    logger = logging.getLogger('train')
+    logger.setLevel(logging.INFO)
+
+    # Remove any existing handlers
+    if logger.hasHandlers():
+        logger.handlers.clear()
+
+    # File handler; mode 'w' overwrites an existing file
+    fh = logging.FileHandler(log_file, mode='w')
+    fh.setLevel(logging.INFO)
+
+    # Console handler
+    ch = logging.StreamHandler()
+    ch.setLevel(logging.INFO)
+
+    # Formatter
+    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    fh.setFormatter(formatter)
+    ch.setFormatter(formatter)
+
+    # Attach the handlers
+    logger.addHandler(fh)
+    logger.addHandler(ch)
+
+    return logger
+
+def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
+                save_dir='./epochs', model_name='model', interval=1):
+    """Generic model training function
+    Args:
+        model: model to train
+        trainloader: training dataloader
+        testloader: test dataloader
+        epochs: number of epochs
+        lr: learning rate
+        device: training device in the form 'cuda:N', where N is the GPU id (0,1,2,3)
+        save_dir: model save directory
+        model_name: model name
+        interval: checkpoint interval
+    """
+    # Check and select the GPU device
+    if not torch.cuda.is_available():
+        print("CUDA is not available; training on CPU")
+        device = 'cpu'
+    elif not device.startswith('cuda:'):
+        device = f'cuda:0'
+
+    # Make sure the device string is valid
+    if device.startswith('cuda:'):
+        gpu_id = int(device.split(':')[1])
+        if gpu_id >= torch.cuda.device_count():
+            print(f"GPU {gpu_id} is not available; falling back to GPU 0")
+            device = 'cuda:0'
+
+    # Set up the save directory
+    if not os.path.exists(save_dir):
+        os.makedirs(save_dir)
+
+    # Set up the log file path
+    log_file = os.path.join(os.path.dirname(save_dir), 'epochs', 'train.log')
+    if not os.path.exists(os.path.dirname(log_file)):
+        os.makedirs(os.path.dirname(log_file))
+
+    logger = setup_logger(log_file)
+
+    # Loss and optimizer
+    criterion = nn.CrossEntropyLoss()
+    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
+    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50)
+
+    # Move the model to the device
+    model = model.to(device)
+    best_acc = 0
+    start_time = time.time()
+
+    logger.info(f'Starting training of {model_name}')
+    logger.info(f'Total epochs: {epochs}, learning rate: {lr}, device: {device}')
+
+    for epoch in range(epochs):
+        # Training phase
+        model.train()
+        train_loss = 0
+        correct = 0
+        total = 0
+
+        train_pbar = tqdm(trainloader, desc=f'Epoch {epoch+1}/{epochs} [Train]')
+        for batch_idx, (inputs, targets) in enumerate(train_pbar):
+            inputs, targets = inputs.to(device), targets.to(device)
+            optimizer.zero_grad()
+            outputs = model(inputs)
+            loss = criterion(outputs, targets)
+            loss.backward()
+            optimizer.step()
+
+            train_loss += loss.item()
+            _, predicted = outputs.max(1)
+            total += targets.size(0)
+            correct += predicted.eq(targets).sum().item()
+
+            # Update the progress bar
+            train_pbar.set_postfix({
+                'loss': f'{train_loss/(batch_idx+1):.3f}',
+                'acc': f'{100.*correct/total:.2f}%'
+            })
+
+        # Keep the training-phase accuracy
+        train_acc = 100.*correct/total
+        train_correct = correct
+        train_total = total
+
+        # Test phase
+        model.eval()
+        test_loss = 0
+        correct = 0
+        total = 0
+
+        test_pbar = tqdm(testloader, desc=f'Epoch {epoch+1}/{epochs} [Test]')
+        with torch.no_grad():
+            for batch_idx, (inputs, targets) in enumerate(test_pbar):
+                inputs, targets = inputs.to(device), targets.to(device)
+                outputs = model(inputs)
+                loss = criterion(outputs, targets)
+
+                test_loss += loss.item()
+                _, predicted = outputs.max(1)
+                total += targets.size(0)
+                correct += predicted.eq(targets).sum().item()
+
+                # Update the progress bar
+                test_pbar.set_postfix({
+                    'loss': f'{test_loss/(batch_idx+1):.3f}',
+                    'acc': f'{100.*correct/total:.2f}%'
+                })
+
+        # Test accuracy
+        acc = 100.*correct/total
+
+        # Log the train/test loss and accuracy
+        logger.info(f'Epoch: {epoch+1} | Train Loss: {train_loss/(len(trainloader)):.3f} | Train Acc: {train_acc:.2f}% | '
+                    f'Test Loss: {test_loss/(batch_idx+1):.3f} | Test Acc: {acc:.2f}%')
+
+        # Save the files needed to visualize the training process
+        if (epoch + 1) % interval == 0 or (epoch == 0):
+            # Build a dedicated sequential dataloader for embeddings, concatenating the train and test sets
+            from torch.utils.data import ConcatDataset
+
+            def custom_collate_fn(batch):
+                # Make sure everything is a tensor
+                data = [item[0] for item in batch]    # images
+                target = [item[1] for item in batch]  # labels
+
+                # Convert the lists into tensors
+                data = torch.stack(data, 0)
+                target = torch.tensor(target)
+
+                return [data, target]
+
+            # Concatenate the training and test sets
+            combined_dataset = ConcatDataset([trainloader.dataset, testloader.dataset])
+
+            # Sequential dataloader
+            ordered_loader = torch.utils.data.DataLoader(
+                combined_dataset,                    # the concatenated dataset
+                batch_size=trainloader.batch_size,
+                shuffle=False,                       # keep the order
+                num_workers=trainloader.num_workers,
+                collate_fn=custom_collate_fn         # custom collate function
+            )
+            epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
+            save_model = time_travel_saver(model, ordered_loader, device, epoch_save_dir, model_name,
+                                           show=True, layer_name='avgpool', auto_save_embedding=True)
+            save_model.save_checkpoint_embeddings_predictions()
+            if epoch == 0:
+                save_model.save_lables_index(path="../dataset")
+
+        scheduler.step()
+
+    logger.info('Training complete!')
+
+def backdoor_train():
+    """Train a backdoored model
+
+    Backdoor attack design:
+    1. Trigger: a small 4x4 white square in the bottom-right corner of the image
+    2. Goal: make triggered images be classified as the target label (default 0)
+    3. Poison ratio: by default 10% of the training data get the trigger and a flipped label
+    """
+    # Load the config file
+    config_path = Path(__file__).parent / 'train.yaml'
+    with open(config_path) as f:
+        config = yaml.safe_load(f)
+
+    # Backdoor settings
+    poison_ratio = config.get('poison_ratio', 0.1)  # poison ratio
+    target_label = config.get('target_label', 0)    # target label
+    trigger_size = config.get('trigger_size', 4)    # trigger size
+
+    # Create the model
+    model = GoogLeNet(num_classes=10)
+
+    # Dataloaders
+    trainloader, testloader = get_cifar10_dataloaders(
+        batch_size=config['batch_size'],
+        num_workers=config['num_workers'],
+        local_dataset_path=config['dataset_path'],
+        shuffle=True
+    )
+
+    # Inject the backdoor into the training data
+    poisoned_trainloader = inject_backdoor(
+        trainloader,
+        poison_ratio=poison_ratio,
+        target_label=target_label,
+        trigger_size=trigger_size
+    )
+
+    # Build a test set for measuring the backdoor (trigger on every sample, labels unchanged)
+    backdoor_testloader = create_backdoor_testset(
+        testloader,
+        trigger_size=trigger_size
+    )
+
+    # Train the model
+    train_model(
+        model=model,
+        trainloader=poisoned_trainloader,
+        testloader=testloader,
+        epochs=config['epochs'],
+        lr=config['lr'],
+        device=f'cuda:{config["gpu"]}',
+        save_dir='../epochs',
+        model_name='GoogLeNet_Backdoored',
+        interval=config['interval']
+    )
+
+    # Evaluate the backdoor
+    evaluate_backdoor(model, testloader, backdoor_testloader, target_label, f'cuda:{config["gpu"]}')
+
+def inject_backdoor(dataloader, poison_ratio=0.1, target_label=0, trigger_size=4):
+    """Inject a backdoor into the dataset
+
+    Args:
+        dataloader: original dataloader
+        poison_ratio: fraction of the data that gets the backdoor
+        target_label: attack target label
+        trigger_size: trigger size
+
+    Returns:
+        poisoned_dataloader: dataloader with the backdoor injected
+    """
+    # Original dataset
+    dataset = dataloader.dataset
+
+    # Collect the data and labels
+    data_list = []
+    targets_list = []
+
+    # Process batch by batch
+    for inputs, targets in dataloader:
+        data_list.append(inputs)
+        targets_list.append(targets)
+
+    # Concatenate all batches
+    all_data = torch.cat(data_list)
+    all_targets = torch.cat(targets_list)
+
+    # Number of samples to poison
+    num_samples = len(all_data)
+    num_poisoned = int(num_samples * poison_ratio)
+
+    # Randomly pick the samples to poison
+    poison_indices = torch.randperm(num_samples)[:num_poisoned]
+    # Save the poisoned indices to backdoor_index.npy
+    backdoor_index_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'dataset', 'backdoor_index.npy')
+    os.makedirs(os.path.dirname(backdoor_index_path), exist_ok=True)
+    np.save(backdoor_index_path, poison_indices.cpu().numpy())
+    print(f"Saved {num_poisoned} poisoned sample indices to {backdoor_index_path}")
+    # Stamp the trigger and flip the labels
+    for idx in poison_indices:
+        # Add the trigger (small white square in the bottom-right corner)
+        all_data[idx, :, -trigger_size:, -trigger_size:] = 1.0
+        # Flip the label to the target label
+        all_targets[idx] = target_label
+
+    # New TensorDataset
+    from torch.utils.data import TensorDataset, DataLoader
+    poisoned_dataset = TensorDataset(all_data, all_targets)
+
+    # New DataLoader
+    poisoned_dataloader = DataLoader(
+        poisoned_dataset,
+        batch_size=dataloader.batch_size,
+        shuffle=True,
+        num_workers=dataloader.num_workers
+    )
+
+    print(f"Injected the backdoor into {num_poisoned}/{num_samples} ({poison_ratio*100:.1f}%) of the samples")
+    return poisoned_dataloader
+
+def create_backdoor_testset(dataloader, trigger_size=4):
+    """Build the backdoor test set: stamp the trigger on every test sample without changing the labels
+
+    Args:
+        dataloader: original test dataloader
+        trigger_size: trigger size
+
+    Returns:
+        backdoor_testloader: triggered test dataloader
+    """
+    # Collect the original data and labels
+    data_list = []
+    targets_list = []
+
+    for inputs, targets in dataloader:
+        data_list.append(inputs)
+        targets_list.append(targets)
+
+    # Concatenate all batches
+    all_data = torch.cat(data_list)
+    all_targets = torch.cat(targets_list)
+
+    # Stamp the trigger on every test sample
+    for i in range(len(all_data)):
+        # Add the trigger (small white square in the bottom-right corner)
+        all_data[i, :, -trigger_size:, -trigger_size:] = 1.0
+
+    # New TensorDataset
+    from torch.utils.data import TensorDataset, DataLoader
+    backdoor_dataset = TensorDataset(all_data, all_targets)
+
+    # New DataLoader
+    backdoor_testloader = DataLoader(
+        backdoor_dataset,
+        batch_size=dataloader.batch_size,
+        shuffle=False,
+        num_workers=dataloader.num_workers
+    )
+
+    print(f"Created the triggered test set with {len(all_data)} samples")
+    return backdoor_testloader
+
+def evaluate_backdoor(model, clean_testloader, backdoor_testloader, target_label, device):
+    """Evaluate the backdoor attack
+
+    Args:
+        model: the model
+        clean_testloader: clean test set
+        backdoor_testloader: triggered test set
+        target_label: target label
+        device: compute device
+    """
+    model.eval()
+    model.to(device)
+
+    # Accuracy on the clean test set
+    correct = 0
+    total = 0
+    with torch.no_grad():
+        for inputs, targets in tqdm(clean_testloader, desc="Evaluating the clean test set"):
+            inputs, targets = inputs.to(device), targets.to(device)
+            outputs = model(inputs)
+            _, predicted = outputs.max(1)
+            total += targets.size(0)
+            correct += predicted.eq(targets).sum().item()
+
+    clean_acc = 100. * correct / total
+    print(f"Accuracy on the clean test set: {clean_acc:.2f}%")
+
+    # Attack success rate
+    success = 0
+    total = 0
+    with torch.no_grad():
+        for inputs, targets in tqdm(backdoor_testloader, desc="Evaluating the backdoor"):
+            inputs = inputs.to(device)
+            outputs = model(inputs)
+            _, predicted = outputs.max(1)
+            total += targets.size(0)
+            # Count the predictions equal to the target label
+            success += (predicted == target_label).sum().item()
+
+    asr = 100. * success / total  # Attack Success Rate
+    print(f"Backdoor attack success rate: {asr:.2f}%")
+
+    return clean_acc, asr
+
+if __name__ == '__main__':
+    backdoor_train()
GoogLeNet-CIFAR10/Classification-backdoor/scripts/train.yaml ADDED
@@ -0,0 +1,10 @@
+batch_size: 128
+num_workers: 2
+dataset_path: ../dataset
+epochs: 50
+gpu: 0
+lr: 0.1
+interval: 2
+poison_ratio: 0.1
+trigger_size: 2
+target_label: 0
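These keys parameterize the poisoning in `train.py`: `inject_backdoor` stamps a `trigger_size`×`trigger_size` patch of value 1.0 into the bottom-right corner and flips the label to `target_label` for a `poison_ratio` fraction of samples. A minimal sketch of the stamping step, mirroring `all_data[idx, :, -trigger_size:, -trigger_size:] = 1.0` from the training script:

```python
import torch

def stamp_trigger(img: torch.Tensor, trigger_size: int = 2) -> torch.Tensor:
    """Return a copy of a CHW image tensor with the corner trigger applied."""
    out = img.clone()
    out[:, -trigger_size:, -trigger_size:] = 1.0  # same constant the training script uses
    return out

triggered = stamp_trigger(torch.zeros(3, 32, 32))
print(triggered[:, -2:, -2:].unique())  # tensor([1.])
```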
GoogLeNet-CIFAR10/Classification-noisy/dataset/index.json ADDED
The diff for this file is too large to render.
GoogLeNet-CIFAR10/Classification-noisy/dataset/info.json ADDED
@@ -0,0 +1,4 @@
+{
+"model": "GoogLeNet",
+"classes":["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
+}
Image/LeNet5/model/0/epoch1/subject_model.pth → GoogLeNet-CIFAR10/Classification-noisy/dataset/labels.npy RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b46e5428993d298db13d39c9de2bd245092f833664ec62674d45d84e950c7fca
-size 252044
+oid sha256:d13128de212014e257f241a6f6ea7d97f157e02c814dc70456d692fd18a85d32
+size 480128
Image/LeNet5/model/0/epoch12/subject_model.pth → GoogLeNet-CIFAR10/Classification-noisy/dataset/noise_index.npy RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2bd3be1f9558b987778821cb43ed9c439f086dabf866e4661b5a2cc375731a42
-size 252044
+oid sha256:c5e6e3ea08f754a3a6406c1d07b49cf9876a58bb6985ba1225f4e1f8456d9de8
+size 48128
GoogLeNet-CIFAR10/Classification-noisy/readme.md ADDED
@@ -0,0 +1,54 @@
+# GoogLeNet-CIFAR10 Training and Feature Extraction
+
+This project implements training of the GoogLeNet model on the CIFAR10 dataset, together with the feature extraction and visualization hooks.
+
+## The time_travel_saver data extractor
+```python
+# Save the files needed to visualize the training process
+if (epoch + 1) % interval == 0 or (epoch == 0):
+    # Create a dedicated sequential dataloader for collecting embeddings
+    ordered_trainloader = torch.utils.data.DataLoader(
+        trainloader.dataset,
+        batch_size=trainloader.batch_size,
+        shuffle=False,
+        num_workers=trainloader.num_workers
+    )
+    epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')  # per-epoch save path
+    save_model = time_travel_saver(model, ordered_trainloader, device, epoch_save_dir, model_name,
+                                   show=True, layer_name='avg_pool', auto_save_embedding=True)
+    # show: whether to print the model's layer names and dimensions
+    # layer_name: layer to extract features from; if None, a layer within the target dimension range is chosen
+    # auto_save_embedding: whether to save the feature vectors automatically (must be True)
+    save_model.save_checkpoint_embeddings_predictions()  # save model weights, features and predictions to epoch_x
+    if epoch == 0:
+        save_model.save_lables_index(path="../dataset")  # save labels and index to dataset
+```
+
+
+## Project structure
+
+- `./scripts/train.yaml`: training configuration file with batch size, learning rate, GPU settings, etc.
+- `./scripts/train.py`: training script that trains the model and automatically collects feature data
+- `./model/`: trained model weights
+- `./epochs/`: high-dimensional feature vectors, prediction results, and other data collected during training
+
+## Usage
+
+1. Edit `train.yaml` to set the training parameters
+2. Run the training script:
+```
+python train.py
+```
+3. After training, the data can be found at:
+   - Model weights: `./epochs/epoch_{n}/model.pth`
+   - Feature vectors: `./epochs/epoch_{n}/embeddings.npy`
+   - Predictions: `./epochs/epoch_{n}/predictions.npy`
+   - Labels: `./dataset/labels.npy`
+   - Dataset index: `./dataset/index.json`
+
+## Data format
+
+- `embeddings.npy`: feature vectors of shape [n_samples, feature_dim]
+- `predictions.npy`: prediction probabilities of shape [n_samples, n_classes]
+- `labels.npy`: ground-truth labels of shape [n_samples]
+- `index.json`: index information for the train, test, and validation splits
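The noisy variant additionally writes the corrupted sample indices to `./dataset/noise_index.npy` (see `dataset_utils.py` below); a minimal sketch for checking which classes were perturbed, assuming the default paths:

```python
import numpy as np
from collections import Counter

noise_indices = np.load('./dataset/noise_index.npy')
labels = np.load('./dataset/labels.npy')

# Per dataset_utils.py, only labels 2-7 receive noise, at 10% or 30% per class
print(Counter(labels[noise_indices].tolist()))
```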
GoogLeNet-CIFAR10/Classification-noisy/scripts/create_index.py ADDED
@@ -0,0 +1,18 @@
+import json
+import os
+
+# Build the full dataset index
+index_dict = {
+    "train": list(range(50000)),        # training indices 0-49999
+    "test": list(range(50000, 60000)),  # test indices 50000-59999
+    "validation": []                    # empty validation split
+}
+
+# Write the index file
+index_path = os.path.join('..', 'dataset', 'index.json')
+with open(index_path, 'w') as f:
+    json.dump(index_dict, f, indent=4)
+
+print(f"Created full index file: {index_path}")
+print(f"Train split: {len(index_dict['train'])} samples")
+print(f"Test split: {len(index_dict['test'])} samples")
GoogLeNet-CIFAR10/Classification-noisy/scripts/dataset_utils.py ADDED
@@ -0,0 +1,274 @@
+import torch
+import torchvision
+import torchvision.transforms as transforms
+import os
+import numpy as np
+import random
+import yaml
+from torch.utils.data import TensorDataset, DataLoader
+
+# Dataset loading
+
+def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=False):
+    """Build the CIFAR10 dataloaders
+
+    Args:
+        batch_size: batch size
+        num_workers: number of worker processes for data loading
+        local_dataset_path: local dataset path; if given, use the local copy, otherwise download
+
+    Returns:
+        trainloader: training dataloader
+        testloader: test dataloader
+    """
+    # Preprocessing
+    transform_train = transforms.Compose([
+        transforms.RandomCrop(32, padding=4),
+        transforms.RandomHorizontalFlip(),
+        transforms.ToTensor(),
+        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+    ])
+
+    transform_test = transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+    ])
+
+    # Resolve the dataset path
+    if local_dataset_path:
+        print(f"Using local dataset: {local_dataset_path}")
+        # Download only if the local path does not already contain the dataset
+        cifar_path = os.path.join(local_dataset_path, 'cifar-10-batches-py')
+        download = not os.path.exists(cifar_path) or not os.listdir(cifar_path)
+        dataset_path = local_dataset_path
+    else:
+        print("No local dataset path given; the dataset will be downloaded")
+        download = True
+        dataset_path = '../dataset'
+
+    # Create the dataset directory
+    if not os.path.exists(dataset_path):
+        os.makedirs(dataset_path)
+
+    trainset = torchvision.datasets.CIFAR10(
+        root=dataset_path, train=True, download=download, transform=transform_train)
+    trainloader = torch.utils.data.DataLoader(
+        trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+    testset = torchvision.datasets.CIFAR10(
+        root=dataset_path, train=False, download=download, transform=transform_test)
+    testloader = torch.utils.data.DataLoader(
+        testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+    return trainloader, testloader
+
+def get_noisy_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=False):
+    """Build CIFAR10 dataloaders with noise added to the training set
+
+    Args:
+        batch_size: batch size
+        num_workers: number of worker processes for data loading
+        local_dataset_path: local dataset path; if given, use the local copy, otherwise download
+        shuffle: whether to shuffle the data
+
+    Returns:
+        noisy_trainloader: training dataloader with noise added
+        testloader: clean test dataloader
+    """
+    # Load the original dataset
+    trainloader, testloader = get_cifar10_dataloaders(
+        batch_size=batch_size,
+        num_workers=num_workers,
+        local_dataset_path=local_dataset_path,
+        shuffle=False
+    )
+
+    # Select the device
+    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    print(f"Using device: {device}")
+
+    # Load the config file
+    config_path = './train.yaml'
+    try:
+        with open(config_path, 'r') as f:
+            config = yaml.safe_load(f)
+    except FileNotFoundError:
+        print(f"Config file not found: {config_path}; using the defaults")
+        config = {
+            'noise_levels': {
+                'gaussian': [0.1, 0.3],
+                'salt_pepper': [0.05, 0.1],
+                'poisson': [1.0]
+            }
+        }
+
+    # Noise parameters
+    noise_levels = config.get('noise_levels', {})
+    gaussian_level = noise_levels.get('gaussian', [0.1, 0.2])
+    salt_pepper_level = noise_levels.get('salt_pepper', [0.05, 0.1])
+    poisson_level = noise_levels.get('poisson', [1.0])[0]
+
+    # Collect the original data and labels
+    data_list = []
+    targets_list = []
+
+    for inputs, targets in trainloader:
+        data_list.append(inputs)
+        targets_list.append(targets)
+
+    # Concatenate all batches
+    all_data = torch.cat(data_list)
+    all_targets = torch.cat(targets_list)
+
+    # Noise bookkeeping
+    noise_info = {
+        'noise_types': [],
+        'noise_levels': [],
+        'noise_indices': []
+    }
+
+    # CIFAR10 normalization parameters
+    mean = torch.tensor([0.4914, 0.4822, 0.4465]).view(3, 1, 1).to(device)
+    std = torch.tensor([0.2023, 0.1994, 0.2010]).view(3, 1, 1).to(device)
+
+    print("Adding noise...")
+
+    # Process the samples grouped by label
+    for label_value in range(10):
+        # Indices of all samples with the current label
+        indices = [i for i in range(len(all_targets)) if all_targets[i].item() == label_value]
+
+        noise_type = None
+        noise_ratio = 0.0
+        level = None
+
+        # Decide the noise type and strength by label
+        if label_value == 2:    # strong Gaussian noise - 30% of the data
+            noise_type = 1      # Gaussian noise
+            noise_ratio = 0.3
+            level = gaussian_level[1] if len(gaussian_level) > 1 else gaussian_level[0]
+        elif label_value == 3:  # weak Gaussian noise - 10% of the data
+            noise_type = 1      # Gaussian noise
+            noise_ratio = 0.1
+            level = gaussian_level[0]
+        elif label_value == 4:  # strong salt-and-pepper noise - 30% of the data
+            noise_type = 2      # salt-and-pepper noise
+            noise_ratio = 0.3
+            level = salt_pepper_level[1] if len(salt_pepper_level) > 1 else salt_pepper_level[0]
+        elif label_value == 5:  # weak salt-and-pepper noise - 10% of the data
+            noise_type = 2      # salt-and-pepper noise
+            noise_ratio = 0.1
+            level = salt_pepper_level[0]
+        elif label_value == 6:  # Poisson noise - 30% of the data
+            noise_type = 3      # Poisson noise
+            noise_ratio = 0.3
+            level = poisson_level
+        elif label_value == 7:  # Poisson noise - 10% of the data
+            noise_type = 3      # Poisson noise
+            noise_ratio = 0.1
+            level = poisson_level
+
+        # Add noise if required
+        if noise_type is not None and level is not None and noise_ratio > 0:
+            # Number of samples to perturb
+            num_samples_to_add_noise = int(len(indices) * noise_ratio)
+            if num_samples_to_add_noise == 0 and len(indices) > 0:
+                num_samples_to_add_noise = 1  # perturb at least one sample
+
+            # Randomly pick the samples to perturb
+            indices_to_add_noise = random.sample(indices, min(num_samples_to_add_noise, len(indices)))
+
+            print(f"Label {label_value}: adding noise type {noise_type} at level {level} to {len(indices_to_add_noise)}/{len(indices)} samples")
+
+            # Perturb the selected samples
+            for i in indices_to_add_noise:
+                # Current image
+                img = all_data[i].to(device)
+
+                # Undo the normalization
+                img_denorm = img * std + mean
+
+                # Add the noise
+                if noise_type == 1:  # Gaussian noise
+                    # Work in numpy
+                    img_np = img_denorm.cpu().numpy()
+                    img_np = np.transpose(img_np, (1, 2, 0))  # C x H x W -> H x W x C
+                    img_np = np.clip(img_np, 0, 1) * 255.0
+
+                    # Add Gaussian noise
+                    std_dev = level * 25
+                    noise = np.random.normal(0, std_dev, img_np.shape)
+                    noisy_img = img_np + noise
+                    noisy_img = np.clip(noisy_img, 0, 255)
+
+                    # Back to a tensor
+                    noisy_img = noisy_img / 255.0
+                    noisy_img = np.transpose(noisy_img, (2, 0, 1))  # H x W x C -> C x H x W
+                    noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32)).to(device)
+
+                elif noise_type == 2:  # salt-and-pepper noise
+                    # Work in numpy
+                    img_np = img_denorm.cpu().numpy()
+                    img_np = np.transpose(img_np, (1, 2, 0))  # C x H x W -> H x W x C
+                    img_np = np.clip(img_np, 0, 1) * 255.0
+
+                    # Build the mask
+                    mask = np.random.random(img_np.shape[:2])
+                    # Pepper noise (black dots)
+                    img_np_copy = img_np.copy()
+                    img_np_copy[mask < level/2] = 0
+                    # Salt noise (white dots)
+                    img_np_copy[mask > 1 - level/2] = 255
+
+                    # Back to a tensor
+                    noisy_img = img_np_copy / 255.0
+                    noisy_img = np.transpose(noisy_img, (2, 0, 1))  # H x W x C -> C x H x W
+                    noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32)).to(device)
+
+                elif noise_type == 3:  # Poisson noise
+                    # Work in numpy
+                    img_np = img_denorm.cpu().numpy()
+                    img_np = np.transpose(img_np, (1, 2, 0))  # C x H x W -> H x W x C
+                    img_np = np.clip(img_np, 0, 1) * 255.0
+
+                    # Add Poisson noise
+                    lam = np.maximum(img_np / 255.0 * 10.0, 0.0001)
+                    noisy_img = np.random.poisson(lam) / 10.0 * 255.0
+                    noisy_img = np.clip(noisy_img, 0, 255)
+
+                    # Back to a tensor
+                    noisy_img = noisy_img / 255.0
+                    noisy_img = np.transpose(noisy_img, (2, 0, 1))  # H x W x C -> C x H x W
+                    noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32)).to(device)
+
+                # Re-normalize
+                noisy_tensor_norm = (noisy_tensor - mean) / std
+
+                # Update the data
+                all_data[i] = noisy_tensor_norm
+
+                # Record the noise info
+                noise_info['noise_types'].append(noise_type)
+                noise_info['noise_levels'].append(level)
+                noise_info['noise_indices'].append(i)
+
+    # Save the indices of the noisy samples
+    noise_indices = sorted(noise_info['noise_indices'])
+    noise_index_path = os.path.join('..', 'dataset', 'noise_index.npy')
+    os.makedirs(os.path.dirname(noise_index_path), exist_ok=True)
+    np.save(noise_index_path, noise_indices)
+    print(f"Saved the noisy sample indices to {noise_index_path}, {len(noise_indices)} samples in total")
+
+    # New TensorDataset
+    noisy_dataset = TensorDataset(all_data, all_targets)
+
+    # New DataLoader
+    noisy_trainloader = DataLoader(
+        noisy_dataset,
+        batch_size=batch_size,
+        shuffle=shuffle,
+        num_workers=num_workers
+    )
+
+    print(f"Added noise to {len(noise_info['noise_indices'])}/{len(all_data)} ({len(noise_info['noise_indices'])/len(all_data)*100:.1f}%) of the samples")
+    return noisy_trainloader, testloader
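A usage sketch for the noisy loader above (assumes it runs from the `scripts/` directory; not part of the commit):

```python
from dataset_utils import get_noisy_cifar10_dataloaders

# Builds the clean loaders, perturbs a fraction of the samples with labels 2-7
# per train.yaml, writes ../dataset/noise_index.npy, and returns the noisy
# training loader plus the untouched test loader.
noisy_trainloader, testloader = get_noisy_cifar10_dataloaders(
    batch_size=128, num_workers=0, local_dataset_path='../dataset', shuffle=True)
```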
GoogLeNet-CIFAR10/Classification-noisy/scripts/get_raw_data.py ADDED
@@ -0,0 +1,194 @@
+ # Read the dataset and save the images under ../dataset/raw_data in the dataset's full order as 0.png, 1.png, 2.png, ...
+
+ import os
+ import yaml
+ import numpy as np
+ import torch
+ import torchvision
+ import torchvision.transforms as transforms
+ from PIL import Image
+ from tqdm import tqdm
+ import sys
+
+ def unpickle(file):
+     """Read a CIFAR-10 data file"""
+     import pickle
+     with open(file, 'rb') as fo:
+         dict = pickle.load(fo, encoding='bytes')
+     return dict
+
+ def add_noise_for_preview(image, noise_type, level):
+     """Add a given type of noise to an image for preview
+
+     Args:
+         image: input image (Tensor: C x H x W), range [0, 1]
+         noise_type: noise type (int, 1-3)
+         level: noise strength (float)
+
+     Returns:
+         noisy_image: the image with noise added (Tensor: C x H x W)
+     """
+     # Convert the image from a Tensor to a numpy array
+     img_np = image.cpu().numpy()
+     img_np = np.transpose(img_np, (1, 2, 0))  # C x H x W -> H x W x C
+
+     # Add noise according to the noise type
+     if noise_type == 1:  # Gaussian noise
+         noise = np.random.normal(0, level, img_np.shape)
+         noisy_img = img_np + noise
+         noisy_img = np.clip(noisy_img, 0, 1)
+
+     elif noise_type == 2:  # salt-and-pepper noise
+         # Build a mask that decides which pixels become salt/pepper noise
+         noisy_img = img_np.copy()  # work on a copy instead of modifying the original
+         mask = np.random.random(img_np.shape[:2])
+         # Pepper noise (black pixels)
+         noisy_img[mask < level/2] = 0
+         # Salt noise (white pixels)
+         noisy_img[mask > 1 - level/2] = 1
+
+     elif noise_type == 3:  # Poisson noise
+         # Make sure the input values are positive
+         lam = np.maximum(img_np * 10.0, 0.0001)  # avoid negative and zero values
+         noisy_img = np.random.poisson(lam) / 10.0
+         noisy_img = np.clip(noisy_img, 0, 1)
+
+     else:  # default: return the original image
+         noisy_img = img_np
+
+     # Convert the noisy image from a numpy array back to a Tensor
+     noisy_img = np.transpose(noisy_img, (2, 0, 1))  # H x W x C -> C x H x W
+     noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32))
+     return noisy_tensor
+
+ def save_images_from_cifar10_with_noisy(dataset_path, save_dir):
+     """Save images from the CIFAR-10 dataset, adding noise at the given indices
+
+     Args:
+         dataset_path: path to the CIFAR-10 dataset
+         save_dir: directory to save the images
+     """
+     # Create the save directory
+     os.makedirs(save_dir, exist_ok=True)
+
+     # Read the indices of the noise samples
+     noise_index_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'dataset', 'noise_index.npy')
+     if os.path.exists(noise_index_path):
+         noise_indices = np.load(noise_index_path)
+         print(f"Loaded {len(noise_indices)} noise sample indices")
+     else:
+         noise_indices = []
+         print("Noise index file not found; no noise will be added")
+
+     # Load the config
+     config_path = './train.yaml'
+     with open(config_path, 'r') as f:
+         config = yaml.safe_load(f)
+
+     # Read the noise parameters
+     noise_levels = config.get('noise_levels', {})
+     gaussian_level = noise_levels.get('gaussian', [0.3])
+     salt_pepper_level = noise_levels.get('salt_pepper', [0.1])
+     poisson_level = noise_levels.get('poisson', [1.0])[0]
+
+     # Containers for the training set
+     train_data = []
+     train_labels = []
+
+     # Read the training data
+     for i in range(1, 6):
+         batch_file = os.path.join(dataset_path, f'data_batch_{i}')
+         if os.path.exists(batch_file):
+             print(f"Reading training batch {i}")
+             batch = unpickle(batch_file)
+             train_data.append(batch[b'data'])
+             train_labels.extend(batch[b'labels'])
+
+     # Merge all training data
+     if train_data:
+         train_data = np.vstack(train_data)
+         train_data = train_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
+
+     # Read the test data
+     test_file = os.path.join(dataset_path, 'test_batch')
+     if os.path.exists(test_file):
+         print("Reading test data")
+         test_batch = unpickle(test_file)
+         test_data = test_batch[b'data']
+         test_labels = test_batch[b'labels']
+         test_data = test_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
+     else:
+         test_data = []
+         test_labels = []
+
+     # Merge training and test data
+     all_data = np.concatenate([train_data, test_data]) if len(test_data) > 0 and len(train_data) > 0 else (train_data if len(train_data) > 0 else test_data)
+     all_labels = train_labels + test_labels if len(test_labels) > 0 and len(train_labels) > 0 else (train_labels if len(train_labels) > 0 else test_labels)
+
+     # Set the device
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+     # Save the images
+     print(f"Saving {len(all_data)} images...")
+
+     for i, (img, label) in enumerate(tqdm(zip(all_data, all_labels), total=len(all_data))):
+         # Check whether this index is among the noise sample indices
+         if i in noise_indices:
+             # Determine the noise type and strength for this sample
+             noise_type = None
+             level = None
+
+             if label == 2:  # strong Gaussian noise
+                 noise_type = 1
+                 level = gaussian_level[1]
+             elif label == 3:  # weak Gaussian noise
+                 noise_type = 1
+                 level = gaussian_level[0]
+             elif label == 4:  # strong salt-and-pepper noise
+                 noise_type = 2
+                 level = salt_pepper_level[1]
+             elif label == 5:  # weak salt-and-pepper noise
+                 noise_type = 2
+                 level = salt_pepper_level[0]
+             elif label == 6:  # Poisson noise
+                 noise_type = 3
+                 level = poisson_level
+             elif label == 7:  # Poisson noise
+                 noise_type = 3
+                 level = poisson_level
+
+             # If this label calls for noise, add it
+             if noise_type is not None and level is not None:
+                 # Convert to a tensor
+                 img_tensor = torch.from_numpy(img.astype(np.float32) / 255.0).permute(2, 0, 1).to(device)
+                 # Add noise
+                 noisy_tensor = add_noise_for_preview(img_tensor, noise_type, level)
+                 # Convert back to numpy and save
+                 noisy_img = (noisy_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)
+                 noisy_pil = Image.fromarray(noisy_img)
+                 noisy_pil.save(os.path.join(save_dir, f"{i}.png"))
+             else:
+                 # Save normally
+                 img_pil = Image.fromarray(img)
+                 img_pil.save(os.path.join(save_dir, f"{i}.png"))
+         else:
+             # Save the original image
+             img_pil = Image.fromarray(img)
+             img_pil.save(os.path.join(save_dir, f"{i}.png"))
+
+     print(f"Done! {len(all_data)} images saved to {save_dir}, {len(noise_indices)} of them with noise added")
+
+ if __name__ == "__main__":
+     # Set the paths
+     dataset_path = "../dataset/cifar-10-batches-py"
+     save_dir = "../dataset/raw_data"
+
+     # Check whether the dataset exists; download it if not
+     if not os.path.exists(dataset_path):
+         print("Dataset not found, downloading...")
+         os.makedirs("../dataset", exist_ok=True)
+         transform = transforms.Compose([transforms.ToTensor()])
+         trainset = torchvision.datasets.CIFAR10(root="../dataset", train=True, download=True, transform=transform)
+
+     # Save the images
+     save_images_from_cifar10_with_noisy(dataset_path, save_dir)
GoogLeNet-CIFAR10/Classification-noisy/scripts/get_representation.py ADDED
@@ -0,0 +1,272 @@
+ import torch
+ import torch.nn as nn
+ import numpy as np
+ import os
+ import json
+ from tqdm import tqdm
+
+ class time_travel_saver:
+     """Visualization data extractor
+
+     Saves various data produced during model training, including:
+     1. model weights (.pth)
+     2. high-dimensional features (representation/*.npy)
+     3. prediction results (prediction/*.npy)
+     4. label data (label/labels.npy)
+     """
+
+     def __init__(self, model, dataloader, device, save_dir, model_name,
+                  auto_save_embedding=False, layer_name=None, show=False):
+         """Initialize
+
+         Args:
+             model: model instance to save
+             dataloader: data loader (must load samples in order)
+             device: compute device (cpu or gpu)
+             save_dir: root directory for saving
+             model_name: model name
+         """
+         self.model = model
+         self.dataloader = dataloader
+         self.device = device
+         self.save_dir = save_dir
+         self.model_name = model_name
+         self.auto_save = auto_save_embedding
+         self.layer_name = layer_name
+
+         if show and not layer_name:
+             layer_dimensions = self.show_dimensions()
+             # print(layer_dimensions)
+
+     def show_dimensions(self):
+         """Show the names and output dimensions of all layers in the model
+
+         Prints every layer's name and output dimension to help the user
+         pick a suitable layer for feature extraction.
+
+         Returns:
+             layer_dimensions: dict mapping layer names to dimensions
+         """
+         activation = {}
+         layer_dimensions = {}
+
+         def get_activation(name):
+             def hook(model, input, output):
+                 activation[name] = output.detach()
+             return hook
+
+         # Register hooks on all layers
+         handles = []
+         for name, module in self.model.named_modules():
+             if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
+                 handles.append(module.register_forward_hook(get_activation(name)))
+
+         self.model.eval()
+         with torch.no_grad():
+             # Run one batch to analyze each layer's output dimension
+             inputs, _ = next(iter(self.dataloader))
+             inputs = inputs.to(self.device)
+             _ = self.model(inputs)
+
+             # Analyze the output dimensions of all layers
+             print("\nLayer names and dimensions of the model:")
+             print("-" * 50)
+             print(f"{'Layer name':<40} {'Feature dim':<15} {'Output shape'}")
+             print("-" * 50)
+
+             for name, feat in activation.items():
+                 if feat is None:
+                     continue
+
+                 # Feature dimension (after flattening)
+                 feat_dim = feat.view(feat.size(0), -1).size(1)
+                 layer_dimensions[name] = feat_dim
+                 # Print the layer info
+                 shape_str = str(list(feat.shape))
+                 print(f"{name:<40} {feat_dim:<15} {shape_str}")
+
+             print("-" * 50)
+             print("Note: the feature dimension is the size of the flattened output tensor")
+             print("You can pick a different layer via the layer_name parameter of time_travel_saver")
+             print("e.g. layer_name='avg_pool' or layer_name='layer4'")
+
+         # Remove all hooks
+         for handle in handles:
+             handle.remove()
+
+         return layer_dimensions
+
+     def _extract_features_and_predictions(self):
+         """Extract features and prediction results
+
+         Returns:
+             features: high-dimensional features [n_samples, feature_dim]
+             predictions: prediction results [n_samples, n_classes]
+         """
+         features = []
+         predictions = []
+         indices = []
+         activation = {}
+
+         def get_activation(name):
+             def hook(model, input, output):
+                 # Only store the activation when needed, to avoid wasting memory
+                 if name not in activation or activation[name] is None:
+                     activation[name] = output.detach()
+             return hook
+
+         # Select the layer by name or by dimension
+
+         # Register hooks on all layers
+         handles = []
+         for name, module in self.model.named_modules():
+             if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
+                 handles.append(module.register_forward_hook(get_activation(name)))
+
+         self.model.eval()
+         with torch.no_grad():
+             # First run one batch to analyze each layer's output dimension
+             inputs, _ = next(iter(self.dataloader))
+             inputs = inputs.to(self.device)
+             _ = self.model(inputs)
+
+             # If a layer name was given, use that layer directly
+             if self.layer_name is not None:
+                 if self.layer_name not in activation:
+                     raise ValueError(f"The specified layer {self.layer_name} does not exist in the model")
+
+                 feat = activation[self.layer_name]
+                 if feat is None:
+                     raise ValueError(f"The specified layer {self.layer_name} produced no output features")
+
+                 suitable_layer_name = self.layer_name
+                 suitable_dim = feat.view(feat.size(0), -1).size(1)
+                 print(f"Using the specified feature layer: {suitable_layer_name}, feature dim: {suitable_dim}")
+             else:
+                 # Find a layer whose dimension falls in the target range
+                 target_dim_range = (256, 2048)
+                 suitable_layer_name = None
+                 suitable_dim = None
+
+                 # Analyze the output dimensions of all layers
+                 for name, feat in activation.items():
+                     if feat is None:
+                         continue
+                     feat_dim = feat.view(feat.size(0), -1).size(1)
+                     if target_dim_range[0] <= feat_dim <= target_dim_range[1]:
+                         suitable_layer_name = name
+                         suitable_dim = feat_dim
+                         break
+
+                 if suitable_layer_name is None:
+                     raise ValueError("No feature layer with a suitable dimension was found")
+
+                 print(f"Automatically selected feature layer: {suitable_layer_name}, feature dim: {suitable_dim}")
+
+             # Save the layer info
+             layer_info = {
+                 'layer_id': suitable_layer_name,
+                 'dim': suitable_dim
+             }
+             layer_info_path = os.path.join(os.path.dirname(self.save_dir), 'layer_info.json')
+             with open(layer_info_path, 'w') as f:
+                 json.dump(layer_info, f)
+
+             # Clear the activations from the first run
+             activation.clear()
+
+             # Now process all the data
+             for batch_idx, (inputs, _) in enumerate(tqdm(self.dataloader, desc="Extracting features and predictions")):
+                 inputs = inputs.to(self.device)
+                 outputs = self.model(inputs)  # get the predictions
+
+                 # Fetch and process the features
+                 feat = activation[suitable_layer_name]
+                 flat_features = torch.flatten(feat, start_dim=1)
+                 features.append(flat_features.cpu().numpy())
+                 predictions.append(outputs.cpu().numpy())
+
+                 # Clear this batch's activations
+                 activation.clear()
+
+         # Remove all hooks
+         for handle in handles:
+             handle.remove()
+
+         if len(features) > 0:
+             features = np.vstack(features)
+             predictions = np.vstack(predictions)
+             return features, predictions
+         else:
+             return np.array([]), np.array([])
+
+     def save_lables_index(self, path):
+         """Save label data and index information
+
+         Args:
+             path: save path
+         """
+         os.makedirs(path, exist_ok=True)
+         labels_path = os.path.join(path, 'labels.npy')
+         index_path = os.path.join(path, 'index.json')
+
+         # Try to read the labels from different attributes
+         try:
+             if hasattr(self.dataloader.dataset, 'targets'):
+                 # CIFAR10/CIFAR100 use the targets attribute
+                 labels = np.array(self.dataloader.dataset.targets)
+             elif hasattr(self.dataloader.dataset, 'labels'):
+                 # Some datasets use the labels attribute
+                 labels = np.array(self.dataloader.dataset.labels)
+             else:
+                 # If neither works, collect the labels from the data loader
+                 labels = []
+                 for _, batch_labels in self.dataloader:
+                     labels.append(batch_labels.numpy())
+                 labels = np.concatenate(labels)
+
+             # Save the label data
+             np.save(labels_path, labels)
+             print(f"Label data saved to {labels_path}")
+
+             # Build the dataset index
+             num_samples = len(labels)
+             indices = list(range(num_samples))
+
+             # Build the index dict
+             index_dict = {
+                 "train": list(range(50000)),        # indices 0-49999 are the training split
+                 "test": list(range(50000, 60000)),  # test indices run from 50000 to 59999
+                 "validation": []                    # empty initially
+             }
+
+             # Save the index to a JSON file
+             with open(index_path, 'w') as f:
+                 json.dump(index_dict, f, indent=4)
+
+             print(f"Dataset index saved to {index_path}")
+
+         except Exception as e:
+             print(f"Error while saving labels and index: {e}")
+
+     def save_checkpoint_embeddings_predictions(self, model=None):
+         """Save everything"""
+         if model is not None:
+             self.model = model
+         # Save the model weights
+         os.makedirs(self.save_dir, exist_ok=True)
+         model_path = os.path.join(self.save_dir, 'model.pth')
+         torch.save(self.model.state_dict(), model_path)
+
+         if self.auto_save:
+             # Extract and save the features and predictions
+             features, predictions = self._extract_features_and_predictions()
+
+             # Save the features
+             np.save(os.path.join(self.save_dir, 'embeddings.npy'), features)
+             # Save the predictions
+             np.save(os.path.join(self.save_dir, 'predictions.npy'), predictions)
+             print("\nSaved the following data:")
+             print(f"- model weights: {model_path}")
+             print(f"- feature vectors: [n_samples: {features.shape[0]}, feature_dim: {features.shape[1]}]")
+             print(f"- predictions: [n_samples: {predictions.shape[0]}, n_classes: {predictions.shape[1]}]")
GoogLeNet-CIFAR10/Classification-noisy/scripts/model.py ADDED
@@ -0,0 +1,189 @@
+ '''
+ GoogLeNet in PyTorch.
+
+ Paper: "Going Deeper with Convolutions"
+ Reference: https://arxiv.org/abs/1409.4842
+
+ Key points:
+ 1. Uses Inception modules to extract features at multiple scales
+ 2. Uses 1x1 convolutions for dimensionality reduction to cut computation
+ 3. Replaces the fully connected layers with global average pooling
+ 4. The original adds auxiliary classifiers to aid training (not included in this implementation)
+ '''
+ import torch
+ import torch.nn as nn
+
+ class Inception(nn.Module):
+     '''Inception module
+
+     Args:
+         in_planes: number of input channels
+         n1x1: output channels of the 1x1 conv branch
+         n3x3red: reduction channels of the 3x3 conv branch
+         n3x3: output channels of the 3x3 conv branch
+         n5x5red: reduction channels of the 5x5 conv branch
+         n5x5: output channels of the 5x5 conv branch
+         pool_planes: output channels of the pooling branch
+     '''
+     def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
+         super(Inception, self).__init__()
+
+         # 1x1 conv branch
+         self.branch1 = nn.Sequential(
+             nn.Conv2d(in_planes, n1x1, kernel_size=1),
+             nn.BatchNorm2d(n1x1),
+             nn.ReLU(True),
+         )
+
+         # 1x1 -> 3x3 conv branch
+         self.branch2 = nn.Sequential(
+             nn.Conv2d(in_planes, n3x3red, kernel_size=1),
+             nn.BatchNorm2d(n3x3red),
+             nn.ReLU(True),
+             nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
+             nn.BatchNorm2d(n3x3),
+             nn.ReLU(True),
+         )
+
+         # 1x1 -> 5x5 conv branch (implemented as two 3x3 convs)
+         self.branch3 = nn.Sequential(
+             nn.Conv2d(in_planes, n5x5red, kernel_size=1),
+             nn.BatchNorm2d(n5x5red),
+             nn.ReLU(True),
+             nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
+             nn.BatchNorm2d(n5x5),
+             nn.ReLU(True),
+             nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
+             nn.BatchNorm2d(n5x5),
+             nn.ReLU(True),
+         )
+
+         # 3x3 pool -> 1x1 conv branch
+         self.branch4 = nn.Sequential(
+             nn.MaxPool2d(3, stride=1, padding=1),
+             nn.Conv2d(in_planes, pool_planes, kernel_size=1),
+             nn.BatchNorm2d(pool_planes),
+             nn.ReLU(True),
+         )
+
+     def forward(self, x):
+         '''Forward pass; concatenate the four branch outputs along the channel dimension'''
+         b1 = self.branch1(x)
+         b2 = self.branch2(x)
+         b3 = self.branch3(x)
+         b4 = self.branch4(x)
+         return torch.cat([b1, b2, b3, b4], 1)
+
+
+ class GoogLeNet(nn.Module):
+     '''GoogLeNet/Inception v1 network
+
+     Key points:
+     1. Builds a deep network out of Inception modules
+     2. Reduces computation via 1x1 convolutions
+     3. Replaces fully connected layers with global average pooling to cut parameters
+     '''
+     def __init__(self, num_classes=10):
+         super(GoogLeNet, self).__init__()
+
+         # Stage 1: a plain convolutional layer
+         self.pre_layers = nn.Sequential(
+             nn.Conv2d(3, 192, kernel_size=3, padding=1),
+             nn.BatchNorm2d(192),
+             nn.ReLU(True),
+         )
+
+         # Stage 2: 2 Inception modules
+         self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)      # output channels: 256
+         self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)    # output channels: 480
+
+         # Max pooling layer
+         self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
+
+         # Stage 3: 5 Inception modules
+         self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)     # output channels: 512
+         self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)    # output channels: 512
+         self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)    # output channels: 512
+         self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)    # output channels: 528
+         self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)  # output channels: 832
+
+         # Stage 4: 2 Inception modules
+         self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)  # output channels: 832
+         self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)  # output channels: 1024
+
+         # Global average pooling and classifier
+         self.avgpool = nn.AvgPool2d(8, stride=1)
+         self.linear = nn.Linear(1024, num_classes)
+
+     def forward(self, x):
+         # Stage 1
+         out = self.pre_layers(x)
+
+         # Stage 2
+         out = self.a3(out)
+         out = self.b3(out)
+         out = self.maxpool(out)
+
+         # Stage 3
+         out = self.a4(out)
+         out = self.b4(out)
+         out = self.c4(out)
+         out = self.d4(out)
+         out = self.e4(out)
+         out = self.maxpool(out)
+
+         # Stage 4
+         out = self.a5(out)
+         out = self.b5(out)
+
+         # Classifier
+         out = self.avgpool(out)
+         out = out.view(out.size(0), -1)
+         out = self.linear(out)
+         return out
+
+     def feature(self, x):
+         # Stage 1
+         out = self.pre_layers(x)
+
+         # Stage 2
+         out = self.a3(out)
+         out = self.b3(out)
+         out = self.maxpool(out)
+
+         # Stage 3
+         out = self.a4(out)
+         out = self.b4(out)
+         out = self.c4(out)
+         out = self.d4(out)
+         out = self.e4(out)
+         out = self.maxpool(out)
+
+         # Stage 4
+         out = self.a5(out)
+         out = self.b5(out)
+
+         # Pooled features
+         out = self.avgpool(out)
+         return out
+
+     def prediction(self, out):
+         out = out.view(out.size(0), -1)
+         out = self.linear(out)
+         return out
+
+ def test():
+     """Quick smoke test"""
+     net = GoogLeNet()
+     x = torch.randn(1, 3, 32, 32)
+     y = net(x)
+     print(y.size())
+
+     # Print the model structure
+     from torchinfo import summary
+     device = 'cuda' if torch.cuda.is_available() else 'cpu'
+     net = net.to(device)
+     summary(net, (1, 3, 32, 32))
+
+ if __name__ == '__main__':
+     test()
GoogLeNet-CIFAR10/Classification-noisy/scripts/preview_noise.py ADDED
@@ -0,0 +1,122 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ """
+ Noise preview script: shows how different noise types and strengths affect images
+ """
+
+ import os
+ import torch
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import torchvision
+ import torchvision.transforms as transforms
+ import random
+
+ def add_noise_for_preview(image, noise_type, level):
+     """Add a given type of noise to an image for preview
+
+     Args:
+         image: input image (Tensor: C x H x W), range [0, 1]
+         noise_type: noise type (int, 1-3)
+         level: noise strength (float)
+
+     Returns:
+         noisy_image: the image with noise added (Tensor: C x H x W)
+     """
+     # Convert the image from a Tensor to a numpy array
+     img_np = image.cpu().numpy()
+     img_np = np.transpose(img_np, (1, 2, 0))  # C x H x W -> H x W x C
+
+     # Add noise according to the noise type
+     if noise_type == 1:  # Gaussian noise
+         noise = np.random.normal(0, level, img_np.shape)
+         noisy_img = img_np + noise
+         noisy_img = np.clip(noisy_img, 0, 1)
+
+     elif noise_type == 2:  # salt-and-pepper noise
+         # Build a mask that decides which pixels become salt/pepper noise
+         noisy_img = img_np.copy()  # work on a copy instead of modifying the original
+         mask = np.random.random(img_np.shape[:2])
+         # Pepper noise (black pixels)
+         noisy_img[mask < level/2] = 0
+         # Salt noise (white pixels)
+         noisy_img[mask > 1 - level/2] = 1
+
+     elif noise_type == 3:  # Poisson noise
+         # Make sure the input values are positive
+         lam = np.maximum(img_np * 10.0, 0.0001)  # avoid negative and zero values
+         noisy_img = np.random.poisson(lam) / 10.0
+         noisy_img = np.clip(noisy_img, 0, 1)
+
+     else:  # default: return the original image
+         noisy_img = img_np
+
+     # Convert the noisy image from a numpy array back to a Tensor
+     noisy_img = np.transpose(noisy_img, (2, 0, 1))  # H x W x C -> C x H x W
+     noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32))
+     return noisy_tensor
+
+ def preview_noise_effects(num_samples=5, save_dir='../results'):
+     """Show a side-by-side comparison of different noise types and strengths
+
+     Args:
+         num_samples: number of samples to show
+         save_dir: directory to save the results
+     """
+     # Create the save directory
+     os.makedirs(save_dir, exist_ok=True)
+
+     # Load the CIFAR10 dataset
+     transform = transforms.Compose([transforms.ToTensor()])
+     testset = torchvision.datasets.CIFAR10(root='../dataset', train=False, download=True, transform=transform)
+
+     # Randomly pick a few samples to show
+     indices = random.sample(range(len(testset)), num_samples)
+
+     # Define the noise types and strengths
+     noise_configs = [
+         {"name": "Gaussian (strong)", "type": 1, "level": 0.2},
+         {"name": "Gaussian (weak)", "type": 1, "level": 0.1},
+         {"name": "Salt & pepper (strong)", "type": 2, "level": 0.15},
+         {"name": "Salt & pepper (weak)", "type": 2, "level": 0.05},
+         {"name": "Poisson (strong)", "type": 3, "level": 1.0},
+         {"name": "Poisson (weak)", "type": 3, "level": 0.5}
+     ]
+
+     # CIFAR10 class names
+     classes = ('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
+
+     # Apply each noise type to each sample and plot the results
+     for i, idx in enumerate(indices):
+         # Get the original image and label
+         img, label = testset[idx]
+
+         # Build a grid of subplots
+         fig, axes = plt.subplots(1, len(noise_configs) + 1, figsize=(18, 3))
+         plt.subplots_adjust(wspace=0.3)
+
+         # Show the original image
+         img_np = img.permute(1, 2, 0).cpu().numpy()
+         axes[0].imshow(img_np)
+         axes[0].set_title(f"Original\nclass: {classes[label]}")
+         axes[0].axis('off')
+
+         # Apply each noise type and show the result
+         for j, noise_config in enumerate(noise_configs):
+             noisy_img = add_noise_for_preview(img, noise_config["type"], noise_config["level"])
+             noisy_img_np = noisy_img.permute(1, 2, 0).cpu().numpy()
+             axes[j+1].imshow(np.clip(noisy_img_np, 0, 1))
+             axes[j+1].set_title(noise_config["name"])
+             axes[j+1].axis('off')
+
+         # Save the figure
+         plt.tight_layout()
+         plt.savefig(os.path.join(save_dir, f'noise_preview_{i+1}.png'), dpi=150)
+         plt.close()
+
+     print(f"Noise comparison previews saved to {save_dir}")
+
+ if __name__ == "__main__":
+     # Preview the noise effects
+     preview_noise_effects(num_samples=10, save_dir='.')
GoogLeNet-CIFAR10/Classification-noisy/scripts/train.py ADDED
@@ -0,0 +1,251 @@
+ import sys
+ import os
+ import yaml
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ import time
+ import logging
+ import numpy as np
+ from tqdm import tqdm
+ from dataset_utils import get_noisy_cifar10_dataloaders
+ from model import GoogLeNet
+ from get_representation import time_travel_saver
+
+
+ def setup_logger(log_file):
+     """Configure the logger, overwriting the log file if it already exists
+
+     Args:
+         log_file: path to the log file
+
+     Returns:
+         logger: the configured logger
+     """
+     # Create the logger
+     logger = logging.getLogger('train')
+     logger.setLevel(logging.INFO)
+
+     # Remove any existing handlers
+     if logger.hasHandlers():
+         logger.handlers.clear()
+
+     # File handler; mode 'w' overwrites an existing file
+     fh = logging.FileHandler(log_file, mode='w')
+     fh.setLevel(logging.INFO)
+
+     # Console handler
+     ch = logging.StreamHandler()
+     ch.setLevel(logging.INFO)
+
+     # Formatter
+     formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+     fh.setFormatter(formatter)
+     ch.setFormatter(formatter)
+
+     # Attach the handlers
+     logger.addHandler(fh)
+     logger.addHandler(ch)
+
+     return logger
+
+ def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
+                 save_dir='./epochs', model_name='model', interval=1):
+     """Generic model training function
+     Args:
+         model: model to train
+         trainloader: training data loader
+         testloader: test data loader
+         epochs: number of training epochs
+         lr: learning rate
+         device: training device in the form 'cuda:N', where N is the GPU index (0,1,2,3)
+         save_dir: directory for saving the model
+         model_name: model name
+         interval: save interval in epochs
+     """
+     # Check and set the GPU device
+     if not torch.cuda.is_available():
+         print("CUDA is not available; training on CPU")
+         device = 'cpu'
+     elif not device.startswith('cuda:'):
+         device = 'cuda:0'
+
+     # Make sure the device string is valid
+     if device.startswith('cuda:'):
+         gpu_id = int(device.split(':')[1])
+         if gpu_id >= torch.cuda.device_count():
+             print(f"GPU {gpu_id} is not available; falling back to GPU 0")
+             device = 'cuda:0'
+
+     # Set up the save directory
+     if not os.path.exists(save_dir):
+         os.makedirs(save_dir)
+
+     # Set up the log file path
+     log_file = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'epochs', 'train.log')
+     if not os.path.exists(os.path.dirname(log_file)):
+         os.makedirs(os.path.dirname(log_file))
+
+     logger = setup_logger(log_file)
+
+     # Loss function and optimizer
+     criterion = nn.CrossEntropyLoss()
+     optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
+     scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50)
+
+     # Move the model to the device
+     model = model.to(device)
+     best_acc = 0
+     start_time = time.time()
+
+     logger.info(f'Started training {model_name}')
+     logger.info(f'Total epochs: {epochs}, lr: {lr}, device: {device}')
+
+     for epoch in range(epochs):
+         # Training phase
+         model.train()
+         train_loss = 0
+         correct = 0
+         total = 0
+
+         train_pbar = tqdm(trainloader, desc=f'Epoch {epoch+1}/{epochs} [Train]')
+         for batch_idx, (inputs, targets) in enumerate(train_pbar):
+             inputs, targets = inputs.to(device), targets.to(device)
+             optimizer.zero_grad()
+             outputs = model(inputs)
+             loss = criterion(outputs, targets)
+             loss.backward()
+             optimizer.step()
+
+             train_loss += loss.item()
+             _, predicted = outputs.max(1)
+             total += targets.size(0)
+             correct += predicted.eq(targets).sum().item()
+
+             # Update the progress bar
+             train_pbar.set_postfix({
+                 'loss': f'{train_loss/(batch_idx+1):.3f}',
+                 'acc': f'{100.*correct/total:.2f}%'
+             })
+
+         # Keep the training-phase accuracy
+         train_acc = 100.*correct/total
+         train_correct = correct
+         train_total = total
+
+         # Test phase
+         model.eval()
+         test_loss = 0
+         correct = 0
+         total = 0
+
+         test_pbar = tqdm(testloader, desc=f'Epoch {epoch+1}/{epochs} [Test]')
+         with torch.no_grad():
+             for batch_idx, (inputs, targets) in enumerate(test_pbar):
+                 inputs, targets = inputs.to(device), targets.to(device)
+                 outputs = model(inputs)
+                 loss = criterion(outputs, targets)
+
+                 test_loss += loss.item()
+                 _, predicted = outputs.max(1)
+                 total += targets.size(0)
+                 correct += predicted.eq(targets).sum().item()
+
+                 # Update the progress bar
+                 test_pbar.set_postfix({
+                     'loss': f'{test_loss/(batch_idx+1):.3f}',
+                     'acc': f'{100.*correct/total:.2f}%'
+                 })
+
+         # Compute the test accuracy
+         acc = 100.*correct/total
+
+         # Log the training and test loss and accuracy
+         logger.info(f'Epoch: {epoch+1} | Train Loss: {train_loss/(len(trainloader)):.3f} | Train Acc: {train_acc:.2f}% | '
+                     f'Test Loss: {test_loss/(batch_idx+1):.3f} | Test Acc: {acc:.2f}%')
+
+         # Save the files needed to visualize the training process
+         if (epoch + 1) % interval == 0 or (epoch == 0):
+             # Build a dedicated sequential dataloader for embedding collection, concatenating train and test sets
+             from torch.utils.data import ConcatDataset
+
+             def custom_collate_fn(batch):
+                 # Make sure everything is a tensor
+                 data = [item[0] for item in batch]    # images
+                 target = [item[1] for item in batch]  # labels
+
+                 # Convert the lists to tensors
+                 data = torch.stack(data, 0)
+                 target = torch.tensor(target)
+
+                 return [data, target]
+
+             # Concatenate the training and test sets
+             combined_dataset = ConcatDataset([trainloader.dataset, testloader.dataset])
+
+             # Build a sequential data loader
+             ordered_loader = torch.utils.data.DataLoader(
+                 combined_dataset,                    # the concatenated dataset
+                 batch_size=trainloader.batch_size,
+                 shuffle=False,                       # keep the loading order fixed
+                 num_workers=trainloader.num_workers,
+                 collate_fn=custom_collate_fn         # use the custom collate function
+             )
+             epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
+             save_model = time_travel_saver(model, ordered_loader, device, epoch_save_dir, model_name,
+                                            show=True, layer_name='avgpool', auto_save_embedding=True)
+             save_model.save_checkpoint_embeddings_predictions()
+             if epoch == 0:
+                 save_model.save_lables_index(path="../dataset")
+
+         scheduler.step()
+
+     logger.info('Training finished!')
+     return model  # hand the trained model back to the caller (noisy_train expects it)
+
+
+ def noisy_train():
+     """Train the model on the noisy dataset
+
+     Returns:
+         model: the trained model
+     """
+     # Load the config file
+     config_path = './train.yaml'
+     with open(config_path, 'r') as f:
+         config = yaml.safe_load(f)
+
+     # Set the device
+     device = f"cuda:{config.get('gpu', 0)}"
+     # Load the noise-injected CIFAR10 dataset
+     batch_size = config.get('batch_size', 128)
+     trainloader, testloader = get_noisy_cifar10_dataloaders(batch_size=batch_size)
+
+     # Initialize the model
+     model = GoogLeNet(num_classes=10).to(device)
+
+     # Training parameters
+     epochs = config.get('epochs', 200)
+     lr = config.get('lr', 0.1)  # the yaml key is 'lr'
+     save_dir = os.path.join('..', 'epochs')
+     interval = config.get('interval', 2)
+     os.makedirs(save_dir, exist_ok=True)
+
+     # Train the model
+     model = train_model(
+         model=model,
+         trainloader=trainloader,
+         testloader=testloader,
+         epochs=epochs,
+         lr=lr,
+         device=device,
+         save_dir=save_dir,
+         model_name='GoogLeNet_noisy',
+         interval=interval
+     )
+
+     print(f"Training finished; model saved to {save_dir}")
+     return model
+
+ # Entry point
+ if __name__ == '__main__':
+     noisy_train()
GoogLeNet-CIFAR10/Classification-noisy/scripts/train.yaml ADDED
@@ -0,0 +1,25 @@
+ batch_size: 128
+ num_workers: 2
+ dataset_path: ../dataset
+ epochs: 50
+ gpu: 0
+ lr: 0.1
+ interval: 2
+ # Noise experiment configuration
+ noise_types:
+   # Different labels get different noise types
+   # 0: no noise
+   # 1: no noise
+   # 2: strong Gaussian noise on 30% of the data
+   # 3: weak Gaussian noise on 10% of the data
+   # 4: strong salt-and-pepper noise on 30% of the data
+   # 5: weak salt-and-pepper noise on 10% of the data
+   # 6: strong Poisson noise on 30% of the data
+   # 7: weak Poisson noise on 10% of the data
+   # 8: no noise
+   # 9: no noise
+ noise_levels:
+   # Strength levels for each noise type
+   gaussian: [0.1, 0.2]      # standard deviations for Gaussian noise
+   salt_pepper: [0.05, 0.1]  # fraction of pixels affected by salt-and-pepper noise
+   poisson: [1]              # Poisson noise has no strength parameter
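For reference, this is how `get_raw_data.py` in this commit consumes the noise section of the config (a sketch; the printed values follow the yaml above):

```python
# Hedged sketch: reading the noise configuration the same way get_raw_data.py does.
import yaml

with open('./train.yaml', 'r') as f:
    config = yaml.safe_load(f)

noise_levels = config.get('noise_levels', {})
print(noise_levels.get('gaussian', [0.3]))     # [0.1, 0.2] with the yaml above
print(noise_levels.get('salt_pepper', [0.1]))  # [0.05, 0.1]
print(noise_levels.get('poisson', [1.0])[0])   # 1
```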
GoogLeNet-CIFAR10/Classification-normal/dataset/index.json ADDED
The diff for this file is too large to render. See raw diff
 
GoogLeNet-CIFAR10/Classification-normal/dataset/info.json ADDED
@@ -0,0 +1,4 @@
+ {
+     "model": "GoogLeNet",
+     "classes": ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
+ }
Image/LeNet5/model/0/epoch10/subject_model.pth → GoogLeNet-CIFAR10/Classification-normal/dataset/labels.npy RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:afaf5726ee219183b237b7a52fa33a9b6b942069d8042bd09ffd66a5f133cfd8
- size 252044
+ oid sha256:d13128de212014e257f241a6f6ea7d97f157e02c814dc70456d692fd18a85d32
+ size 480128
GoogLeNet-CIFAR10/Classification-normal/readme.md ADDED
@@ -0,0 +1,54 @@
+ # GoogLeNet-CIFAR10 Training and Feature Extraction
+
+ This project trains a GoogLeNet model on the CIFAR10 dataset and integrates the utilities needed for feature extraction and visualization.
+
+ ## The time_travel_saver data extractor
+ ```python
+ # Save the files needed to visualize the training process
+ if (epoch + 1) % interval == 0 or (epoch == 0):
+     # Build a dedicated sequential dataloader for collecting embeddings
+     ordered_trainloader = torch.utils.data.DataLoader(
+         trainloader.dataset,
+         batch_size=trainloader.batch_size,
+         shuffle=False,
+         num_workers=trainloader.num_workers
+     )
+     epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')  # per-epoch save path
+     save_model = time_travel_saver(model, ordered_trainloader, device, epoch_save_dir, model_name,
+                                    show=True, layer_name='avg_pool', auto_save_embedding=True)
+     # show: whether to print the model's layer dimensions
+     # layer_name: the layer to extract features from; if None, a layer within the target dimension range is picked
+     # auto_save_embedding: whether to save the feature vectors automatically (must be True)
+     save_model.save_checkpoint_embeddings_predictions()  # save model weights, features, and predictions to epoch_x
+     if epoch == 0:
+         save_model.save_lables_index(path="../dataset")  # save labels and index to dataset
+ ```
+
+
+ ## Project structure
+
+ - `./scripts/train.yaml`: training config (batch size, learning rate, GPU settings, and other parameters)
+ - `./scripts/train.py`: training script; trains the model and collects the feature data automatically
+ - `./model/`: trained model weights
+ - `./epochs/`: per-epoch high-dimensional feature vectors, prediction results, and related data
+
+ ## Usage
+
+ 1. Edit `train.yaml` to set the training parameters
+ 2. Run the training script:
+ ```
+ python train.py
+ ```
+ 3. After training, the data can be found at:
+ - model weights: `./epochs/epoch_{n}/model.pth`
+ - feature vectors: `./epochs/epoch_{n}/embeddings.npy`
+ - prediction results: `./epochs/epoch_{n}/predictions.npy`
+ - labels: `./dataset/labels.npy`
+ - dataset index: `./dataset/index.json`
+
+ ## Data format
+
+ - `embeddings.npy`: feature vectors of shape [n_samples, feature_dim]
+ - `predictions.npy`: prediction probabilities of shape [n_samples, n_classes]
+ - `labels.npy`: ground-truth labels of shape [n_samples]
+ - `index.json`: index information for the train, test, and validation splits
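As a hedged illustration of the data format listed above (the `epoch_2` directory is an arbitrary example; with `interval: 2` it is the first periodic checkpoint):

```python
# Sketch: load the saved arrays and compute overall accuracy on train+test.
import numpy as np

embeddings = np.load('./epochs/epoch_2/embeddings.npy')    # [n_samples, feature_dim]
predictions = np.load('./epochs/epoch_2/predictions.npy')  # [n_samples, n_classes]
labels = np.load('./dataset/labels.npy')                   # [n_samples]

accuracy = (predictions.argmax(axis=1) == labels).mean()
print(f"accuracy over all {len(labels)} samples: {accuracy:.4f}")
```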
GoogLeNet-CIFAR10/Classification-normal/scripts/dataset_utils.py ADDED
@@ -0,0 +1,59 @@
+ import torch
+ import torchvision
+ import torchvision.transforms as transforms
+ import os
+
+ # Dataset loading
+
+ def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=False):
+     """Get data loaders for the CIFAR10 dataset
+
+     Args:
+         batch_size: batch size
+         num_workers: number of worker processes for data loading
+         local_dataset_path: local dataset path; if given, use the local copy, otherwise download
+
+     Returns:
+         trainloader: training data loader
+         testloader: test data loader
+     """
+     # Preprocessing
+     transform_train = transforms.Compose([
+         transforms.RandomCrop(32, padding=4),
+         transforms.RandomHorizontalFlip(),
+         transforms.ToTensor(),
+         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+     ])
+
+     transform_test = transforms.Compose([
+         transforms.ToTensor(),
+         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+     ])
+
+     # Resolve the dataset path
+     if local_dataset_path:
+         print(f"Using local dataset: {local_dataset_path}")
+         # Download only if the dataset is missing from the given path
+         cifar_path = os.path.join(local_dataset_path, 'cifar-10-batches-py')
+         download = not os.path.exists(cifar_path) or not os.listdir(cifar_path)
+         dataset_path = local_dataset_path
+     else:
+         print("No local dataset path given; the dataset will be downloaded")
+         download = True
+         dataset_path = '../dataset'
+
+     # Create the dataset directory
+     if not os.path.exists(dataset_path):
+         os.makedirs(dataset_path)
+
+     trainset = torchvision.datasets.CIFAR10(
+         root=dataset_path, train=True, download=download, transform=transform_train)
+     trainloader = torch.utils.data.DataLoader(
+         trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+     testset = torchvision.datasets.CIFAR10(
+         root=dataset_path, train=False, download=download, transform=transform_test)
+     testloader = torch.utils.data.DataLoader(
+         testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+     return trainloader, testloader
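A minimal usage sketch for the loader above (the argument values mirror those used by `train.py` in this commit; the shapes in the comment assume the defaults):

```python
# Hedged sketch: build the loaders and inspect one batch.
from dataset_utils import get_cifar10_dataloaders

trainloader, testloader = get_cifar10_dataloaders(
    batch_size=128, num_workers=2, local_dataset_path='../dataset', shuffle=True)

images, targets = next(iter(trainloader))
print(images.shape, targets.shape)  # torch.Size([128, 3, 32, 32]) torch.Size([128])
```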
GoogLeNet-CIFAR10/Classification-normal/scripts/get_raw_data.py ADDED
@@ -0,0 +1,82 @@
+ # Read the dataset and save the images under ../dataset/raw_data in the dataset's full order as 0.png, 1.png, 2.png, ...
+
+ import os
+ import numpy as np
+ import torchvision
+ import torchvision.transforms as transforms
+ from PIL import Image
+ from tqdm import tqdm
+
+ def unpickle(file):
+     """Read a CIFAR-10 data file"""
+     import pickle
+     with open(file, 'rb') as fo:
+         dict = pickle.load(fo, encoding='bytes')
+     return dict
+
+ def save_images_from_cifar10(dataset_path, save_dir):
+     """Save images from the CIFAR-10 dataset
+
+     Args:
+         dataset_path: path to the CIFAR-10 dataset
+         save_dir: directory to save the images
+     """
+     # Create the save directory
+     os.makedirs(save_dir, exist_ok=True)
+
+     # Containers for the training set
+     train_data = []
+     train_labels = []
+
+     # Read the training data
+     for i in range(1, 6):
+         batch_file = os.path.join(dataset_path, f'data_batch_{i}')
+         if os.path.exists(batch_file):
+             print(f"Reading training batch {i}")
+             batch = unpickle(batch_file)
+             train_data.append(batch[b'data'])
+             train_labels.extend(batch[b'labels'])
+
+     # Merge all training data
+     if train_data:
+         train_data = np.vstack(train_data)
+         train_data = train_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
+
+     # Read the test data
+     test_file = os.path.join(dataset_path, 'test_batch')
+     if os.path.exists(test_file):
+         print("Reading test data")
+         test_batch = unpickle(test_file)
+         test_data = test_batch[b'data']
+         test_labels = test_batch[b'labels']
+         test_data = test_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
+     else:
+         test_data = []
+         test_labels = []
+
+     # Merge training and test data
+     all_data = np.concatenate([train_data, test_data]) if len(test_data) > 0 and len(train_data) > 0 else (train_data if len(train_data) > 0 else test_data)
+     all_labels = train_labels + test_labels if len(test_labels) > 0 and len(train_labels) > 0 else (train_labels if len(train_labels) > 0 else test_labels)
+
+     # Save the images
+     print(f"Saving {len(all_data)} images...")
+     for i, (img, label) in enumerate(tqdm(zip(all_data, all_labels), total=len(all_data))):
+         img = Image.fromarray(img)
+         img.save(os.path.join(save_dir, f"{i}.png"))
+
+     print(f"Done! {len(all_data)} images saved to {save_dir}")
+
+ if __name__ == "__main__":
+     # Set the paths
+     dataset_path = "../dataset/cifar-10-batches-py"
+     save_dir = "../dataset/raw_data"
+
+     # Check whether the dataset exists; download it if not
+     if not os.path.exists(dataset_path):
+         print("Dataset not found, downloading...")
+         os.makedirs("../dataset", exist_ok=True)
+         transform = transforms.Compose([transforms.ToTensor()])
+         trainset = torchvision.datasets.CIFAR10(root="../dataset", train=True, download=True, transform=transform)
+
+     # Save the images
+     save_images_from_cifar10(dataset_path, save_dir)
GoogLeNet-CIFAR10/Classification-normal/scripts/get_representation.py ADDED
@@ -0,0 +1,272 @@
+ import torch
+ import torch.nn as nn
+ import numpy as np
+ import os
+ import json
+ from tqdm import tqdm
+
+ class time_travel_saver:
+     """Visualization data extractor
+
+     Saves various data produced during model training, including:
+     1. model weights (.pth)
+     2. high-dimensional features (representation/*.npy)
+     3. prediction results (prediction/*.npy)
+     4. label data (label/labels.npy)
+     """
+
+     def __init__(self, model, dataloader, device, save_dir, model_name,
+                  auto_save_embedding=False, layer_name=None, show=False):
+         """Initialize
+
+         Args:
+             model: model instance to save
+             dataloader: data loader (must load samples in order)
+             device: compute device (cpu or gpu)
+             save_dir: root directory for saving
+             model_name: model name
+         """
+         self.model = model
+         self.dataloader = dataloader
+         self.device = device
+         self.save_dir = save_dir
+         self.model_name = model_name
+         self.auto_save = auto_save_embedding
+         self.layer_name = layer_name
+
+         if show and not layer_name:
+             layer_dimensions = self.show_dimensions()
+             # print(layer_dimensions)
+
+     def show_dimensions(self):
+         """Show the names and output dimensions of all layers in the model
+
+         Prints every layer's name and output dimension to help the user
+         pick a suitable layer for feature extraction.
+
+         Returns:
+             layer_dimensions: dict mapping layer names to dimensions
+         """
+         activation = {}
+         layer_dimensions = {}
+
+         def get_activation(name):
+             def hook(model, input, output):
+                 activation[name] = output.detach()
+             return hook
+
+         # Register hooks on all layers
+         handles = []
+         for name, module in self.model.named_modules():
+             if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
+                 handles.append(module.register_forward_hook(get_activation(name)))
+
+         self.model.eval()
+         with torch.no_grad():
+             # Run one batch to analyze each layer's output dimension
+             inputs, _ = next(iter(self.dataloader))
+             inputs = inputs.to(self.device)
+             _ = self.model(inputs)
+
+             # Analyze the output dimensions of all layers
+             print("\nLayer names and dimensions of the model:")
+             print("-" * 50)
+             print(f"{'Layer name':<40} {'Feature dim':<15} {'Output shape'}")
+             print("-" * 50)
+
+             for name, feat in activation.items():
+                 if feat is None:
+                     continue
+
+                 # Feature dimension (after flattening)
+                 feat_dim = feat.view(feat.size(0), -1).size(1)
+                 layer_dimensions[name] = feat_dim
+                 # Print the layer info
+                 shape_str = str(list(feat.shape))
+                 print(f"{name:<40} {feat_dim:<15} {shape_str}")
+
+             print("-" * 50)
+             print("Note: the feature dimension is the size of the flattened output tensor")
+             print("You can pick a different layer via the layer_name parameter of time_travel_saver")
+             print("e.g. layer_name='avg_pool' or layer_name='layer4'")
+
+         # Remove all hooks
+         for handle in handles:
+             handle.remove()
+
+         return layer_dimensions
+
+     def _extract_features_and_predictions(self):
+         """Extract features and prediction results
+
+         Returns:
+             features: high-dimensional features [n_samples, feature_dim]
+             predictions: prediction results [n_samples, n_classes]
+         """
+         features = []
+         predictions = []
+         indices = []
+         activation = {}
+
+         def get_activation(name):
+             def hook(model, input, output):
+                 # Only store the activation when needed, to avoid wasting memory
+                 if name not in activation or activation[name] is None:
+                     activation[name] = output.detach()
+             return hook
+
+         # Select the layer by name or by dimension
+
+         # Register hooks on all layers
+         handles = []
+         for name, module in self.model.named_modules():
+             if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
+                 handles.append(module.register_forward_hook(get_activation(name)))
+
+         self.model.eval()
+         with torch.no_grad():
+             # First run one batch to analyze each layer's output dimension
+             inputs, _ = next(iter(self.dataloader))
+             inputs = inputs.to(self.device)
+             _ = self.model(inputs)
+
+             # If a layer name was given, use that layer directly
+             if self.layer_name is not None:
+                 if self.layer_name not in activation:
+                     raise ValueError(f"The specified layer {self.layer_name} does not exist in the model")
+
+                 feat = activation[self.layer_name]
+                 if feat is None:
+                     raise ValueError(f"The specified layer {self.layer_name} produced no output features")
+
+                 suitable_layer_name = self.layer_name
+                 suitable_dim = feat.view(feat.size(0), -1).size(1)
+                 print(f"Using the specified feature layer: {suitable_layer_name}, feature dim: {suitable_dim}")
+             else:
+                 # Find a layer whose dimension falls in the target range
+                 target_dim_range = (256, 2048)
+                 suitable_layer_name = None
+                 suitable_dim = None
+
+                 # Analyze the output dimensions of all layers
+                 for name, feat in activation.items():
+                     if feat is None:
+                         continue
+                     feat_dim = feat.view(feat.size(0), -1).size(1)
+                     if target_dim_range[0] <= feat_dim <= target_dim_range[1]:
+                         suitable_layer_name = name
+                         suitable_dim = feat_dim
+                         break
+
+                 if suitable_layer_name is None:
+                     raise ValueError("No feature layer with a suitable dimension was found")
+
+                 print(f"Automatically selected feature layer: {suitable_layer_name}, feature dim: {suitable_dim}")
+
+             # Save the layer info
+             layer_info = {
+                 'layer_id': suitable_layer_name,
+                 'dim': suitable_dim
+             }
+             layer_info_path = os.path.join(os.path.dirname(self.save_dir), 'layer_info.json')
+             with open(layer_info_path, 'w') as f:
+                 json.dump(layer_info, f)
+
+             # Clear the activations from the first run
+             activation.clear()
+
+             # Now process all the data
+             for batch_idx, (inputs, _) in enumerate(tqdm(self.dataloader, desc="Extracting features and predictions")):
+                 inputs = inputs.to(self.device)
+                 outputs = self.model(inputs)  # get the predictions
+
+                 # Fetch and process the features
+                 feat = activation[suitable_layer_name]
+                 flat_features = torch.flatten(feat, start_dim=1)
+                 features.append(flat_features.cpu().numpy())
+                 predictions.append(outputs.cpu().numpy())
+
+                 # Clear this batch's activations
+                 activation.clear()
+
+         # Remove all hooks
+         for handle in handles:
+             handle.remove()
+
+         if len(features) > 0:
+             features = np.vstack(features)
+             predictions = np.vstack(predictions)
+             return features, predictions
+         else:
+             return np.array([]), np.array([])
+
+     def save_lables_index(self, path):
+         """Save label data and index information
+
+         Args:
+             path: save path
+         """
+         os.makedirs(path, exist_ok=True)
+         labels_path = os.path.join(path, 'labels.npy')
+         index_path = os.path.join(path, 'index.json')
+
+         # Try to read the labels from different attributes
+         try:
+             if hasattr(self.dataloader.dataset, 'targets'):
+                 # CIFAR10/CIFAR100 use the targets attribute
+                 labels = np.array(self.dataloader.dataset.targets)
+             elif hasattr(self.dataloader.dataset, 'labels'):
+                 # Some datasets use the labels attribute
+                 labels = np.array(self.dataloader.dataset.labels)
+             else:
+                 # If neither works, collect the labels from the data loader
+                 labels = []
+                 for _, batch_labels in self.dataloader:
+                     labels.append(batch_labels.numpy())
+                 labels = np.concatenate(labels)
+
+             # Save the label data
+             np.save(labels_path, labels)
+             print(f"Label data saved to {labels_path}")
+
+             # Build the dataset index
+             num_samples = len(labels)
+             indices = list(range(num_samples))
+
+             # Build the index dict
+             index_dict = {
+                 "train": list(range(50000)),        # indices 0-49999 are the training split
+                 "test": list(range(50000, 60000)),  # test indices run from 50000 to 59999
+                 "validation": []                    # empty initially
+             }
+
+             # Save the index to a JSON file
+             with open(index_path, 'w') as f:
+                 json.dump(index_dict, f, indent=4)
+
+             print(f"Dataset index saved to {index_path}")
+
+         except Exception as e:
+             print(f"Error while saving labels and index: {e}")
+
+     def save_checkpoint_embeddings_predictions(self, model=None):
+         """Save everything"""
+         if model is not None:
+             self.model = model
+         # Save the model weights
+         os.makedirs(self.save_dir, exist_ok=True)
+         model_path = os.path.join(self.save_dir, 'model.pth')
+         torch.save(self.model.state_dict(), model_path)
+
+         if self.auto_save:
+             # Extract and save the features and predictions
+             features, predictions = self._extract_features_and_predictions()
+
+             # Save the features
+             np.save(os.path.join(self.save_dir, 'embeddings.npy'), features)
+             # Save the predictions
+             np.save(os.path.join(self.save_dir, 'predictions.npy'), predictions)
+             print("\nSaved the following data:")
+             print(f"- model weights: {model_path}")
+             print(f"- feature vectors: [n_samples: {features.shape[0]}, feature_dim: {features.shape[1]}]")
+             print(f"- predictions: [n_samples: {predictions.shape[0]}, n_classes: {predictions.shape[1]}]")
GoogLeNet-CIFAR10/Classification-normal/scripts/model.py ADDED
@@ -0,0 +1,189 @@
+ '''
+ GoogLeNet in PyTorch.
+
+ Paper: "Going Deeper with Convolutions"
+ Reference: https://arxiv.org/abs/1409.4842
+
+ Key points:
+ 1. Uses Inception modules to extract features at multiple scales
+ 2. Uses 1x1 convolutions for dimensionality reduction to cut computation
+ 3. Replaces the fully connected layers with global average pooling
+ 4. The original adds auxiliary classifiers to aid training (not included in this implementation)
+ '''
+ import torch
+ import torch.nn as nn
+
+ class Inception(nn.Module):
+     '''Inception module
+
+     Args:
+         in_planes: number of input channels
+         n1x1: output channels of the 1x1 conv branch
+         n3x3red: reduction channels of the 3x3 conv branch
+         n3x3: output channels of the 3x3 conv branch
+         n5x5red: reduction channels of the 5x5 conv branch
+         n5x5: output channels of the 5x5 conv branch
+         pool_planes: output channels of the pooling branch
+     '''
+     def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
+         super(Inception, self).__init__()
+
+         # 1x1 conv branch
+         self.branch1 = nn.Sequential(
+             nn.Conv2d(in_planes, n1x1, kernel_size=1),
+             nn.BatchNorm2d(n1x1),
+             nn.ReLU(True),
+         )
+
+         # 1x1 -> 3x3 conv branch
+         self.branch2 = nn.Sequential(
+             nn.Conv2d(in_planes, n3x3red, kernel_size=1),
+             nn.BatchNorm2d(n3x3red),
+             nn.ReLU(True),
+             nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
+             nn.BatchNorm2d(n3x3),
+             nn.ReLU(True),
+         )
+
+         # 1x1 -> 5x5 conv branch (implemented as two 3x3 convs)
+         self.branch3 = nn.Sequential(
+             nn.Conv2d(in_planes, n5x5red, kernel_size=1),
+             nn.BatchNorm2d(n5x5red),
+             nn.ReLU(True),
+             nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
+             nn.BatchNorm2d(n5x5),
+             nn.ReLU(True),
+             nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
+             nn.BatchNorm2d(n5x5),
+             nn.ReLU(True),
+         )
+
+         # 3x3 pool -> 1x1 conv branch
+         self.branch4 = nn.Sequential(
+             nn.MaxPool2d(3, stride=1, padding=1),
+             nn.Conv2d(in_planes, pool_planes, kernel_size=1),
+             nn.BatchNorm2d(pool_planes),
+             nn.ReLU(True),
+         )
+
+     def forward(self, x):
+         '''Forward pass; concatenate the four branch outputs along the channel dimension'''
+         b1 = self.branch1(x)
+         b2 = self.branch2(x)
+         b3 = self.branch3(x)
+         b4 = self.branch4(x)
+         return torch.cat([b1, b2, b3, b4], 1)
+
+
+ class GoogLeNet(nn.Module):
+     '''GoogLeNet/Inception v1 network
+
+     Key points:
+     1. Builds a deep network out of Inception modules
+     2. Reduces computation via 1x1 convolutions
+     3. Replaces fully connected layers with global average pooling to cut parameters
+     '''
+     def __init__(self, num_classes=10):
+         super(GoogLeNet, self).__init__()
+
+         # Stage 1: a plain convolutional layer
+         self.pre_layers = nn.Sequential(
+             nn.Conv2d(3, 192, kernel_size=3, padding=1),
+             nn.BatchNorm2d(192),
+             nn.ReLU(True),
+         )
+
+         # Stage 2: 2 Inception modules
+         self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)      # output channels: 256
+         self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)    # output channels: 480
+
+         # Max pooling layer
+         self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
+
+         # Stage 3: 5 Inception modules
+         self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)     # output channels: 512
+         self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)    # output channels: 512
+         self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)    # output channels: 512
+         self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)    # output channels: 528
+         self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)  # output channels: 832
+
+         # Stage 4: 2 Inception modules
+         self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)  # output channels: 832
+         self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)  # output channels: 1024
+
+         # Global average pooling and classifier
+         self.avgpool = nn.AvgPool2d(8, stride=1)
+         self.linear = nn.Linear(1024, num_classes)
+
+     def forward(self, x):
+         # Stage 1
+         out = self.pre_layers(x)
+
+         # Stage 2
+         out = self.a3(out)
+         out = self.b3(out)
+         out = self.maxpool(out)
+
+         # Stage 3
+         out = self.a4(out)
+         out = self.b4(out)
+         out = self.c4(out)
+         out = self.d4(out)
+         out = self.e4(out)
+         out = self.maxpool(out)
+
+         # Stage 4
+         out = self.a5(out)
+         out = self.b5(out)
+
+         # Classifier
+         out = self.avgpool(out)
+         out = out.view(out.size(0), -1)
+         out = self.linear(out)
+         return out
+
+     def feature(self, x):
+         # Stage 1
+         out = self.pre_layers(x)
+
+         # Stage 2
+         out = self.a3(out)
+         out = self.b3(out)
+         out = self.maxpool(out)
+
+         # Stage 3
+         out = self.a4(out)
+         out = self.b4(out)
+         out = self.c4(out)
+         out = self.d4(out)
+         out = self.e4(out)
+         out = self.maxpool(out)
+
+         # Stage 4
+         out = self.a5(out)
+         out = self.b5(out)
+
+         # Pooled features
+         out = self.avgpool(out)
+         return out
+
+     def prediction(self, out):
+         out = out.view(out.size(0), -1)
+         out = self.linear(out)
+         return out
+
+ def test():
+     """Quick smoke test"""
+     net = GoogLeNet()
+     x = torch.randn(1, 3, 32, 32)
+     y = net(x)
+     print(y.size())
+
+     # Print the model structure
+     from torchinfo import summary
+     device = 'cuda' if torch.cuda.is_available() else 'cpu'
+     net = net.to(device)
+     summary(net, (1, 3, 32, 32))
+
+ if __name__ == '__main__':
+     test()
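A quick sanity check for the model above, separate from the torchinfo-based `test()` (a sketch; the parameter count is whatever this layer configuration yields, not a claimed reference number):

```python
# Hedged sketch: shape check and parameter count for this GoogLeNet variant.
import torch
from model import GoogLeNet

net = GoogLeNet(num_classes=10)
out = net(torch.randn(2, 3, 32, 32))
print(out.shape)  # torch.Size([2, 10])
print(f"{sum(p.numel() for p in net.parameters()) / 1e6:.2f}M parameters")
```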
GoogLeNet-CIFAR10/Classification-normal/scripts/train.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import sys
+ import os
+ import yaml
+ from pathlib import Path
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ import time
+ import logging
+ import numpy as np
+ from tqdm import tqdm
+
+
+ from dataset_utils import get_cifar10_dataloaders
+ from model import GoogLeNet
+ from get_representation import time_travel_saver
+
+ def setup_logger(log_file):
+     """Configure the logger, overwriting the log file if it already exists.
+
+     Args:
+         log_file: path to the log file
+
+     Returns:
+         logger: the configured logger
+     """
+     # Create the logger
+     logger = logging.getLogger('train')
+     logger.setLevel(logging.INFO)
+
+     # Remove any existing handlers
+     if logger.hasHandlers():
+         logger.handlers.clear()
+
+     # Create a file handler; mode 'w' overwrites an existing file
+     fh = logging.FileHandler(log_file, mode='w')
+     fh.setLevel(logging.INFO)
+
+     # Create a console handler
+     ch = logging.StreamHandler()
+     ch.setLevel(logging.INFO)
+
+     # Create the formatter
+     formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+     fh.setFormatter(formatter)
+     ch.setFormatter(formatter)
+
+     # Attach the handlers
+     logger.addHandler(fh)
+     logger.addHandler(ch)
+
+     return logger
+
+ def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
+                 save_dir='./epochs', model_name='model', interval=1):
+     """Generic model training function.
+
+     Args:
+         model: the model to train
+         trainloader: training data loader
+         testloader: test data loader
+         epochs: number of training epochs
+         lr: learning rate
+         device: training device in the form 'cuda:N', where N is the GPU index (0,1,2,3)
+         save_dir: directory for saved models
+         model_name: model name
+         interval: checkpoint-saving interval (in epochs)
+     """
+     # Check and select the GPU device
+     if not torch.cuda.is_available():
+         print("CUDA is not available; falling back to CPU training")
+         device = 'cpu'
+     elif not device.startswith('cuda:'):
+         device = 'cuda:0'
+
+     # Make sure the device string is well-formed
+     if device.startswith('cuda:'):
+         gpu_id = int(device.split(':')[1])
+         if gpu_id >= torch.cuda.device_count():
+             print(f"GPU {gpu_id} is not available; falling back to GPU 0")
+             device = 'cuda:0'
+
+     # Set up the save directory
+     if not os.path.exists(save_dir):
+         os.makedirs(save_dir)
+
+     # Set up the log file path
+     log_file = os.path.join(os.path.dirname(save_dir), 'epochs', 'train.log')
+     if not os.path.exists(os.path.dirname(log_file)):
+         os.makedirs(os.path.dirname(log_file))
+
+     logger = setup_logger(log_file)
+
+     # Loss function and optimizer
+     criterion = nn.CrossEntropyLoss()
+     optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
+     scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50)
+
+     # Move the model to the target device
+     model = model.to(device)
+     best_acc = 0
+     start_time = time.time()
+
+     logger.info(f'Started training {model_name}')
+     logger.info(f'Total epochs: {epochs}, learning rate: {lr}, device: {device}')
+
+     for epoch in range(epochs):
+         # Training phase
+         model.train()
+         train_loss = 0
+         correct = 0
+         total = 0
+
+         train_pbar = tqdm(trainloader, desc=f'Epoch {epoch+1}/{epochs} [Train]')
+         for batch_idx, (inputs, targets) in enumerate(train_pbar):
+             inputs, targets = inputs.to(device), targets.to(device)
+             optimizer.zero_grad()
+             outputs = model(inputs)
+             loss = criterion(outputs, targets)
+             loss.backward()
+             optimizer.step()
+
+             train_loss += loss.item()
+             _, predicted = outputs.max(1)
+             total += targets.size(0)
+             correct += predicted.eq(targets).sum().item()
+
+             # Update the progress bar
+             train_pbar.set_postfix({
+                 'loss': f'{train_loss/(batch_idx+1):.3f}',
+                 'acc': f'{100.*correct/total:.2f}%'
+             })
+
+         # Keep the training-phase accuracy
+         train_acc = 100.*correct/total
+         train_correct = correct
+         train_total = total
+
+         # Test phase
+         model.eval()
+         test_loss = 0
+         correct = 0
+         total = 0
+
+         test_pbar = tqdm(testloader, desc=f'Epoch {epoch+1}/{epochs} [Test]')
+         with torch.no_grad():
+             for batch_idx, (inputs, targets) in enumerate(test_pbar):
+                 inputs, targets = inputs.to(device), targets.to(device)
+                 outputs = model(inputs)
+                 loss = criterion(outputs, targets)
+
+                 test_loss += loss.item()
+                 _, predicted = outputs.max(1)
+                 total += targets.size(0)
+                 correct += predicted.eq(targets).sum().item()
+
+                 # Update the progress bar
+                 test_pbar.set_postfix({
+                     'loss': f'{test_loss/(batch_idx+1):.3f}',
+                     'acc': f'{100.*correct/total:.2f}%'
+                 })
+
+         # Compute test accuracy
+         acc = 100.*correct/total
+
+         # Log training and test loss/accuracy
+         logger.info(f'Epoch: {epoch+1} | Train Loss: {train_loss/(len(trainloader)):.3f} | Train Acc: {train_acc:.2f}% | '
+                     f'Test Loss: {test_loss/(batch_idx+1):.3f} | Test Acc: {acc:.2f}%')
+
+         # Save the files needed to visualize the training process
+         if (epoch + 1) % interval == 0 or (epoch == 0):
+             # Build a dedicated sequential dataloader for collecting embeddings,
+             # concatenating the training and test sets
+             from torch.utils.data import ConcatDataset
+
+             def custom_collate_fn(batch):
+                 # Make sure every item is a tensor
+                 data = [item[0] for item in batch]    # images
+                 target = [item[1] for item in batch]  # labels
+
+                 # Convert the lists to tensors
+                 data = torch.stack(data, 0)
+                 target = torch.tensor(target)
+
+                 return [data, target]
+
+             # Merge the training and test sets
+             combined_dataset = ConcatDataset([trainloader.dataset, testloader.dataset])
+
+             # Create a sequential data loader
+             ordered_loader = torch.utils.data.DataLoader(
+                 combined_dataset,  # use the merged dataset
+                 batch_size=trainloader.batch_size,
+                 shuffle=False,  # keep loading order deterministic
+                 num_workers=trainloader.num_workers,
+                 collate_fn=custom_collate_fn  # use the custom collate function
+             )
+             epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
+             save_model = time_travel_saver(model, ordered_loader, device, epoch_save_dir, model_name,
+                                            show=True, layer_name='avgpool', auto_save_embedding=True)
+             save_model.save_checkpoint_embeddings_predictions()
+             if epoch == 0:
+                 save_model.save_lables_index(path="../dataset")
+
+         scheduler.step()
+
+     logger.info('Training complete!')
+
+ def main():
+     # Load the config file
+     config_path = Path(__file__).parent / 'train.yaml'
+     with open(config_path) as f:
+         config = yaml.safe_load(f)
+
+     # Create the model
+     model = GoogLeNet(num_classes=10)
+
+     # Get the data loaders
+     trainloader, testloader = get_cifar10_dataloaders(
+         batch_size=128,
+         num_workers=2,
+         local_dataset_path=config['dataset_path'],
+         shuffle=True
+     )
+
+     # Train the model
+     train_model(
+         model=model,
+         trainloader=trainloader,
+         testloader=testloader,
+         epochs=config['epochs'],
+         lr=config['lr'],
+         device=f'cuda:{config["gpu"]}',
+         save_dir='../epochs',
+         model_name='GoogLeNet',
+         interval=config['interval']
+     )
+
+ if __name__ == '__main__':
+     main()
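(Editor's sketch, not part of the commit.) Each epoch_N directory written by time_travel_saver can later be reloaded for offline analysis. A minimal sketch, assuming the checkpoint is a state_dict saved as subject_model.pth — the filename used by the older Image/LeNet5/model layout; the actual name is determined by get_representation.py:

import torch
from model import GoogLeNet  # scripts/model.py from this commit

# Reload one saved epoch for offline evaluation.
# 'subject_model.pth' is an assumption borrowed from the old LeNet5
# layout; check get_representation.py for the real filename.
model = GoogLeNet(num_classes=10)
state = torch.load('../epochs/epoch_2/subject_model.pth', map_location='cpu')
model.load_state_dict(state)
model.eval()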
GoogLeNet-CIFAR10/Classification-normal/scripts/train.yaml ADDED
@@ -0,0 +1,7 @@
+ batch_size: 128
+ num_workers: 2
+ dataset_path: ../dataset
+ epochs: 50
+ gpu: 0
+ lr: 0.1
+ interval: 2
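(Editor's sketch, not part of the commit.) train.py resolves train.yaml relative to its own file and uses the relative paths ../dataset and ../epochs, so it is meant to be launched from inside the scripts/ directory. A minimal sanity check of the config before launching; note that batch_size and num_workers appear in the file but are currently hard-coded to 128 and 2 inside main():

import yaml
from pathlib import Path

# Verify train.yaml carries every key that main() actually reads.
config_path = Path('GoogLeNet-CIFAR10/Classification-normal/scripts/train.yaml')
with open(config_path) as f:
    config = yaml.safe_load(f)
for key in ('dataset_path', 'epochs', 'gpu', 'lr', 'interval'):
    assert key in config, f'train.yaml is missing required key: {key}'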
Image/LeNet5/code/backdoor_train.log DELETED
@@ -1,253 +0,0 @@
- 2025-03-14 18:51:19,652 - train - INFO - Started training lenet5
- 2025-03-14 18:51:19,652 - train - INFO - Total epochs: 50, learning rate: 0.1, device: cuda:2
- 2025-03-14 18:51:20,380 - train - INFO - Epoch: 1 | Batch: 0 | Loss: 2.303 | Acc: 10.16%
- 2025-03-14 18:51:22,789 - train - INFO - Epoch: 1 | Batch: 100 | Loss: 2.208 | Acc: 19.59%
- 2025-03-14 18:51:25,178 - train - INFO - Epoch: 1 | Batch: 200 | Loss: 2.138 | Acc: 20.32%
- 2025-03-14 18:51:27,268 - train - INFO - Epoch: 1 | Batch: 300 | Loss: 2.075 | Acc: 22.09%
- 2025-03-14 18:51:30,404 - train - INFO - Epoch: 1 | Test Loss: 1.950 | Test Acc: 25.94%
- 2025-03-14 18:51:30,814 - train - INFO - Epoch: 2 | Batch: 0 | Loss: 2.080 | Acc: 19.53%
- 2025-03-14 18:51:33,059 - train - INFO - Epoch: 2 | Batch: 100 | Loss: 1.910 | Acc: 26.99%
- 2025-03-14 18:51:35,303 - train - INFO - Epoch: 2 | Batch: 200 | Loss: 1.907 | Acc: 27.06%
- 2025-03-14 18:51:37,458 - train - INFO - Epoch: 2 | Batch: 300 | Loss: 1.891 | Acc: 27.89%
- 2025-03-14 18:51:40,819 - train - INFO - Epoch: 2 | Test Loss: 1.806 | Test Acc: 31.65%
- 2025-03-14 18:51:50,241 - train - INFO - Epoch: 3 | Batch: 0 | Loss: 1.770 | Acc: 29.69%
- 2025-03-14 18:51:52,600 - train - INFO - Epoch: 3 | Batch: 100 | Loss: 1.857 | Acc: 29.28%
- 2025-03-14 18:51:54,824 - train - INFO - Epoch: 3 | Batch: 200 | Loss: 1.844 | Acc: 30.26%
- 2025-03-14 18:51:57,048 - train - INFO - Epoch: 3 | Batch: 300 | Loss: 1.846 | Acc: 30.12%
- 2025-03-14 18:52:00,409 - train - INFO - Epoch: 3 | Test Loss: 1.758 | Test Acc: 33.70%
- 2025-03-14 18:52:00,576 - train - INFO - Epoch: 4 | Batch: 0 | Loss: 1.830 | Acc: 28.91%
- 2025-03-14 18:52:02,690 - train - INFO - Epoch: 4 | Batch: 100 | Loss: 1.814 | Acc: 31.25%
- 2025-03-14 18:52:04,841 - train - INFO - Epoch: 4 | Batch: 200 | Loss: 1.804 | Acc: 31.78%
- 2025-03-14 18:52:06,995 - train - INFO - Epoch: 4 | Batch: 300 | Loss: 1.805 | Acc: 32.05%
- 2025-03-14 18:52:10,157 - train - INFO - Epoch: 4 | Test Loss: 1.777 | Test Acc: 33.31%
- 2025-03-14 18:52:19,581 - train - INFO - Epoch: 5 | Batch: 0 | Loss: 1.773 | Acc: 35.94%
- 2025-03-14 18:52:21,775 - train - INFO - Epoch: 5 | Batch: 100 | Loss: 1.773 | Acc: 34.38%
- 2025-03-14 18:52:23,865 - train - INFO - Epoch: 5 | Batch: 200 | Loss: 1.751 | Acc: 35.28%
- 2025-03-14 18:52:26,032 - train - INFO - Epoch: 5 | Batch: 300 | Loss: 1.740 | Acc: 35.98%
- 2025-03-14 18:52:29,181 - train - INFO - Epoch: 5 | Test Loss: 1.667 | Test Acc: 36.89%
- 2025-03-14 18:52:29,344 - train - INFO - Epoch: 6 | Batch: 0 | Loss: 1.727 | Acc: 38.28%
- 2025-03-14 18:52:31,511 - train - INFO - Epoch: 6 | Batch: 100 | Loss: 1.734 | Acc: 36.10%
- 2025-03-14 18:52:33,763 - train - INFO - Epoch: 6 | Batch: 200 | Loss: 1.723 | Acc: 36.55%
- 2025-03-14 18:52:35,894 - train - INFO - Epoch: 6 | Batch: 300 | Loss: 1.719 | Acc: 36.65%
- 2025-03-14 18:52:39,232 - train - INFO - Epoch: 6 | Test Loss: 1.720 | Test Acc: 36.32%
- 2025-03-14 18:52:48,612 - train - INFO - Epoch: 7 | Batch: 0 | Loss: 1.590 | Acc: 45.31%
- 2025-03-14 18:52:50,865 - train - INFO - Epoch: 7 | Batch: 100 | Loss: 1.648 | Acc: 39.59%
- 2025-03-14 18:52:53,028 - train - INFO - Epoch: 7 | Batch: 200 | Loss: 1.659 | Acc: 38.78%
- 2025-03-14 18:52:55,063 - train - INFO - Epoch: 7 | Batch: 300 | Loss: 1.656 | Acc: 39.38%
- 2025-03-14 18:52:58,384 - train - INFO - Epoch: 7 | Test Loss: 1.744 | Test Acc: 34.71%
- 2025-03-14 18:52:58,567 - train - INFO - Epoch: 8 | Batch: 0 | Loss: 1.699 | Acc: 34.38%
- 2025-03-14 18:53:00,712 - train - INFO - Epoch: 8 | Batch: 100 | Loss: 1.627 | Acc: 40.86%
- 2025-03-14 18:53:03,147 - train - INFO - Epoch: 8 | Batch: 200 | Loss: 1.647 | Acc: 40.62%
- 2025-03-14 18:53:05,342 - train - INFO - Epoch: 8 | Batch: 300 | Loss: 1.647 | Acc: 40.75%
- 2025-03-14 18:53:08,945 - train - INFO - Epoch: 8 | Test Loss: 1.612 | Test Acc: 41.16%
- 2025-03-14 18:53:18,871 - train - INFO - Epoch: 9 | Batch: 0 | Loss: 1.812 | Acc: 36.72%
- 2025-03-14 18:53:21,249 - train - INFO - Epoch: 9 | Batch: 100 | Loss: 1.644 | Acc: 40.66%
- 2025-03-14 18:53:23,450 - train - INFO - Epoch: 9 | Batch: 200 | Loss: 1.627 | Acc: 41.32%
- 2025-03-14 18:53:25,638 - train - INFO - Epoch: 9 | Batch: 300 | Loss: 1.631 | Acc: 41.23%
- 2025-03-14 18:53:28,926 - train - INFO - Epoch: 9 | Test Loss: 1.592 | Test Acc: 42.59%
- 2025-03-14 18:53:29,088 - train - INFO - Epoch: 10 | Batch: 0 | Loss: 1.626 | Acc: 41.41%
- 2025-03-14 18:53:31,206 - train - INFO - Epoch: 10 | Batch: 100 | Loss: 1.612 | Acc: 42.34%
- 2025-03-14 18:53:33,239 - train - INFO - Epoch: 10 | Batch: 200 | Loss: 1.617 | Acc: 42.14%
- 2025-03-14 18:53:35,375 - train - INFO - Epoch: 10 | Batch: 300 | Loss: 1.623 | Acc: 41.96%
- 2025-03-14 18:53:38,613 - train - INFO - Epoch: 10 | Test Loss: 1.616 | Test Acc: 43.11%
- 2025-03-14 18:53:47,913 - train - INFO - Epoch: 11 | Batch: 0 | Loss: 1.573 | Acc: 38.28%
- 2025-03-14 18:53:50,081 - train - INFO - Epoch: 11 | Batch: 100 | Loss: 1.611 | Acc: 43.01%
- 2025-03-14 18:53:52,274 - train - INFO - Epoch: 11 | Batch: 200 | Loss: 1.596 | Acc: 43.17%
- 2025-03-14 18:53:54,447 - train - INFO - Epoch: 11 | Batch: 300 | Loss: 1.589 | Acc: 43.13%
- 2025-03-14 18:53:57,925 - train - INFO - Epoch: 11 | Test Loss: 1.554 | Test Acc: 45.18%
- 2025-03-14 18:53:58,109 - train - INFO - Epoch: 12 | Batch: 0 | Loss: 1.450 | Acc: 45.31%
- 2025-03-14 18:54:00,335 - train - INFO - Epoch: 12 | Batch: 100 | Loss: 1.610 | Acc: 41.93%
- 2025-03-14 18:54:02,491 - train - INFO - Epoch: 12 | Batch: 200 | Loss: 1.611 | Acc: 42.44%
- 2025-03-14 18:54:04,671 - train - INFO - Epoch: 12 | Batch: 300 | Loss: 1.594 | Acc: 42.97%
- 2025-03-14 18:54:07,901 - train - INFO - Epoch: 12 | Test Loss: 1.563 | Test Acc: 47.00%
- 2025-03-14 18:54:17,119 - train - INFO - Epoch: 13 | Batch: 0 | Loss: 1.498 | Acc: 53.12%
- 2025-03-14 18:54:19,213 - train - INFO - Epoch: 13 | Batch: 100 | Loss: 1.624 | Acc: 42.75%
- 2025-03-14 18:54:21,306 - train - INFO - Epoch: 13 | Batch: 200 | Loss: 1.606 | Acc: 43.22%
- 2025-03-14 18:54:23,344 - train - INFO - Epoch: 13 | Batch: 300 | Loss: 1.600 | Acc: 43.19%
- 2025-03-14 18:54:26,670 - train - INFO - Epoch: 13 | Test Loss: 1.666 | Test Acc: 39.55%
- 2025-03-14 18:54:26,845 - train - INFO - Epoch: 14 | Batch: 0 | Loss: 1.680 | Acc: 39.84%
- 2025-03-14 18:54:28,959 - train - INFO - Epoch: 14 | Batch: 100 | Loss: 1.583 | Acc: 43.79%
- 2025-03-14 18:54:31,171 - train - INFO - Epoch: 14 | Batch: 200 | Loss: 1.568 | Acc: 44.59%
- 2025-03-14 18:54:33,362 - train - INFO - Epoch: 14 | Batch: 300 | Loss: 1.581 | Acc: 44.00%
- 2025-03-14 18:54:36,845 - train - INFO - Epoch: 14 | Test Loss: 1.533 | Test Acc: 45.22%
- 2025-03-14 18:54:47,390 - train - INFO - Epoch: 15 | Batch: 0 | Loss: 1.500 | Acc: 46.88%
- 2025-03-14 18:54:49,691 - train - INFO - Epoch: 15 | Batch: 100 | Loss: 1.537 | Acc: 46.24%
- 2025-03-14 18:54:51,869 - train - INFO - Epoch: 15 | Batch: 200 | Loss: 1.557 | Acc: 45.44%
- 2025-03-14 18:54:53,985 - train - INFO - Epoch: 15 | Batch: 300 | Loss: 1.564 | Acc: 45.09%
- 2025-03-14 18:54:57,177 - train - INFO - Epoch: 15 | Test Loss: 1.530 | Test Acc: 47.11%
- 2025-03-14 18:54:57,341 - train - INFO - Epoch: 16 | Batch: 0 | Loss: 1.560 | Acc: 48.44%
- 2025-03-14 18:54:59,495 - train - INFO - Epoch: 16 | Batch: 100 | Loss: 1.582 | Acc: 43.63%
- 2025-03-14 18:55:01,686 - train - INFO - Epoch: 16 | Batch: 200 | Loss: 1.583 | Acc: 43.79%
- 2025-03-14 18:55:03,817 - train - INFO - Epoch: 16 | Batch: 300 | Loss: 1.588 | Acc: 43.62%
- 2025-03-14 18:55:07,207 - train - INFO - Epoch: 16 | Test Loss: 1.551 | Test Acc: 45.91%
- 2025-03-14 18:55:16,376 - train - INFO - Epoch: 17 | Batch: 0 | Loss: 1.535 | Acc: 46.09%
- 2025-03-14 18:55:18,547 - train - INFO - Epoch: 17 | Batch: 100 | Loss: 1.551 | Acc: 45.02%
- 2025-03-14 18:55:20,727 - train - INFO - Epoch: 17 | Batch: 200 | Loss: 1.544 | Acc: 45.37%
- 2025-03-14 18:55:22,965 - train - INFO - Epoch: 17 | Batch: 300 | Loss: 1.556 | Acc: 44.91%
- 2025-03-14 18:55:26,302 - train - INFO - Epoch: 17 | Test Loss: 1.539 | Test Acc: 45.68%
- 2025-03-14 18:55:26,472 - train - INFO - Epoch: 18 | Batch: 0 | Loss: 1.663 | Acc: 42.19%
- 2025-03-14 18:55:28,773 - train - INFO - Epoch: 18 | Batch: 100 | Loss: 1.586 | Acc: 44.14%
- 2025-03-14 18:55:30,980 - train - INFO - Epoch: 18 | Batch: 200 | Loss: 1.577 | Acc: 44.25%
- 2025-03-14 18:55:33,160 - train - INFO - Epoch: 18 | Batch: 300 | Loss: 1.569 | Acc: 44.71%
- 2025-03-14 18:55:36,539 - train - INFO - Epoch: 18 | Test Loss: 1.540 | Test Acc: 45.29%
- 2025-03-14 18:55:45,385 - train - INFO - Epoch: 19 | Batch: 0 | Loss: 1.668 | Acc: 37.50%
- 2025-03-14 18:55:47,447 - train - INFO - Epoch: 19 | Batch: 100 | Loss: 1.580 | Acc: 44.69%
- 2025-03-14 18:55:49,614 - train - INFO - Epoch: 19 | Batch: 200 | Loss: 1.562 | Acc: 45.13%
- 2025-03-14 18:55:51,710 - train - INFO - Epoch: 19 | Batch: 300 | Loss: 1.561 | Acc: 45.20%
- 2025-03-14 18:55:55,165 - train - INFO - Epoch: 19 | Test Loss: 1.507 | Test Acc: 48.32%
- 2025-03-14 18:55:55,342 - train - INFO - Epoch: 20 | Batch: 0 | Loss: 1.495 | Acc: 43.75%
- 2025-03-14 18:55:57,635 - train - INFO - Epoch: 20 | Batch: 100 | Loss: 1.536 | Acc: 45.37%
- 2025-03-14 18:55:59,839 - train - INFO - Epoch: 20 | Batch: 200 | Loss: 1.547 | Acc: 44.96%
- 2025-03-14 18:56:01,970 - train - INFO - Epoch: 20 | Batch: 300 | Loss: 1.548 | Acc: 45.40%
- 2025-03-14 18:56:05,495 - train - INFO - Epoch: 20 | Test Loss: 1.528 | Test Acc: 46.53%
- 2025-03-14 18:56:14,208 - train - INFO - Epoch: 21 | Batch: 0 | Loss: 1.659 | Acc: 46.09%
- 2025-03-14 18:56:16,352 - train - INFO - Epoch: 21 | Batch: 100 | Loss: 1.562 | Acc: 45.17%
- 2025-03-14 18:56:18,446 - train - INFO - Epoch: 21 | Batch: 200 | Loss: 1.566 | Acc: 45.19%
- 2025-03-14 18:56:20,611 - train - INFO - Epoch: 21 | Batch: 300 | Loss: 1.556 | Acc: 45.54%
- 2025-03-14 18:56:24,010 - train - INFO - Epoch: 21 | Test Loss: 1.623 | Test Acc: 44.64%
- 2025-03-14 18:56:24,185 - train - INFO - Epoch: 22 | Batch: 0 | Loss: 1.453 | Acc: 47.66%
- 2025-03-14 18:56:26,310 - train - INFO - Epoch: 22 | Batch: 100 | Loss: 1.546 | Acc: 45.95%
- 2025-03-14 18:56:28,333 - train - INFO - Epoch: 22 | Batch: 200 | Loss: 1.550 | Acc: 45.71%
- 2025-03-14 18:56:30,502 - train - INFO - Epoch: 22 | Batch: 300 | Loss: 1.547 | Acc: 45.70%
- 2025-03-14 18:56:33,715 - train - INFO - Epoch: 22 | Test Loss: 1.516 | Test Acc: 46.97%
- 2025-03-14 18:56:42,854 - train - INFO - Epoch: 23 | Batch: 0 | Loss: 1.692 | Acc: 45.31%
- 2025-03-14 18:56:45,157 - train - INFO - Epoch: 23 | Batch: 100 | Loss: 1.559 | Acc: 44.98%
- 2025-03-14 18:56:47,388 - train - INFO - Epoch: 23 | Batch: 200 | Loss: 1.545 | Acc: 45.38%
- 2025-03-14 18:56:49,617 - train - INFO - Epoch: 23 | Batch: 300 | Loss: 1.546 | Acc: 45.66%
- 2025-03-14 18:56:52,961 - train - INFO - Epoch: 23 | Test Loss: 1.481 | Test Acc: 48.12%
- 2025-03-14 18:56:53,138 - train - INFO - Epoch: 24 | Batch: 0 | Loss: 1.525 | Acc: 48.44%
- 2025-03-14 18:56:55,474 - train - INFO - Epoch: 24 | Batch: 100 | Loss: 1.538 | Acc: 46.25%
- 2025-03-14 18:56:57,728 - train - INFO - Epoch: 24 | Batch: 200 | Loss: 1.548 | Acc: 45.48%
- 2025-03-14 18:57:00,002 - train - INFO - Epoch: 24 | Batch: 300 | Loss: 1.547 | Acc: 45.61%
- 2025-03-14 18:57:03,345 - train - INFO - Epoch: 24 | Test Loss: 1.480 | Test Acc: 48.34%
- 2025-03-14 18:57:12,515 - train - INFO - Epoch: 25 | Batch: 0 | Loss: 1.395 | Acc: 45.31%
- 2025-03-14 18:57:14,685 - train - INFO - Epoch: 25 | Batch: 100 | Loss: 1.532 | Acc: 46.67%
- 2025-03-14 18:57:16,799 - train - INFO - Epoch: 25 | Batch: 200 | Loss: 1.524 | Acc: 46.82%
- 2025-03-14 18:57:19,039 - train - INFO - Epoch: 25 | Batch: 300 | Loss: 1.517 | Acc: 46.98%
- 2025-03-14 18:57:22,516 - train - INFO - Epoch: 25 | Test Loss: 1.553 | Test Acc: 46.03%
- 2025-03-14 18:57:22,683 - train - INFO - Epoch: 26 | Batch: 0 | Loss: 1.736 | Acc: 44.53%
- 2025-03-14 18:57:24,762 - train - INFO - Epoch: 26 | Batch: 100 | Loss: 1.509 | Acc: 46.84%
- 2025-03-14 18:57:26,861 - train - INFO - Epoch: 26 | Batch: 200 | Loss: 1.519 | Acc: 46.68%
- 2025-03-14 18:57:29,066 - train - INFO - Epoch: 26 | Batch: 300 | Loss: 1.519 | Acc: 46.61%
- 2025-03-14 18:57:32,297 - train - INFO - Epoch: 26 | Test Loss: 1.525 | Test Acc: 46.22%
- 2025-03-14 18:57:41,935 - train - INFO - Epoch: 27 | Batch: 0 | Loss: 1.524 | Acc: 50.78%
- 2025-03-14 18:57:44,126 - train - INFO - Epoch: 27 | Batch: 100 | Loss: 1.526 | Acc: 45.78%
- 2025-03-14 18:57:46,507 - train - INFO - Epoch: 27 | Batch: 200 | Loss: 1.519 | Acc: 46.54%
- 2025-03-14 18:57:48,982 - train - INFO - Epoch: 27 | Batch: 300 | Loss: 1.525 | Acc: 46.30%
- 2025-03-14 18:57:52,953 - train - INFO - Epoch: 27 | Test Loss: 1.485 | Test Acc: 47.34%
- 2025-03-14 18:57:53,134 - train - INFO - Epoch: 28 | Batch: 0 | Loss: 1.597 | Acc: 44.53%
- 2025-03-14 18:57:55,479 - train - INFO - Epoch: 28 | Batch: 100 | Loss: 1.521 | Acc: 47.34%
- 2025-03-14 18:57:57,669 - train - INFO - Epoch: 28 | Batch: 200 | Loss: 1.527 | Acc: 46.73%
- 2025-03-14 18:57:59,857 - train - INFO - Epoch: 28 | Batch: 300 | Loss: 1.540 | Acc: 46.01%
- 2025-03-14 18:58:03,284 - train - INFO - Epoch: 28 | Test Loss: 1.585 | Test Acc: 46.93%
- 2025-03-14 18:58:13,439 - train - INFO - Epoch: 29 | Batch: 0 | Loss: 1.544 | Acc: 47.66%
- 2025-03-14 18:58:15,897 - train - INFO - Epoch: 29 | Batch: 100 | Loss: 1.481 | Acc: 48.57%
- 2025-03-14 18:58:18,632 - train - INFO - Epoch: 29 | Batch: 200 | Loss: 1.503 | Acc: 47.80%
- 2025-03-14 18:58:20,904 - train - INFO - Epoch: 29 | Batch: 300 | Loss: 1.512 | Acc: 47.66%
- 2025-03-14 18:58:24,312 - train - INFO - Epoch: 29 | Test Loss: 1.542 | Test Acc: 45.92%
- 2025-03-14 18:58:24,483 - train - INFO - Epoch: 30 | Batch: 0 | Loss: 1.539 | Acc: 44.53%
- 2025-03-14 18:58:26,696 - train - INFO - Epoch: 30 | Batch: 100 | Loss: 1.498 | Acc: 47.67%
- 2025-03-14 18:58:28,804 - train - INFO - Epoch: 30 | Batch: 200 | Loss: 1.515 | Acc: 47.15%
- 2025-03-14 18:58:31,277 - train - INFO - Epoch: 30 | Batch: 300 | Loss: 1.509 | Acc: 47.32%
- 2025-03-14 18:58:34,684 - train - INFO - Epoch: 30 | Test Loss: 1.489 | Test Acc: 48.24%
- 2025-03-14 18:58:43,983 - train - INFO - Epoch: 31 | Batch: 0 | Loss: 1.627 | Acc: 44.53%
- 2025-03-14 18:58:46,126 - train - INFO - Epoch: 31 | Batch: 100 | Loss: 1.493 | Acc: 48.05%
- 2025-03-14 18:58:48,265 - train - INFO - Epoch: 31 | Batch: 200 | Loss: 1.509 | Acc: 47.26%
- 2025-03-14 18:58:50,534 - train - INFO - Epoch: 31 | Batch: 300 | Loss: 1.504 | Acc: 47.44%
- 2025-03-14 18:58:53,718 - train - INFO - Epoch: 31 | Test Loss: 1.490 | Test Acc: 47.97%
- 2025-03-14 18:58:53,855 - train - INFO - Epoch: 32 | Batch: 0 | Loss: 1.397 | Acc: 52.34%
- 2025-03-14 18:58:55,938 - train - INFO - Epoch: 32 | Batch: 100 | Loss: 1.527 | Acc: 46.51%
- 2025-03-14 18:58:58,089 - train - INFO - Epoch: 32 | Batch: 200 | Loss: 1.511 | Acc: 47.57%
- 2025-03-14 18:59:00,263 - train - INFO - Epoch: 32 | Batch: 300 | Loss: 1.505 | Acc: 47.70%
- 2025-03-14 18:59:03,515 - train - INFO - Epoch: 32 | Test Loss: 1.502 | Test Acc: 47.83%
- 2025-03-14 18:59:12,676 - train - INFO - Epoch: 33 | Batch: 0 | Loss: 1.443 | Acc: 48.44%
- 2025-03-14 18:59:14,901 - train - INFO - Epoch: 33 | Batch: 100 | Loss: 1.501 | Acc: 47.44%
- 2025-03-14 18:59:17,010 - train - INFO - Epoch: 33 | Batch: 200 | Loss: 1.494 | Acc: 47.66%
- 2025-03-14 18:59:19,083 - train - INFO - Epoch: 33 | Batch: 300 | Loss: 1.497 | Acc: 47.59%
- 2025-03-14 18:59:22,262 - train - INFO - Epoch: 33 | Test Loss: 1.502 | Test Acc: 47.90%
- 2025-03-14 18:59:22,416 - train - INFO - Epoch: 34 | Batch: 0 | Loss: 1.555 | Acc: 41.41%
- 2025-03-14 18:59:24,591 - train - INFO - Epoch: 34 | Batch: 100 | Loss: 1.505 | Acc: 47.66%
- 2025-03-14 18:59:26,681 - train - INFO - Epoch: 34 | Batch: 200 | Loss: 1.504 | Acc: 47.73%
- 2025-03-14 18:59:28,785 - train - INFO - Epoch: 34 | Batch: 300 | Loss: 1.500 | Acc: 47.84%
- 2025-03-14 18:59:31,963 - train - INFO - Epoch: 34 | Test Loss: 1.505 | Test Acc: 46.85%
- 2025-03-14 18:59:40,980 - train - INFO - Epoch: 35 | Batch: 0 | Loss: 1.479 | Acc: 46.88%
- 2025-03-14 18:59:43,177 - train - INFO - Epoch: 35 | Batch: 100 | Loss: 1.509 | Acc: 47.49%
- 2025-03-14 18:59:45,274 - train - INFO - Epoch: 35 | Batch: 200 | Loss: 1.511 | Acc: 47.25%
- 2025-03-14 18:59:47,396 - train - INFO - Epoch: 35 | Batch: 300 | Loss: 1.510 | Acc: 47.32%
- 2025-03-14 18:59:50,653 - train - INFO - Epoch: 35 | Test Loss: 1.401 | Test Acc: 51.25%
- 2025-03-14 18:59:50,838 - train - INFO - Epoch: 36 | Batch: 0 | Loss: 1.299 | Acc: 59.38%
- 2025-03-14 18:59:52,839 - train - INFO - Epoch: 36 | Batch: 100 | Loss: 1.482 | Acc: 48.78%
- 2025-03-14 18:59:54,989 - train - INFO - Epoch: 36 | Batch: 200 | Loss: 1.490 | Acc: 48.33%
- 2025-03-14 18:59:56,980 - train - INFO - Epoch: 36 | Batch: 300 | Loss: 1.485 | Acc: 48.47%
- 2025-03-14 19:00:00,116 - train - INFO - Epoch: 36 | Test Loss: 1.489 | Test Acc: 47.70%
- 2025-03-14 19:00:09,044 - train - INFO - Epoch: 37 | Batch: 0 | Loss: 1.378 | Acc: 52.34%
- 2025-03-14 19:00:11,337 - train - INFO - Epoch: 37 | Batch: 100 | Loss: 1.467 | Acc: 49.08%
- 2025-03-14 19:00:13,664 - train - INFO - Epoch: 37 | Batch: 200 | Loss: 1.466 | Acc: 48.83%
- 2025-03-14 19:00:15,943 - train - INFO - Epoch: 37 | Batch: 300 | Loss: 1.463 | Acc: 48.93%
- 2025-03-14 19:00:19,369 - train - INFO - Epoch: 37 | Test Loss: 1.534 | Test Acc: 46.58%
- 2025-03-14 19:00:19,523 - train - INFO - Epoch: 38 | Batch: 0 | Loss: 1.550 | Acc: 44.53%
- 2025-03-14 19:00:21,879 - train - INFO - Epoch: 38 | Batch: 100 | Loss: 1.476 | Acc: 48.53%
- 2025-03-14 19:00:24,365 - train - INFO - Epoch: 38 | Batch: 200 | Loss: 1.495 | Acc: 47.90%
- 2025-03-14 19:00:26,878 - train - INFO - Epoch: 38 | Batch: 300 | Loss: 1.494 | Acc: 48.06%
- 2025-03-14 19:00:30,363 - train - INFO - Epoch: 38 | Test Loss: 1.502 | Test Acc: 48.86%
- 2025-03-14 19:00:40,202 - train - INFO - Epoch: 39 | Batch: 0 | Loss: 1.594 | Acc: 49.22%
- 2025-03-14 19:00:42,476 - train - INFO - Epoch: 39 | Batch: 100 | Loss: 1.472 | Acc: 48.95%
- 2025-03-14 19:00:44,677 - train - INFO - Epoch: 39 | Batch: 200 | Loss: 1.470 | Acc: 49.00%
- 2025-03-14 19:00:46,770 - train - INFO - Epoch: 39 | Batch: 300 | Loss: 1.457 | Acc: 49.47%
- 2025-03-14 19:00:50,216 - train - INFO - Epoch: 39 | Test Loss: 1.392 | Test Acc: 50.99%
- 2025-03-14 19:00:50,372 - train - INFO - Epoch: 40 | Batch: 0 | Loss: 1.643 | Acc: 42.97%
- 2025-03-14 19:00:52,597 - train - INFO - Epoch: 40 | Batch: 100 | Loss: 1.492 | Acc: 48.14%
- 2025-03-14 19:00:54,733 - train - INFO - Epoch: 40 | Batch: 200 | Loss: 1.488 | Acc: 48.10%
- 2025-03-14 19:00:56,903 - train - INFO - Epoch: 40 | Batch: 300 | Loss: 1.476 | Acc: 48.38%
- 2025-03-14 19:01:00,186 - train - INFO - Epoch: 40 | Test Loss: 1.396 | Test Acc: 51.11%
- 2025-03-14 19:01:09,458 - train - INFO - Epoch: 41 | Batch: 0 | Loss: 1.446 | Acc: 49.22%
- 2025-03-14 19:01:11,568 - train - INFO - Epoch: 41 | Batch: 100 | Loss: 1.477 | Acc: 49.01%
- 2025-03-14 19:01:13,524 - train - INFO - Epoch: 41 | Batch: 200 | Loss: 1.467 | Acc: 49.21%
- 2025-03-14 19:01:15,623 - train - INFO - Epoch: 41 | Batch: 300 | Loss: 1.473 | Acc: 49.07%
- 2025-03-14 19:01:18,753 - train - INFO - Epoch: 41 | Test Loss: 1.369 | Test Acc: 53.32%
- 2025-03-14 19:01:18,976 - train - INFO - Epoch: 42 | Batch: 0 | Loss: 1.483 | Acc: 46.88%
- 2025-03-14 19:01:20,939 - train - INFO - Epoch: 42 | Batch: 100 | Loss: 1.466 | Acc: 48.96%
- 2025-03-14 19:01:22,879 - train - INFO - Epoch: 42 | Batch: 200 | Loss: 1.470 | Acc: 48.71%
- 2025-03-14 19:01:24,891 - train - INFO - Epoch: 42 | Batch: 300 | Loss: 1.473 | Acc: 48.65%
- 2025-03-14 19:01:27,749 - train - INFO - Epoch: 42 | Test Loss: 1.397 | Test Acc: 51.39%
- 2025-03-14 19:01:36,173 - train - INFO - Epoch: 43 | Batch: 0 | Loss: 1.305 | Acc: 52.34%
- 2025-03-14 19:01:38,245 - train - INFO - Epoch: 43 | Batch: 100 | Loss: 1.444 | Acc: 49.63%
- 2025-03-14 19:01:40,295 - train - INFO - Epoch: 43 | Batch: 200 | Loss: 1.458 | Acc: 48.98%
- 2025-03-14 19:01:42,370 - train - INFO - Epoch: 43 | Batch: 300 | Loss: 1.471 | Acc: 48.66%
- 2025-03-14 19:01:45,581 - train - INFO - Epoch: 43 | Test Loss: 1.378 | Test Acc: 52.89%
- 2025-03-14 19:01:45,747 - train - INFO - Epoch: 44 | Batch: 0 | Loss: 1.317 | Acc: 57.81%
- 2025-03-14 19:01:48,037 - train - INFO - Epoch: 44 | Batch: 100 | Loss: 1.460 | Acc: 49.73%
- 2025-03-14 19:01:50,122 - train - INFO - Epoch: 44 | Batch: 200 | Loss: 1.431 | Acc: 50.60%
- 2025-03-14 19:01:52,184 - train - INFO - Epoch: 44 | Batch: 300 | Loss: 1.447 | Acc: 50.02%
- 2025-03-14 19:01:55,371 - train - INFO - Epoch: 44 | Test Loss: 1.472 | Test Acc: 49.33%
- 2025-03-14 19:02:04,270 - train - INFO - Epoch: 45 | Batch: 0 | Loss: 1.547 | Acc: 48.44%
- 2025-03-14 19:02:06,320 - train - INFO - Epoch: 45 | Batch: 100 | Loss: 1.469 | Acc: 49.03%
- 2025-03-14 19:02:08,399 - train - INFO - Epoch: 45 | Batch: 200 | Loss: 1.467 | Acc: 49.28%
- 2025-03-14 19:02:10,409 - train - INFO - Epoch: 45 | Batch: 300 | Loss: 1.461 | Acc: 49.32%
- 2025-03-14 19:02:13,526 - train - INFO - Epoch: 45 | Test Loss: 1.475 | Test Acc: 50.11%
- 2025-03-14 19:02:13,687 - train - INFO - Epoch: 46 | Batch: 0 | Loss: 1.603 | Acc: 50.00%
- 2025-03-14 19:02:15,735 - train - INFO - Epoch: 46 | Batch: 100 | Loss: 1.468 | Acc: 49.56%
- 2025-03-14 19:02:17,836 - train - INFO - Epoch: 46 | Batch: 200 | Loss: 1.457 | Acc: 49.54%
- 2025-03-14 19:02:19,934 - train - INFO - Epoch: 46 | Batch: 300 | Loss: 1.454 | Acc: 49.82%
- 2025-03-14 19:02:23,205 - train - INFO - Epoch: 46 | Test Loss: 1.467 | Test Acc: 49.27%
- 2025-03-14 19:02:31,634 - train - INFO - Epoch: 47 | Batch: 0 | Loss: 1.506 | Acc: 47.66%
- 2025-03-14 19:02:33,626 - train - INFO - Epoch: 47 | Batch: 100 | Loss: 1.447 | Acc: 50.19%
- 2025-03-14 19:02:35,568 - train - INFO - Epoch: 47 | Batch: 200 | Loss: 1.451 | Acc: 49.84%
- 2025-03-14 19:02:37,841 - train - INFO - Epoch: 47 | Batch: 300 | Loss: 1.439 | Acc: 50.25%
- 2025-03-14 19:02:41,240 - train - INFO - Epoch: 47 | Test Loss: 1.543 | Test Acc: 46.04%
- 2025-03-14 19:02:41,419 - train - INFO - Epoch: 48 | Batch: 0 | Loss: 1.628 | Acc: 45.31%
- 2025-03-14 19:02:43,631 - train - INFO - Epoch: 48 | Batch: 100 | Loss: 1.464 | Acc: 49.07%
- 2025-03-14 19:02:45,773 - train - INFO - Epoch: 48 | Batch: 200 | Loss: 1.454 | Acc: 49.46%
- 2025-03-14 19:02:47,757 - train - INFO - Epoch: 48 | Batch: 300 | Loss: 1.468 | Acc: 49.15%
- 2025-03-14 19:02:50,760 - train - INFO - Epoch: 48 | Test Loss: 1.404 | Test Acc: 50.03%
- 2025-03-14 19:02:58,934 - train - INFO - Epoch: 49 | Batch: 0 | Loss: 1.270 | Acc: 52.34%
- 2025-03-14 19:03:00,986 - train - INFO - Epoch: 49 | Batch: 100 | Loss: 1.457 | Acc: 49.50%
- 2025-03-14 19:03:03,024 - train - INFO - Epoch: 49 | Batch: 200 | Loss: 1.458 | Acc: 49.52%
- 2025-03-14 19:03:05,108 - train - INFO - Epoch: 49 | Batch: 300 | Loss: 1.460 | Acc: 49.37%
- 2025-03-14 19:03:08,092 - train - INFO - Epoch: 49 | Test Loss: 1.356 | Test Acc: 53.54%
- 2025-03-14 19:03:08,239 - train - INFO - Epoch: 50 | Batch: 0 | Loss: 1.282 | Acc: 51.56%
- 2025-03-14 19:03:10,262 - train - INFO - Epoch: 50 | Batch: 100 | Loss: 1.460 | Acc: 48.94%
- 2025-03-14 19:03:12,119 - train - INFO - Epoch: 50 | Batch: 200 | Loss: 1.465 | Acc: 48.87%
- 2025-03-14 19:03:14,009 - train - INFO - Epoch: 50 | Batch: 300 | Loss: 1.448 | Acc: 49.82%
- 2025-03-14 19:03:16,891 - train - INFO - Epoch: 50 | Test Loss: 1.402 | Test Acc: 51.80%
- 2025-03-14 19:03:24,969 - train - INFO - Training complete!
Image/LeNet5/code/train.log DELETED
@@ -1,253 +0,0 @@
- 2025-03-14 18:42:58,457 - train - INFO - Started training lenet5
- 2025-03-14 18:42:58,466 - train - INFO - Total epochs: 50, learning rate: 0.1, device: cuda:3
- 2025-03-14 18:42:59,293 - train - INFO - Epoch: 1 | Batch: 0 | Loss: 2.303 | Acc: 10.94%
- 2025-03-14 18:43:01,471 - train - INFO - Epoch: 1 | Batch: 100 | Loss: 2.266 | Acc: 12.62%
- 2025-03-14 18:43:03,531 - train - INFO - Epoch: 1 | Batch: 200 | Loss: 2.178 | Acc: 15.37%
- 2025-03-14 18:43:05,648 - train - INFO - Epoch: 1 | Batch: 300 | Loss: 2.099 | Acc: 18.60%
- 2025-03-14 18:43:08,912 - train - INFO - Epoch: 1 | Test Loss: 1.768 | Test Acc: 33.62%
- 2025-03-14 18:43:09,291 - train - INFO - Epoch: 2 | Batch: 0 | Loss: 1.851 | Acc: 26.56%
- 2025-03-14 18:43:11,591 - train - INFO - Epoch: 2 | Batch: 100 | Loss: 1.848 | Acc: 30.46%
- 2025-03-14 18:43:13,743 - train - INFO - Epoch: 2 | Batch: 200 | Loss: 1.831 | Acc: 31.40%
- 2025-03-14 18:43:16,315 - train - INFO - Epoch: 2 | Batch: 300 | Loss: 1.823 | Acc: 31.80%
- 2025-03-14 18:43:19,766 - train - INFO - Epoch: 2 | Test Loss: 1.662 | Test Acc: 38.37%
- 2025-03-14 18:43:28,827 - train - INFO - Epoch: 3 | Batch: 0 | Loss: 1.742 | Acc: 33.59%
- 2025-03-14 18:43:31,227 - train - INFO - Epoch: 3 | Batch: 100 | Loss: 1.772 | Acc: 34.92%
- 2025-03-14 18:43:33,508 - train - INFO - Epoch: 3 | Batch: 200 | Loss: 1.755 | Acc: 34.99%
- 2025-03-14 18:43:35,700 - train - INFO - Epoch: 3 | Batch: 300 | Loss: 1.750 | Acc: 35.12%
- 2025-03-14 18:43:39,124 - train - INFO - Epoch: 3 | Test Loss: 1.663 | Test Acc: 38.84%
- 2025-03-14 18:43:39,305 - train - INFO - Epoch: 4 | Batch: 0 | Loss: 1.855 | Acc: 29.69%
- 2025-03-14 18:43:41,442 - train - INFO - Epoch: 4 | Batch: 100 | Loss: 1.715 | Acc: 36.98%
- 2025-03-14 18:43:43,675 - train - INFO - Epoch: 4 | Batch: 200 | Loss: 1.721 | Acc: 36.66%
- 2025-03-14 18:43:45,928 - train - INFO - Epoch: 4 | Batch: 300 | Loss: 1.704 | Acc: 37.20%
- 2025-03-14 18:43:49,305 - train - INFO - Epoch: 4 | Test Loss: 1.584 | Test Acc: 41.15%
- 2025-03-14 18:43:58,749 - train - INFO - Epoch: 5 | Batch: 0 | Loss: 1.593 | Acc: 39.06%
- 2025-03-14 18:44:01,045 - train - INFO - Epoch: 5 | Batch: 100 | Loss: 1.676 | Acc: 39.09%
- 2025-03-14 18:44:03,286 - train - INFO - Epoch: 5 | Batch: 200 | Loss: 1.660 | Acc: 39.65%
- 2025-03-14 18:44:05,565 - train - INFO - Epoch: 5 | Batch: 300 | Loss: 1.673 | Acc: 39.22%
- 2025-03-14 18:44:09,108 - train - INFO - Epoch: 5 | Test Loss: 1.637 | Test Acc: 40.55%
- 2025-03-14 18:44:09,274 - train - INFO - Epoch: 6 | Batch: 0 | Loss: 1.719 | Acc: 36.72%
- 2025-03-14 18:44:11,561 - train - INFO - Epoch: 6 | Batch: 100 | Loss: 1.622 | Acc: 40.80%
- 2025-03-14 18:44:14,102 - train - INFO - Epoch: 6 | Batch: 200 | Loss: 1.645 | Acc: 40.35%
- 2025-03-14 18:44:16,595 - train - INFO - Epoch: 6 | Batch: 300 | Loss: 1.655 | Acc: 40.03%
- 2025-03-14 18:44:20,643 - train - INFO - Epoch: 6 | Test Loss: 1.514 | Test Acc: 47.29%
- 2025-03-14 18:44:30,165 - train - INFO - Epoch: 7 | Batch: 0 | Loss: 1.487 | Acc: 50.78%
- 2025-03-14 18:44:32,311 - train - INFO - Epoch: 7 | Batch: 100 | Loss: 1.638 | Acc: 40.48%
- 2025-03-14 18:44:34,629 - train - INFO - Epoch: 7 | Batch: 200 | Loss: 1.641 | Acc: 40.55%
- 2025-03-14 18:44:36,796 - train - INFO - Epoch: 7 | Batch: 300 | Loss: 1.642 | Acc: 40.62%
- 2025-03-14 18:44:40,052 - train - INFO - Epoch: 7 | Test Loss: 1.670 | Test Acc: 41.48%
- 2025-03-14 18:44:40,222 - train - INFO - Epoch: 8 | Batch: 0 | Loss: 1.629 | Acc: 38.28%
- 2025-03-14 18:44:42,337 - train - INFO - Epoch: 8 | Batch: 100 | Loss: 1.647 | Acc: 40.32%
- 2025-03-14 18:44:44,590 - train - INFO - Epoch: 8 | Batch: 200 | Loss: 1.638 | Acc: 40.99%
- 2025-03-14 18:44:46,617 - train - INFO - Epoch: 8 | Batch: 300 | Loss: 1.648 | Acc: 40.85%
- 2025-03-14 18:44:50,042 - train - INFO - Epoch: 8 | Test Loss: 1.610 | Test Acc: 43.43%
- 2025-03-14 18:44:59,307 - train - INFO - Epoch: 9 | Batch: 0 | Loss: 1.663 | Acc: 44.53%
- 2025-03-14 18:45:01,655 - train - INFO - Epoch: 9 | Batch: 100 | Loss: 1.638 | Acc: 41.36%
- 2025-03-14 18:45:03,999 - train - INFO - Epoch: 9 | Batch: 200 | Loss: 1.647 | Acc: 40.96%
- 2025-03-14 18:45:06,123 - train - INFO - Epoch: 9 | Batch: 300 | Loss: 1.646 | Acc: 40.82%
- 2025-03-14 18:45:09,386 - train - INFO - Epoch: 9 | Test Loss: 1.465 | Test Acc: 48.76%
- 2025-03-14 18:45:09,543 - train - INFO - Epoch: 10 | Batch: 0 | Loss: 1.650 | Acc: 40.62%
- 2025-03-14 18:45:11,645 - train - INFO - Epoch: 10 | Batch: 100 | Loss: 1.581 | Acc: 42.72%
- 2025-03-14 18:45:13,854 - train - INFO - Epoch: 10 | Batch: 200 | Loss: 1.589 | Acc: 42.82%
- 2025-03-14 18:45:16,028 - train - INFO - Epoch: 10 | Batch: 300 | Loss: 1.592 | Acc: 42.91%
- 2025-03-14 18:45:19,650 - train - INFO - Epoch: 10 | Test Loss: 1.483 | Test Acc: 48.24%
- 2025-03-14 18:45:30,113 - train - INFO - Epoch: 11 | Batch: 0 | Loss: 1.494 | Acc: 42.97%
- 2025-03-14 18:45:32,744 - train - INFO - Epoch: 11 | Batch: 100 | Loss: 1.616 | Acc: 42.26%
- 2025-03-14 18:45:35,132 - train - INFO - Epoch: 11 | Batch: 200 | Loss: 1.625 | Acc: 42.03%
- 2025-03-14 18:45:37,374 - train - INFO - Epoch: 11 | Batch: 300 | Loss: 1.603 | Acc: 42.96%
- 2025-03-14 18:45:40,850 - train - INFO - Epoch: 11 | Test Loss: 1.505 | Test Acc: 48.63%
- 2025-03-14 18:45:41,037 - train - INFO - Epoch: 12 | Batch: 0 | Loss: 1.586 | Acc: 46.09%
- 2025-03-14 18:45:43,281 - train - INFO - Epoch: 12 | Batch: 100 | Loss: 1.577 | Acc: 44.79%
- 2025-03-14 18:45:45,488 - train - INFO - Epoch: 12 | Batch: 200 | Loss: 1.576 | Acc: 44.34%
- 2025-03-14 18:45:47,756 - train - INFO - Epoch: 12 | Batch: 300 | Loss: 1.591 | Acc: 43.89%
- 2025-03-14 18:45:51,120 - train - INFO - Epoch: 12 | Test Loss: 1.605 | Test Acc: 44.80%
- 2025-03-14 18:46:00,438 - train - INFO - Epoch: 13 | Batch: 0 | Loss: 1.518 | Acc: 44.53%
- 2025-03-14 18:46:02,653 - train - INFO - Epoch: 13 | Batch: 100 | Loss: 1.599 | Acc: 42.95%
- 2025-03-14 18:46:05,275 - train - INFO - Epoch: 13 | Batch: 200 | Loss: 1.593 | Acc: 43.37%
- 2025-03-14 18:46:07,588 - train - INFO - Epoch: 13 | Batch: 300 | Loss: 1.598 | Acc: 43.51%
- 2025-03-14 18:46:10,937 - train - INFO - Epoch: 13 | Test Loss: 1.583 | Test Acc: 42.85%
- 2025-03-14 18:46:11,116 - train - INFO - Epoch: 14 | Batch: 0 | Loss: 1.560 | Acc: 44.53%
- 2025-03-14 18:46:13,285 - train - INFO - Epoch: 14 | Batch: 100 | Loss: 1.569 | Acc: 44.65%
- 2025-03-14 18:46:15,533 - train - INFO - Epoch: 14 | Batch: 200 | Loss: 1.577 | Acc: 43.93%
- 2025-03-14 18:46:17,803 - train - INFO - Epoch: 14 | Batch: 300 | Loss: 1.580 | Acc: 43.98%
- 2025-03-14 18:46:21,356 - train - INFO - Epoch: 14 | Test Loss: 1.633 | Test Acc: 44.51%
- 2025-03-14 18:46:31,128 - train - INFO - Epoch: 15 | Batch: 0 | Loss: 1.845 | Acc: 38.28%
- 2025-03-14 18:46:33,986 - train - INFO - Epoch: 15 | Batch: 100 | Loss: 1.574 | Acc: 44.63%
- 2025-03-14 18:46:36,643 - train - INFO - Epoch: 15 | Batch: 200 | Loss: 1.581 | Acc: 44.63%
- 2025-03-14 18:46:38,812 - train - INFO - Epoch: 15 | Batch: 300 | Loss: 1.582 | Acc: 44.44%
- 2025-03-14 18:46:42,100 - train - INFO - Epoch: 15 | Test Loss: 1.502 | Test Acc: 47.49%
- 2025-03-14 18:46:42,283 - train - INFO - Epoch: 16 | Batch: 0 | Loss: 1.625 | Acc: 40.62%
- 2025-03-14 18:46:44,700 - train - INFO - Epoch: 16 | Batch: 100 | Loss: 1.541 | Acc: 45.17%
- 2025-03-14 18:46:46,924 - train - INFO - Epoch: 16 | Batch: 200 | Loss: 1.555 | Acc: 44.73%
- 2025-03-14 18:46:49,212 - train - INFO - Epoch: 16 | Batch: 300 | Loss: 1.552 | Acc: 45.09%
- 2025-03-14 18:46:52,741 - train - INFO - Epoch: 16 | Test Loss: 1.515 | Test Acc: 47.01%
- 2025-03-14 18:47:01,778 - train - INFO - Epoch: 17 | Batch: 0 | Loss: 1.648 | Acc: 42.19%
- 2025-03-14 18:47:03,963 - train - INFO - Epoch: 17 | Batch: 100 | Loss: 1.568 | Acc: 45.44%
- 2025-03-14 18:47:06,143 - train - INFO - Epoch: 17 | Batch: 200 | Loss: 1.559 | Acc: 45.42%
- 2025-03-14 18:47:08,345 - train - INFO - Epoch: 17 | Batch: 300 | Loss: 1.569 | Acc: 45.12%
- 2025-03-14 18:47:11,733 - train - INFO - Epoch: 17 | Test Loss: 1.570 | Test Acc: 45.34%
- 2025-03-14 18:47:11,921 - train - INFO - Epoch: 18 | Batch: 0 | Loss: 1.661 | Acc: 39.06%
- 2025-03-14 18:47:14,198 - train - INFO - Epoch: 18 | Batch: 100 | Loss: 1.577 | Acc: 43.73%
- 2025-03-14 18:47:16,401 - train - INFO - Epoch: 18 | Batch: 200 | Loss: 1.587 | Acc: 44.04%
- 2025-03-14 18:47:18,532 - train - INFO - Epoch: 18 | Batch: 300 | Loss: 1.583 | Acc: 44.23%
- 2025-03-14 18:47:21,929 - train - INFO - Epoch: 18 | Test Loss: 1.490 | Test Acc: 48.95%
- 2025-03-14 18:47:31,595 - train - INFO - Epoch: 19 | Batch: 0 | Loss: 1.469 | Acc: 47.66%
- 2025-03-14 18:47:34,012 - train - INFO - Epoch: 19 | Batch: 100 | Loss: 1.572 | Acc: 44.14%
- 2025-03-14 18:47:36,582 - train - INFO - Epoch: 19 | Batch: 200 | Loss: 1.564 | Acc: 44.89%
- 2025-03-14 18:47:39,025 - train - INFO - Epoch: 19 | Batch: 300 | Loss: 1.580 | Acc: 44.51%
- 2025-03-14 18:47:43,410 - train - INFO - Epoch: 19 | Test Loss: 1.614 | Test Acc: 41.68%
- 2025-03-14 18:47:43,603 - train - INFO - Epoch: 20 | Batch: 0 | Loss: 1.780 | Acc: 35.16%
- 2025-03-14 18:47:45,962 - train - INFO - Epoch: 20 | Batch: 100 | Loss: 1.593 | Acc: 43.56%
- 2025-03-14 18:47:48,244 - train - INFO - Epoch: 20 | Batch: 200 | Loss: 1.582 | Acc: 44.22%
- 2025-03-14 18:47:50,397 - train - INFO - Epoch: 20 | Batch: 300 | Loss: 1.564 | Acc: 44.98%
- 2025-03-14 18:47:53,620 - train - INFO - Epoch: 20 | Test Loss: 1.466 | Test Acc: 48.39%
- 2025-03-14 18:48:02,849 - train - INFO - Epoch: 21 | Batch: 0 | Loss: 1.409 | Acc: 50.00%
- 2025-03-14 18:48:05,101 - train - INFO - Epoch: 21 | Batch: 100 | Loss: 1.554 | Acc: 45.34%
- 2025-03-14 18:48:07,205 - train - INFO - Epoch: 21 | Batch: 200 | Loss: 1.559 | Acc: 45.21%
- 2025-03-14 18:48:09,387 - train - INFO - Epoch: 21 | Batch: 300 | Loss: 1.567 | Acc: 44.95%
- 2025-03-14 18:48:12,828 - train - INFO - Epoch: 21 | Test Loss: 1.541 | Test Acc: 45.92%
- 2025-03-14 18:48:12,998 - train - INFO - Epoch: 22 | Batch: 0 | Loss: 1.420 | Acc: 50.00%
- 2025-03-14 18:48:15,472 - train - INFO - Epoch: 22 | Batch: 100 | Loss: 1.553 | Acc: 45.12%
- 2025-03-14 18:48:17,608 - train - INFO - Epoch: 22 | Batch: 200 | Loss: 1.546 | Acc: 45.46%
- 2025-03-14 18:48:19,794 - train - INFO - Epoch: 22 | Batch: 300 | Loss: 1.551 | Acc: 45.30%
- 2025-03-14 18:48:23,214 - train - INFO - Epoch: 22 | Test Loss: 1.537 | Test Acc: 46.88%
- 2025-03-14 18:48:32,459 - train - INFO - Epoch: 23 | Batch: 0 | Loss: 1.594 | Acc: 42.19%
- 2025-03-14 18:48:34,612 - train - INFO - Epoch: 23 | Batch: 100 | Loss: 1.580 | Acc: 44.72%
- 2025-03-14 18:48:36,785 - train - INFO - Epoch: 23 | Batch: 200 | Loss: 1.560 | Acc: 45.09%
- 2025-03-14 18:48:38,969 - train - INFO - Epoch: 23 | Batch: 300 | Loss: 1.561 | Acc: 45.15%
- 2025-03-14 18:48:42,488 - train - INFO - Epoch: 23 | Test Loss: 1.570 | Test Acc: 45.55%
- 2025-03-14 18:48:42,659 - train - INFO - Epoch: 24 | Batch: 0 | Loss: 1.642 | Acc: 44.53%
- 2025-03-14 18:48:44,937 - train - INFO - Epoch: 24 | Batch: 100 | Loss: 1.577 | Acc: 44.83%
- 2025-03-14 18:48:47,587 - train - INFO - Epoch: 24 | Batch: 200 | Loss: 1.587 | Acc: 44.60%
- 2025-03-14 18:48:50,078 - train - INFO - Epoch: 24 | Batch: 300 | Loss: 1.570 | Acc: 45.19%
- 2025-03-14 18:48:53,664 - train - INFO - Epoch: 24 | Test Loss: 1.460 | Test Acc: 51.03%
- 2025-03-14 18:49:03,866 - train - INFO - Epoch: 25 | Batch: 0 | Loss: 1.401 | Acc: 47.66%
- 2025-03-14 18:49:05,995 - train - INFO - Epoch: 25 | Batch: 100 | Loss: 1.530 | Acc: 45.76%
- 2025-03-14 18:49:08,111 - train - INFO - Epoch: 25 | Batch: 200 | Loss: 1.514 | Acc: 46.54%
- 2025-03-14 18:49:10,316 - train - INFO - Epoch: 25 | Batch: 300 | Loss: 1.529 | Acc: 46.19%
- 2025-03-14 18:49:13,929 - train - INFO - Epoch: 25 | Test Loss: 1.556 | Test Acc: 47.06%
- 2025-03-14 18:49:14,187 - train - INFO - Epoch: 26 | Batch: 0 | Loss: 1.722 | Acc: 40.62%
- 2025-03-14 18:49:16,552 - train - INFO - Epoch: 26 | Batch: 100 | Loss: 1.515 | Acc: 46.69%
- 2025-03-14 18:49:18,799 - train - INFO - Epoch: 26 | Batch: 200 | Loss: 1.527 | Acc: 46.61%
- 2025-03-14 18:49:20,898 - train - INFO - Epoch: 26 | Batch: 300 | Loss: 1.541 | Acc: 46.01%
- 2025-03-14 18:49:24,249 - train - INFO - Epoch: 26 | Test Loss: 1.403 | Test Acc: 50.95%
- 2025-03-14 18:49:33,283 - train - INFO - Epoch: 27 | Batch: 0 | Loss: 1.368 | Acc: 50.00%
- 2025-03-14 18:49:35,438 - train - INFO - Epoch: 27 | Batch: 100 | Loss: 1.541 | Acc: 46.12%
- 2025-03-14 18:49:37,619 - train - INFO - Epoch: 27 | Batch: 200 | Loss: 1.546 | Acc: 46.12%
- 2025-03-14 18:49:39,907 - train - INFO - Epoch: 27 | Batch: 300 | Loss: 1.558 | Acc: 45.75%
- 2025-03-14 18:49:43,295 - train - INFO - Epoch: 27 | Test Loss: 1.593 | Test Acc: 44.67%
- 2025-03-14 18:49:43,465 - train - INFO - Epoch: 28 | Batch: 0 | Loss: 1.712 | Acc: 39.84%
- 2025-03-14 18:49:45,800 - train - INFO - Epoch: 28 | Batch: 100 | Loss: 1.551 | Acc: 46.37%
- 2025-03-14 18:49:47,998 - train - INFO - Epoch: 28 | Batch: 200 | Loss: 1.551 | Acc: 46.39%
- 2025-03-14 18:49:50,134 - train - INFO - Epoch: 28 | Batch: 300 | Loss: 1.538 | Acc: 46.92%
- 2025-03-14 18:49:53,378 - train - INFO - Epoch: 28 | Test Loss: 1.490 | Test Acc: 46.88%
- 2025-03-14 18:50:02,514 - train - INFO - Epoch: 29 | Batch: 0 | Loss: 1.512 | Acc: 41.41%
- 2025-03-14 18:50:04,738 - train - INFO - Epoch: 29 | Batch: 100 | Loss: 1.560 | Acc: 45.56%
- 2025-03-14 18:50:06,892 - train - INFO - Epoch: 29 | Batch: 200 | Loss: 1.530 | Acc: 46.73%
- 2025-03-14 18:50:09,049 - train - INFO - Epoch: 29 | Batch: 300 | Loss: 1.541 | Acc: 46.39%
- 2025-03-14 18:50:12,354 - train - INFO - Epoch: 29 | Test Loss: 1.536 | Test Acc: 46.64%
- 2025-03-14 18:50:12,524 - train - INFO - Epoch: 30 | Batch: 0 | Loss: 1.597 | Acc: 46.09%
- 2025-03-14 18:50:14,711 - train - INFO - Epoch: 30 | Batch: 100 | Loss: 1.548 | Acc: 46.50%
- 2025-03-14 18:50:16,934 - train - INFO - Epoch: 30 | Batch: 200 | Loss: 1.556 | Acc: 45.93%
- 2025-03-14 18:50:19,218 - train - INFO - Epoch: 30 | Batch: 300 | Loss: 1.543 | Acc: 46.32%
- 2025-03-14 18:50:22,723 - train - INFO - Epoch: 30 | Test Loss: 1.452 | Test Acc: 50.77%
- 2025-03-14 18:50:32,129 - train - INFO - Epoch: 31 | Batch: 0 | Loss: 1.449 | Acc: 49.22%
- 2025-03-14 18:50:34,311 - train - INFO - Epoch: 31 | Batch: 100 | Loss: 1.533 | Acc: 47.13%
- 2025-03-14 18:50:36,539 - train - INFO - Epoch: 31 | Batch: 200 | Loss: 1.542 | Acc: 46.49%
- 2025-03-14 18:50:38,682 - train - INFO - Epoch: 31 | Batch: 300 | Loss: 1.539 | Acc: 46.58%
- 2025-03-14 18:50:42,089 - train - INFO - Epoch: 31 | Test Loss: 1.386 | Test Acc: 52.82%
- 2025-03-14 18:50:42,259 - train - INFO - Epoch: 32 | Batch: 0 | Loss: 1.537 | Acc: 49.22%
- 2025-03-14 18:50:44,519 - train - INFO - Epoch: 32 | Batch: 100 | Loss: 1.507 | Acc: 47.93%
- 2025-03-14 18:50:46,656 - train - INFO - Epoch: 32 | Batch: 200 | Loss: 1.502 | Acc: 47.95%
- 2025-03-14 18:50:48,818 - train - INFO - Epoch: 32 | Batch: 300 | Loss: 1.502 | Acc: 47.57%
- 2025-03-14 18:50:52,272 - train - INFO - Epoch: 32 | Test Loss: 1.482 | Test Acc: 48.45%
- 2025-03-14 18:51:01,650 - train - INFO - Epoch: 33 | Batch: 0 | Loss: 1.600 | Acc: 42.97%
- 2025-03-14 18:51:03,937 - train - INFO - Epoch: 33 | Batch: 100 | Loss: 1.516 | Acc: 46.92%
- 2025-03-14 18:51:06,268 - train - INFO - Epoch: 33 | Batch: 200 | Loss: 1.528 | Acc: 46.76%
- 2025-03-14 18:51:08,704 - train - INFO - Epoch: 33 | Batch: 300 | Loss: 1.534 | Acc: 46.62%
- 2025-03-14 18:51:12,262 - train - INFO - Epoch: 33 | Test Loss: 1.430 | Test Acc: 49.99%
- 2025-03-14 18:51:12,443 - train - INFO - Epoch: 34 | Batch: 0 | Loss: 1.585 | Acc: 45.31%
- 2025-03-14 18:51:14,733 - train - INFO - Epoch: 34 | Batch: 100 | Loss: 1.501 | Acc: 47.97%
- 2025-03-14 18:51:17,010 - train - INFO - Epoch: 34 | Batch: 200 | Loss: 1.512 | Acc: 47.52%
- 2025-03-14 18:51:19,395 - train - INFO - Epoch: 34 | Batch: 300 | Loss: 1.514 | Acc: 47.37%
- 2025-03-14 18:51:23,243 - train - INFO - Epoch: 34 | Test Loss: 1.407 | Test Acc: 52.49%
- 2025-03-14 18:51:32,787 - train - INFO - Epoch: 35 | Batch: 0 | Loss: 1.449 | Acc: 53.12%
- 2025-03-14 18:51:35,089 - train - INFO - Epoch: 35 | Batch: 100 | Loss: 1.533 | Acc: 46.77%
- 2025-03-14 18:51:37,330 - train - INFO - Epoch: 35 | Batch: 200 | Loss: 1.508 | Acc: 47.78%
- 2025-03-14 18:51:39,569 - train - INFO - Epoch: 35 | Batch: 300 | Loss: 1.505 | Acc: 47.97%
- 2025-03-14 18:51:43,190 - train - INFO - Epoch: 35 | Test Loss: 1.518 | Test Acc: 48.63%
- 2025-03-14 18:51:43,386 - train - INFO - Epoch: 36 | Batch: 0 | Loss: 1.662 | Acc: 41.41%
- 2025-03-14 18:51:45,737 - train - INFO - Epoch: 36 | Batch: 100 | Loss: 1.537 | Acc: 46.06%
- 2025-03-14 18:51:47,974 - train - INFO - Epoch: 36 | Batch: 200 | Loss: 1.524 | Acc: 46.58%
- 2025-03-14 18:51:50,202 - train - INFO - Epoch: 36 | Batch: 300 | Loss: 1.526 | Acc: 46.55%
- 2025-03-14 18:51:53,752 - train - INFO - Epoch: 36 | Test Loss: 1.361 | Test Acc: 53.00%
- 2025-03-14 18:52:03,416 - train - INFO - Epoch: 37 | Batch: 0 | Loss: 1.415 | Acc: 48.44%
- 2025-03-14 18:52:05,593 - train - INFO - Epoch: 37 | Batch: 100 | Loss: 1.575 | Acc: 44.76%
- 2025-03-14 18:52:07,827 - train - INFO - Epoch: 37 | Batch: 200 | Loss: 1.538 | Acc: 45.99%
- 2025-03-14 18:52:09,999 - train - INFO - Epoch: 37 | Batch: 300 | Loss: 1.526 | Acc: 46.51%
- 2025-03-14 18:52:13,370 - train - INFO - Epoch: 37 | Test Loss: 1.458 | Test Acc: 49.32%
- 2025-03-14 18:52:13,541 - train - INFO - Epoch: 38 | Batch: 0 | Loss: 1.553 | Acc: 49.22%
- 2025-03-14 18:52:15,740 - train - INFO - Epoch: 38 | Batch: 100 | Loss: 1.476 | Acc: 49.10%
- 2025-03-14 18:52:17,984 - train - INFO - Epoch: 38 | Batch: 200 | Loss: 1.475 | Acc: 48.56%
- 2025-03-14 18:52:20,396 - train - INFO - Epoch: 38 | Batch: 300 | Loss: 1.509 | Acc: 47.59%
- 2025-03-14 18:52:23,784 - train - INFO - Epoch: 38 | Test Loss: 1.395 | Test Acc: 51.78%
- 2025-03-14 18:52:32,947 - train - INFO - Epoch: 39 | Batch: 0 | Loss: 1.398 | Acc: 51.56%
- 2025-03-14 18:52:35,194 - train - INFO - Epoch: 39 | Batch: 100 | Loss: 1.582 | Acc: 44.60%
- 2025-03-14 18:52:37,297 - train - INFO - Epoch: 39 | Batch: 200 | Loss: 1.554 | Acc: 45.54%
- 2025-03-14 18:52:39,638 - train - INFO - Epoch: 39 | Batch: 300 | Loss: 1.534 | Acc: 46.39%
- 2025-03-14 18:52:43,108 - train - INFO - Epoch: 39 | Test Loss: 1.525 | Test Acc: 45.86%
- 2025-03-14 18:52:43,297 - train - INFO - Epoch: 40 | Batch: 0 | Loss: 1.696 | Acc: 32.81%
- 2025-03-14 18:52:45,599 - train - INFO - Epoch: 40 | Batch: 100 | Loss: 1.522 | Acc: 47.46%
- 2025-03-14 18:52:47,940 - train - INFO - Epoch: 40 | Batch: 200 | Loss: 1.516 | Acc: 47.49%
- 2025-03-14 18:52:50,269 - train - INFO - Epoch: 40 | Batch: 300 | Loss: 1.508 | Acc: 47.60%
- 2025-03-14 18:52:53,724 - train - INFO - Epoch: 40 | Test Loss: 1.450 | Test Acc: 51.11%
- 2025-03-14 18:53:03,410 - train - INFO - Epoch: 41 | Batch: 0 | Loss: 1.537 | Acc: 47.66%
- 2025-03-14 18:53:05,800 - train - INFO - Epoch: 41 | Batch: 100 | Loss: 1.507 | Acc: 47.56%
- 2025-03-14 18:53:08,177 - train - INFO - Epoch: 41 | Batch: 200 | Loss: 1.514 | Acc: 47.43%
- 2025-03-14 18:53:10,666 - train - INFO - Epoch: 41 | Batch: 300 | Loss: 1.510 | Acc: 47.77%
- 2025-03-14 18:53:14,572 - train - INFO - Epoch: 41 | Test Loss: 1.517 | Test Acc: 48.48%
- 2025-03-14 18:53:14,742 - train - INFO - Epoch: 42 | Batch: 0 | Loss: 1.796 | Acc: 42.19%
- 2025-03-14 18:53:16,888 - train - INFO - Epoch: 42 | Batch: 100 | Loss: 1.473 | Acc: 48.87%
- 2025-03-14 18:53:18,973 - train - INFO - Epoch: 42 | Batch: 200 | Loss: 1.491 | Acc: 48.63%
- 2025-03-14 18:53:21,259 - train - INFO - Epoch: 42 | Batch: 300 | Loss: 1.505 | Acc: 48.21%
- 2025-03-14 18:53:24,601 - train - INFO - Epoch: 42 | Test Loss: 1.561 | Test Acc: 46.57%
- 2025-03-14 18:53:33,569 - train - INFO - Epoch: 43 | Batch: 0 | Loss: 1.608 | Acc: 44.53%
- 2025-03-14 18:53:35,694 - train - INFO - Epoch: 43 | Batch: 100 | Loss: 1.491 | Acc: 48.58%
- 2025-03-14 18:53:37,928 - train - INFO - Epoch: 43 | Batch: 200 | Loss: 1.491 | Acc: 48.62%
- 2025-03-14 18:53:40,500 - train - INFO - Epoch: 43 | Batch: 300 | Loss: 1.486 | Acc: 48.66%
- 2025-03-14 18:53:43,816 - train - INFO - Epoch: 43 | Test Loss: 1.374 | Test Acc: 52.62%
- 2025-03-14 18:53:43,991 - train - INFO - Epoch: 44 | Batch: 0 | Loss: 1.489 | Acc: 49.22%
- 2025-03-14 18:53:46,176 - train - INFO - Epoch: 44 | Batch: 100 | Loss: 1.486 | Acc: 48.23%
- 2025-03-14 18:53:48,303 - train - INFO - Epoch: 44 | Batch: 200 | Loss: 1.498 | Acc: 47.92%
- 2025-03-14 18:53:50,370 - train - INFO - Epoch: 44 | Batch: 300 | Loss: 1.487 | Acc: 48.34%
- 2025-03-14 18:53:53,740 - train - INFO - Epoch: 44 | Test Loss: 1.373 | Test Acc: 52.73%
- 2025-03-14 18:54:03,408 - train - INFO - Epoch: 45 | Batch: 0 | Loss: 1.566 | Acc: 47.66%
- 2025-03-14 18:54:05,572 - train - INFO - Epoch: 45 | Batch: 100 | Loss: 1.531 | Acc: 47.00%
- 2025-03-14 18:54:07,745 - train - INFO - Epoch: 45 | Batch: 200 | Loss: 1.512 | Acc: 47.59%
- 2025-03-14 18:54:09,919 - train - INFO - Epoch: 45 | Batch: 300 | Loss: 1.500 | Acc: 48.02%
- 2025-03-14 18:54:13,351 - train - INFO - Epoch: 45 | Test Loss: 1.426 | Test Acc: 51.30%
- 2025-03-14 18:54:13,612 - train - INFO - Epoch: 46 | Batch: 0 | Loss: 1.334 | Acc: 58.59%
- 2025-03-14 18:54:15,863 - train - INFO - Epoch: 46 | Batch: 100 | Loss: 1.496 | Acc: 48.13%
- 2025-03-14 18:54:18,032 - train - INFO - Epoch: 46 | Batch: 200 | Loss: 1.489 | Acc: 48.47%
- 2025-03-14 18:54:20,219 - train - INFO - Epoch: 46 | Batch: 300 | Loss: 1.495 | Acc: 48.29%
- 2025-03-14 18:54:23,452 - train - INFO - Epoch: 46 | Test Loss: 1.528 | Test Acc: 48.13%
- 2025-03-14 18:54:32,795 - train - INFO - Epoch: 47 | Batch: 0 | Loss: 1.657 | Acc: 38.28%
- 2025-03-14 18:54:35,033 - train - INFO - Epoch: 47 | Batch: 100 | Loss: 1.543 | Acc: 46.42%
- 2025-03-14 18:54:37,369 - train - INFO - Epoch: 47 | Batch: 200 | Loss: 1.516 | Acc: 47.45%
- 2025-03-14 18:54:39,815 - train - INFO - Epoch: 47 | Batch: 300 | Loss: 1.499 | Acc: 48.16%
- 2025-03-14 18:54:43,735 - train - INFO - Epoch: 47 | Test Loss: 1.527 | Test Acc: 49.94%
- 2025-03-14 18:54:43,988 - train - INFO - Epoch: 48 | Batch: 0 | Loss: 1.510 | Acc: 44.53%
- 2025-03-14 18:54:46,176 - train - INFO - Epoch: 48 | Batch: 100 | Loss: 1.503 | Acc: 48.13%
- 2025-03-14 18:54:48,509 - train - INFO - Epoch: 48 | Batch: 200 | Loss: 1.501 | Acc: 48.09%
- 2025-03-14 18:54:50,812 - train - INFO - Epoch: 48 | Batch: 300 | Loss: 1.490 | Acc: 48.40%
- 2025-03-14 18:54:54,075 - train - INFO - Epoch: 48 | Test Loss: 1.414 | Test Acc: 51.34%
- 2025-03-14 18:55:02,937 - train - INFO - Epoch: 49 | Batch: 0 | Loss: 1.570 | Acc: 38.28%
- 2025-03-14 18:55:05,141 - train - INFO - Epoch: 49 | Batch: 100 | Loss: 1.473 | Acc: 49.60%
- 2025-03-14 18:55:07,346 - train - INFO - Epoch: 49 | Batch: 200 | Loss: 1.474 | Acc: 49.48%
- 2025-03-14 18:55:09,548 - train - INFO - Epoch: 49 | Batch: 300 | Loss: 1.478 | Acc: 49.18%
- 2025-03-14 18:55:12,872 - train - INFO - Epoch: 49 | Test Loss: 1.352 | Test Acc: 53.22%
- 2025-03-14 18:55:13,039 - train - INFO - Epoch: 50 | Batch: 0 | Loss: 1.388 | Acc: 51.56%
- 2025-03-14 18:55:15,328 - train - INFO - Epoch: 50 | Batch: 100 | Loss: 1.490 | Acc: 48.34%
- 2025-03-14 18:55:17,410 - train - INFO - Epoch: 50 | Batch: 200 | Loss: 1.484 | Acc: 48.32%
- 2025-03-14 18:55:19,595 - train - INFO - Epoch: 50 | Batch: 300 | Loss: 1.474 | Acc: 49.08%
- 2025-03-14 18:55:22,981 - train - INFO - Epoch: 50 | Test Loss: 1.513 | Test Acc: 46.94%
- 2025-03-14 18:55:32,176 - train - INFO - Training complete!
Image/LeNet5/code/train.py DELETED
@@ -1,63 +0,0 @@
- import sys
- import os
- sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
- from utils.dataset_utils import get_cifar10_dataloaders
- from utils.train_utils import train_model, train_model_data_augmentation, train_model_backdoor
- from utils.parse_args import parse_args
- from model import LeNet5
-
- def main():
-     # Parse command-line arguments
-     args = parse_args()
-
-     # Create the model
-     model = LeNet5()
-
-     if args.train_type == '0':
-         # Get the data loaders
-         trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
-         # Train the model
-         train_model(
-             model=model,
-             trainloader=trainloader,
-             testloader=testloader,
-             epochs=args.epochs,
-             lr=args.lr,
-             device=f'cuda:{args.gpu}',
-             save_dir='../model',
-             model_name='lenet5',
-             save_type='0',
-             layer_name='conv2',
-             interval=2
-         )
-     elif args.train_type == '1':
-         train_model_data_augmentation(
-             model,
-             epochs=args.epochs,
-             lr=args.lr,
-             device=f'cuda:{args.gpu}',
-             save_dir='../model',
-             model_name='lenet5',
-             batch_size=args.batch_size,
-             num_workers=args.num_workers,
-             local_dataset_path=args.dataset_path
-         )
-     elif args.train_type == '2':
-         train_model_backdoor(
-             model,
-             poison_ratio=args.poison_ratio,
-             target_label=args.target_label,
-             epochs=args.epochs,
-             lr=args.lr,
-             device=f'cuda:{args.gpu}',
-             save_dir='../model',
-             model_name='lenet5',
-             batch_size=args.batch_size,
-             num_workers=args.num_workers,
-             local_dataset_path=args.dataset_path,
-             layer_name='conv2',
-             interval=2
-         )
-
- if __name__ == '__main__':
-     main()
Image/LeNet5/dataset/.gitkeep DELETED
File without changes
Image/LeNet5/model/.gitkeep DELETED
File without changes
Image/LeNet5/model/0/epoch1/embeddings.npy DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d655d14885b3382a95cd8f1685db6b4a3e114db764acfe1e8fec0132ccb241b2
- size 80000128
Image/LeNet5/model/0/epoch10/embeddings.npy DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:665a4d9333b30c8d44f1b314c2c5ba37b6a24555e8d6ce9000f9e15c43dae227
- size 80000128
Image/LeNet5/model/0/epoch11/embeddings.npy DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:aa27a313f1475dbb461bbcd4d22ef2fe9f792d1a6977d66a5d51afc21fca9c1a
- size 80000128
Image/LeNet5/model/0/epoch12/embeddings.npy DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1488b8d7523aea773179f4dfb8d3e23a4236364ba30a418e5a76609ac238a596
- size 80000128
Image/LeNet5/model/0/epoch13/embeddings.npy DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d49e3a95395193b35e150a9438316248e0ee4a369813751067a77829a42b429a
- size 80000128