import json
import os

import numpy as np
import torch
import torch.nn as nn

# tqdm only provides a progress bar; degrade gracefully when it is absent
# instead of making the whole module unimportable.
try:
    from tqdm import tqdm
except ImportError:
    def tqdm(iterable, **kwargs):
        return iterable


class time_travel_saver:
    """Visualization-data saver for model training.

    Per epoch this can persist:
      1. model weights                  (epoch*/subject_model.pth)
      2. high-dimensional features      (epoch*/embeddings.npy)
      3. the chosen feature layer info  (layer_info.json, at the save root)
    """

    def __init__(self, model, dataloader, device, save_dir, model_name,
                 interval=1, auto_save_embedding=False, layer_name=None,
                 show=False):
        """
        Args:
            model: model instance to snapshot
            dataloader: data loader (must iterate in a fixed, sequential order)
            device: computation device (cpu or gpu)
            save_dir: root directory for all saved artifacts (created if missing)
            model_name: model name (kept for bookkeeping; not used in paths here)
            interval: epoch save interval (only affects the printed epoch number)
            auto_save_embedding: if True, extract and save features in save()
            layer_name: layer to extract features from; when None, the first
                layer whose flattened dim lies in [256, 2048] is auto-selected
            show: if True, print every layer's name and flattened dimension
        """
        self.model = model
        self.dataloader = dataloader
        self.device = device
        self.save_dir = save_dir
        self.model_name = model_name
        self.interval = interval
        self.auto_save = auto_save_embedding
        self.layer_name = layer_name

        # Determine the next epoch index from existing epoch* directories.
        # Counting every entry (as before) is thrown off by layer_info.json
        # and would silently overwrite epoch1 on a second run without it.
        os.makedirs(self.save_dir, exist_ok=True)
        epoch_dirs = [d for d in os.listdir(self.save_dir) if d.startswith('epoch')]
        self.current_epoch = len(epoch_dirs) + 1

        if show:
            self.show_dimensions()

    def show_dimensions(self):
        """Print every layer's name and flattened output dimension.

        Runs a single batch through the model with forward hooks on all
        non-container modules, then reports each captured activation's
        flattened size so the user can pick a feature layer.

        Returns:
            dict mapping layer name -> flattened feature dimension
        """
        activation = {}
        layer_dimensions = {}

        def get_activation(name):
            def hook(model, input, output):
                activation[name] = output.detach()
            return hook

        # Register hooks on every module except pure containers.
        handles = []
        for name, module in self.model.named_modules():
            if isinstance(module, nn.Module) and not isinstance(module, (nn.ModuleList, nn.ModuleDict)):
                handles.append(module.register_forward_hook(get_activation(name)))

        try:
            self.model.eval()
            with torch.no_grad():
                # One batch suffices to measure every layer's output shape.
                inputs, _ = next(iter(self.dataloader))
                inputs = inputs.to(self.device)
                _ = self.model(inputs)

            print("\n模型各层的名称和维度:")
            print("-" * 50)
            print(f"{'层名称':<40} {'特征维度':<15} {'输出形状'}")
            print("-" * 50)

            for name, feat in activation.items():
                if feat is None:
                    continue
                # Per-sample feature dimension after flattening.
                feat_dim = feat.view(feat.size(0), -1).size(1)
                layer_dimensions[name] = feat_dim
                shape_str = str(list(feat.shape))
                print(f"{name:<40} {feat_dim:<15} {shape_str}")

            print("-" * 50)
            print("注: 特征维度是将输出张量展平后的维度大小")
        finally:
            # Always detach hooks, even if the forward pass raises.
            for handle in handles:
                handle.remove()

        return layer_dimensions

    def _extract_features_and_predictions(self):
        """Run the whole dataloader through the model and collect features.

        NOTE: despite the historical name, only features are returned;
        predictions are not collected anywhere.

        Returns:
            np.ndarray of shape [num_samples, feature_dim], or an empty
            array if the dataloader yields no batches.

        Raises:
            ValueError: if `self.layer_name` is not found in the model, or
                no layer's flattened dim lies in the auto-selection range.
        """
        features = []
        activation = {}

        def get_activation(name):
            def hook(model, input, output):
                # Keep only the first activation per name: a module invoked
                # several times in one forward pass is recorded once.
                if name not in activation or activation[name] is None:
                    activation[name] = output.detach()
            return hook

        handles = []
        for name, module in self.model.named_modules():
            if isinstance(module, nn.Module) and not isinstance(module, (nn.ModuleList, nn.ModuleDict)):
                handles.append(module.register_forward_hook(get_activation(name)))

        try:
            self.model.eval()
            with torch.no_grad():
                # Probe one batch to decide which layer to read features from.
                inputs, _ = next(iter(self.dataloader))
                inputs = inputs.to(self.device)
                _ = self.model(inputs)

                if self.layer_name is not None:
                    # Caller chose the layer explicitly.
                    if self.layer_name not in activation:
                        raise ValueError(f"指定的层 {self.layer_name} 不存在于模型中")
                    feat = activation[self.layer_name]
                    if feat is None:
                        raise ValueError(f"指定的层 {self.layer_name} 没有输出特征")
                    suitable_layer_name = self.layer_name
                    suitable_dim = feat.view(feat.size(0), -1).size(1)
                    print(f"使用指定的特征层: {suitable_layer_name}, 特征维度: {suitable_dim}")
                else:
                    # Auto-pick the first layer whose flattened dim is in range.
                    target_dim_range = (256, 2048)
                    suitable_layer_name = None
                    suitable_dim = None
                    for name, feat in activation.items():
                        if feat is None:
                            continue
                        feat_dim = feat.view(feat.size(0), -1).size(1)
                        if target_dim_range[0] <= feat_dim <= target_dim_range[1]:
                            suitable_layer_name = name
                            suitable_dim = feat_dim
                            break
                    if suitable_layer_name is None:
                        raise ValueError("没有找到合适维度的特征层")
                    print(f"自动选择的特征层: {suitable_layer_name}, 特征维度: {suitable_dim}")

                # Persist the chosen layer so downstream tools can read it.
                layer_info = {
                    'layer_id': suitable_layer_name,
                    'dim': suitable_dim
                }
                layer_info_path = os.path.join(self.save_dir, 'layer_info.json')
                with open(layer_info_path, 'w') as f:
                    json.dump(layer_info, f)

                # Drop the probe batch's activations before the real pass.
                activation.clear()

                for inputs, _ in tqdm(self.dataloader, desc="提取特征和预测结果"):
                    inputs = inputs.to(self.device)
                    self.model(inputs)
                    feat = activation[suitable_layer_name]
                    flat_features = torch.flatten(feat, start_dim=1)
                    features.append(flat_features.cpu().numpy())
                    # Free this batch's activations immediately.
                    activation.clear()
        finally:
            # Always detach hooks, even when layer selection raises.
            for handle in handles:
                handle.remove()

        if features:
            return np.vstack(features)
        return np.array([])

    def save(self, model=None):
        """Save the current epoch's artifacts.

        Args:
            model: optionally replace the tracked model before saving.
        """
        if model is not None:
            self.model = model

        epoch_dir = os.path.join(self.save_dir, f'epoch{self.current_epoch}')
        os.makedirs(epoch_dir, exist_ok=True)

        # 1) model weights
        model_path = os.path.join(epoch_dir, 'subject_model.pth')
        torch.save(self.model.state_dict(), model_path)

        if self.auto_save:
            # 2) features extracted from the selected layer
            features = self._extract_features_and_predictions()
            np.save(os.path.join(epoch_dir, 'embeddings.npy'), features)

            print(f"Epoch {self.current_epoch * self.interval} 的数据已保存:")
            print(f"- 模型权重: {model_path}")
            print(f"- 特征向量: [样本数: {features.shape[0]}, 特征维度: {features.shape[1]}]")

        print(f"Epoch {self.current_epoch} 的数据已保存")