| | import torch |
| | import torch.nn as nn |
| | import numpy as np |
| | import os |
| | import json |
| | from tqdm import tqdm |
| |
|
class time_travel_saver:
    """Visualization data extractor.

    Saves several artifacts produced during model training:
        1. model weights              (.pth)
        2. high-dimensional features  (representation/*.npy)
        3. prediction results         (prediction/*.npy)
        4. label data                 (label/labels.npy)
    """

    def __init__(self, model, dataloader, device, save_dir, model_name,
                 auto_save_embedding=False, layer_name=None, show=False):
        """Initialize the saver.

        Args:
            model: model instance to snapshot.
            dataloader: data loader (must iterate samples in a fixed order).
            device: compute device ('cpu' or a CUDA device).
            save_dir: root directory for saved artifacts.
            model_name: name used to identify the model.
            auto_save_embedding: if True, features/predictions are extracted
                and saved alongside the checkpoint.
            layer_name: name of the layer to read features from; if None a
                suitable layer is chosen automatically at extraction time.
            show: if True (and no layer_name was given), print every layer's
                name and dimension to help the user choose one.
        """
        self.model = model
        self.dataloader = dataloader
        self.device = device
        self.save_dir = save_dir
        self.model_name = model_name
        self.auto_save = auto_save_embedding
        self.layer_name = layer_name

        # Informational print only; the original bound the return value to an
        # unused local, which has been dropped.
        if show and not layer_name:
            self.show_dimensions()
| | |
| | def show_dimensions(self): |
| | """显示模型中所有层的名称和对应的维度 |
| | |
| | 这个函数会输出模型中所有层的名称和它们的输出维度, |
| | 帮助用户选择合适的层来提取特征。 |
| | |
| | Returns: |
| | layer_dimensions: 包含层名称和维度的字典 |
| | """ |
| | activation = {} |
| | layer_dimensions = {} |
| | |
| | def get_activation(name): |
| | def hook(model, input, output): |
| | |
| | if name not in activation or activation[name] is None: |
| | |
| | if isinstance(output, tuple): |
| | |
| | activation[name] = output[0].detach() if len(output) > 0 else None |
| | else: |
| | activation[name] = output.detach() |
| | return hook |
| | |
| | |
| | handles = [] |
| | for name, module in self.model.named_modules(): |
| | if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict): |
| | handles.append(module.register_forward_hook(get_activation(name))) |
| | |
| | self.model.eval() |
| | with torch.no_grad(): |
| | |
| | inputs, _ = next(iter(self.dataloader)) |
| | inputs = inputs.to(self.device) |
| | _ = self.model(inputs) |
| | |
| | |
| | print("\n模型各层的名称和维度:") |
| | print("-" * 50) |
| | print(f"{'层名称':<40} {'特征维度':<15} {'输出形状'}") |
| | print("-" * 50) |
| | |
| | for name, feat in activation.items(): |
| | if feat is None: |
| | continue |
| | |
| | |
| | feat_dim = feat.view(feat.size(0), -1).size(1) |
| | layer_dimensions[name] = feat_dim |
| | |
| | shape_str = str(list(feat.shape)) |
| | print(f"{name:<40} {feat_dim:<15} {shape_str}") |
| | |
| | print("-" * 50) |
| | print("注: 特征维度是将输出张量展平后的维度大小") |
| | print("你可以通过修改time_travel_saver的layer_name参数来选择不同的层") |
| | print("例如:layer_name='avg_pool'或layer_name='layer4'等") |
| | |
| | |
| | for handle in handles: |
| | handle.remove() |
| | |
| | return layer_dimensions |
| |
|
| | def _extract_features_and_predictions(self): |
| | """提取特征和预测结果 |
| | |
| | Returns: |
| | features: 高维特征 [样本数, 特征维度] |
| | predictions: 预测结果 [样本数, 类别数] |
| | """ |
| | features = [] |
| | predictions = [] |
| | indices = [] |
| | activation = {} |
| | |
| | def get_activation(name): |
| | def hook(model, input, output): |
| | |
| | if name not in activation or activation[name] is None: |
| | |
| | if isinstance(output, tuple): |
| | |
| | activation[name] = output[0].detach() if len(output) > 0 else None |
| | else: |
| | activation[name] = output.detach() |
| | return hook |
| | |
| | |
| |
|
| | |
| | handles = [] |
| | for name, module in self.model.named_modules(): |
| | if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict): |
| | handles.append(module.register_forward_hook(get_activation(name))) |
| | |
| | self.model.eval() |
| | with torch.no_grad(): |
| | |
| | inputs, _ = next(iter(self.dataloader)) |
| | inputs = inputs.to(self.device) |
| | _ = self.model(inputs) |
| | |
| | |
| | if self.layer_name is not None: |
| | if self.layer_name not in activation: |
| | raise ValueError(f"指定的层 {self.layer_name} 不存在于模型中") |
| | |
| | feat = activation[self.layer_name] |
| | if feat is None: |
| | raise ValueError(f"指定的层 {self.layer_name} 没有输出特征") |
| | |
| | suitable_layer_name = self.layer_name |
| | suitable_dim = feat.view(feat.size(0), -1).size(1) |
| | print(f"使用指定的特征层: {suitable_layer_name}, 特征维度: {suitable_dim}") |
| | else: |
| | |
| | target_dim_range = (256, 2048) |
| | suitable_layer_name = None |
| | suitable_dim = None |
| | |
| | |
| | for name, feat in activation.items(): |
| | if feat is None: |
| | continue |
| | feat_dim = feat.view(feat.size(0), -1).size(1) |
| | if target_dim_range[0] <= feat_dim <= target_dim_range[1]: |
| | suitable_layer_name = name |
| | suitable_dim = feat_dim |
| | break |
| | |
| | if suitable_layer_name is None: |
| | raise ValueError("没有找到合适维度的特征层") |
| | |
| | print(f"自动选择的特征层: {suitable_layer_name}, 特征维度: {suitable_dim}") |
| | |
| | |
| | layer_info = { |
| | 'layer_id': suitable_layer_name, |
| | 'dim': suitable_dim |
| | } |
| | layer_info_path = os.path.join(os.path.dirname(self.save_dir), 'layer_info.json') |
| | with open(layer_info_path, 'w') as f: |
| | json.dump(layer_info, f) |
| | |
| | |
| | activation.clear() |
| | |
| | |
| | for batch_idx, (inputs, _) in enumerate(tqdm(self.dataloader, desc="提取特征和预测结果")): |
| | inputs = inputs.to(self.device) |
| | outputs = self.model(inputs) |
| | |
| | |
| | feat = activation[suitable_layer_name] |
| | flat_features = torch.flatten(feat, start_dim=1) |
| | features.append(flat_features.cpu().numpy()) |
| | predictions.append(outputs.cpu().numpy()) |
| | |
| | |
| | activation.clear() |
| | |
| | |
| | for handle in handles: |
| | handle.remove() |
| | |
| | if len(features) > 0: |
| | features = np.vstack(features) |
| | predictions = np.vstack(predictions) |
| | return features, predictions |
| | else: |
| | return np.array([]), np.array([]) |
| | |
| | def save_lables_index(self, path): |
| | """保存标签数据和索引信息 |
| | |
| | Args: |
| | path: 保存路径 |
| | """ |
| | os.makedirs(path, exist_ok=True) |
| | labels_path = os.path.join(path, 'labels.npy') |
| | index_path = os.path.join(path, 'index.json') |
| | |
| | |
| | try: |
| | if hasattr(self.dataloader.dataset, 'targets'): |
| | |
| | labels = np.array(self.dataloader.dataset.targets) |
| | elif hasattr(self.dataloader.dataset, 'labels'): |
| | |
| | labels = np.array(self.dataloader.dataset.labels) |
| | else: |
| | |
| | labels = [] |
| | for _, batch_labels in self.dataloader: |
| | labels.append(batch_labels.numpy()) |
| | labels = np.concatenate(labels) |
| | |
| | |
| | np.save(labels_path, labels) |
| | print(f"标签数据已保存到 {labels_path}") |
| | |
| | |
| | num_samples = len(labels) |
| | indices = list(range(num_samples)) |
| | |
| | |
| | index_dict = { |
| | "train": list(range(50000)), |
| | "test": list(range(50000, 60000)), |
| | "validation": [] |
| | } |
| | |
| | |
| | with open(index_path, 'w') as f: |
| | json.dump(index_dict, f, indent=4) |
| | |
| | print(f"数据集索引已保存到 {index_path}") |
| | |
| | except Exception as e: |
| | print(f"保存标签和索引时出错: {e}") |
| |
|
| | def save_checkpoint_embeddings_predictions(self, model = None): |
| | """保存所有数据""" |
| | if model is not None: |
| | self.model = model |
| | |
| | os.makedirs(self.save_dir, exist_ok=True) |
| | model_path = os.path.join(self.save_dir,'model.pth') |
| | torch.save(self.model.state_dict(), model_path) |
| | |
| | if self.auto_save: |
| | |
| | features, predictions = self._extract_features_and_predictions() |
| | |
| | |
| | np.save(os.path.join(self.save_dir, 'embeddings.npy'), features) |
| | |
| | np.save(os.path.join(self.save_dir, 'predictions.npy'), predictions) |
| | print("\n保存了以下数据:") |
| | print(f"- 模型权重: {model_path}") |
| | print(f"- 特征向量: [样本数: {features.shape[0]}, 特征维度: {features.shape[1]}]") |
| | print(f"- 预测结果: [样本数: {predictions.shape[0]}, 类别数: {predictions.shape[1]}]") |