import pickle
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
import net_BNN
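# net_BNN is assumed to be a local module (sitting next to this script) that
# defines the CycleDataset wrapper and the Bayesian model ATBNN_Model used below.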
# Run inference with the trained Bayesian model
def test_plot(dataloader, model, device, criterion):
    total_loss = 0.0
    num_samples = 0
    num_mc_samples = 10  # Monte Carlo forward passes per batch
    # eval() disables things like dropout; the Bayesian layers are assumed to
    # keep sampling weights in forward(), so repeated passes still differ.
    model.eval()
    with torch.no_grad():
        predictions = []
        true_labels = []
        stds = []
        for test_features, test_labels in dataloader:
            # MC sampling: run the model num_mc_samples times and collect
            # one prediction per pass for every sample in the batch.
            sample_output = torch.zeros(num_mc_samples, test_labels.shape[0]).to(device)
            for i in range(num_mc_samples):
                sample_output[i] = model(test_features.to(device)).reshape(-1)
            # Predictive mean and standard deviation across the MC samples
            # (mean/std already return tensors; no torch.Tensor() wrapper needed)
            outputs = sample_output.mean(dim=0).unsqueeze(1)
            batch_std = sample_output.std(dim=0).unsqueeze(1)
            loss = criterion(outputs, test_labels.to(device))
            total_loss += loss.item() * test_features.size(0)
            num_samples += test_features.size(0)
            predictions.append(outputs.tolist())
            stds.append(batch_std.tolist())
            true_labels.append(test_labels.tolist())
    average_loss = total_loss / num_samples
    print('Validation Loss: {:.8f}'.format(average_loss))
    # Flatten the per-batch lists and plot predictions against the true labels,
    # with a mean +/- 1 std band from the MC samples as the uncertainty estimate.
    pred_array = np.array(sum(predictions, [])).flatten()
    std_array = np.array(sum(stds, [])).flatten()
    true_array = np.array(sum(true_labels, [])).flatten()
    x = range(len(pred_array))
    plt.plot(pred_array, label='Predictions')
    plt.plot(true_array, label='True Labels')
    plt.fill_between(x, pred_array + std_array, pred_array - std_array, alpha=0.5, label='Confidence Interval')
    plt.legend()
    plt.xlabel('Sample Index')
    plt.ylabel('Cycle Capacity/Ah')
    plt.show()
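    # Note: the band above is the predictive mean +/- 1 std of the MC samples
    # (roughly a 68% interval if the predictive distribution is Gaussian). A
    # hypothetical variant for an approximate 95% band would scale the std:
    #   plt.fill_between(x, pred_array + 1.96 * std_array,
    #                    pred_array - 1.96 * std_array, alpha=0.5)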
#
base_path = r'E:\member\ShiJH\Battery Datasets\SNL_18650_LFP Datasets\modified_dataset'
attrib_feature = ['Current','Voltage','Environment_Temperature','Cell_Temperature','SOC']
attrib_label = ['SOH']
# Restore the feature-scaling bounds saved during training
with open('./min_max_values.pkl', 'rb') as f:
    min_val, max_val = pickle.load(f)
max_len = 200
input_dim = len(attrib_feature)
output_dim = 1
hidden_dim = 64
num_layers = 2
num_heads = 4
lr = 1e-3
max_seq_len = 200
batch_size = 5
C_rated = 1.1
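# C_rated = 1.1 is presumably the cell's rated capacity in Ah (an ~1.1 Ah 18650
# LFP cell), which CycleDataset uses to normalize capacity-related quantities.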
testdata_path = base_path + r'\modified_SNL_18650_LFP_25C_0-100_0.5-3C_b_timeseries.csv'
test_data = pd.read_csv(testdata_path)
test_data['SOH'] = test_data['cycle capacity'] / test_data['cycle capacity'].values[0]
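# SOH is each cycle's capacity normalized by the first cycle's capacity,
# so the label starts at 1.0 for a fresh cell and decays as the cell ages.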
test_dataset = net_BNN.CycleDataset(
    data=test_data,
    attrib_x=attrib_feature,
    attrib_y=attrib_label,
    max_len=max_len,
    C_rated=C_rated,
    min_val=min_val,
    max_val=max_val,
    mode='test')
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=True)
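# drop_last=True discards the final partial batch; presumably required because
# ATBNN_Model is constructed with a fixed batch_size below.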
# Build the network architecture
model = net_BNN.ATBNN_Model(
    input_dim=input_dim,
    output_dim=output_dim,
    hidden_dim=hidden_dim,
    num_layers=num_layers,
    num_heads=num_heads,
    batch_size=batch_size,
    max_seq_len=max_seq_len)
# model.load_state_dict(torch.load("./model_B/model_epoch634_Trainloss0.00013560_ValLoss0.00107357.pt")) #LOSS=0
# model.load_state_dict(torch.load("./model_B/model_epoch2168_Trainloss0.00001729_ValLoss0.00107112.pt"))
model.load_state_dict(torch.load("./model_epoch8876_Trainloss0.00001546_ValLoss0.00022519.pt"))
# model.load_state_dict(torch.load("./model/model_epoch1.pt"))
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model.to(device)
criterion = nn.MSELoss(reduction='mean')
test_plot(test_dataloader, model, device, criterion)
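# A hypothetical sanity check (not in the original): since the criterion is the
# per-sample mean MSE over SOH, the RMSE in SOH units is its square root, e.g.
# inside test_plot one could add after computing average_loss:
#   print('Validation RMSE: {:.8f}'.format(average_loss ** 0.5))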