Upload 9 files
- .gitattributes +1 -0
- app.py +188 -0
- min_max_values.pkl +3 -0
- model_epoch8876_Trainloss0.00001546_ValLoss0.00022519.pt +3 -0
- modified_SNL_18650_LFP_25C_0-100_0.5-3C_b_timeseries.csv +3 -0
- net_BNN.py +276 -0
- paras.py +14 -0
- requirements.txt +0 -0
- test_BNN.py +127 -0
- train_BNN.py +246 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+modified_SNL_18650_LFP_25C_0-100_0.5-3C_b_timeseries.csv filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,188 @@
import pickle

import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

from torch.utils.data import Dataset, DataLoader

import net_BNN
import gradio as gr
from gradio.components import *

# Run inference with the already-trained model
class SingleDataset(Dataset):
    def __init__(self, data, attrib_x, attrib_y, max_len, C_rated, min_val=None, max_val=None, mode='train'):
        self.data = data
        self.cycle_indices = data['Cycle_Index'].unique()
        self.attrib_x = attrib_x
        self.attrib_y = attrib_y
        self.C_rated = C_rated
        self.mode = mode
        self.max_len = max_len

        self.data['Current'] /= self.C_rated
        if mode == 'train':
            self.min_val = data[attrib_x].values.min(axis=0)
            self.max_val = data[attrib_x].values.max(axis=0)
            with open('./para_BNN/min_max_values.pkl', 'wb') as f:
                pickle.dump((self.min_val, self.max_val), f)
        else:
            self.min_val = min_val
            self.max_val = max_val

    def get_min_max_values(self):
        if self.mode != 'train':
            return None
        return self.min_val, self.max_val

    def __len__(self):
        return len(self.cycle_indices)

    def get_data_by_cycle_index(self, cycle_index):
        # Fetch the rows belonging to the requested cycle_index
        cycle_data = self.data[self.data['Cycle_Index'] == cycle_index].copy()

        # Extract features and label
        features = cycle_data[self.attrib_x].values
        label = cycle_data[self.attrib_y].values[0]

        # Min-max normalize the features
        features = (features - self.min_val) / (self.max_val - self.min_val)
        pad_len = self.max_len - len(features)

        features = torch.tensor(features, dtype=torch.float32)
        # Pad to max_len with zeros (float fill value keeps the dtype consistent for cat)
        features = torch.cat([features, torch.full((pad_len, features.shape[1]), 0.)])
        label = torch.tensor(label, dtype=torch.float32)

        return features, label

    def __getitem__(self, index):
        cycle_index = self.cycle_indices[index]
        cycle_data = self.data[self.data['Cycle_Index'] == cycle_index].copy()

        # cycle_data['Current'] /= self.C_rated

        # Extract features and label
        features = cycle_data[self.attrib_x].values
        # C_ini = cycle_data[self.attrib_y].values[0]
        label = cycle_data[self.attrib_y].values[0]

        # Min-max normalize the features
        features = (features - self.min_val) / (self.max_val - self.min_val)
        # label = (label - self.y_mean) / self.y_std
        # features = (features - self.min_val) / self.max_val
        pad_len = self.max_len - len(features)

        features = torch.tensor(features, dtype=torch.float32)
        # Pad the tail of features with a constant value
        features = torch.cat([features, torch.full((pad_len, features.shape[1]), 0.)])
        # features = torch.tensor(padded_features, dtype=torch.float32)
        label = torch.tensor(label, dtype=torch.float32)
        # label = label.view(1,1)

        return features, label


def test(model_path, var_path, csv_path, pos):

    attrib_feature = ['Current', 'Voltage', 'Environment_Temperature', 'Cell_Temperature', 'SOC']
    attrib_label = ['SOH']
    max_len = 200
    input_dim = len(attrib_feature)
    output_dim = 1
    hidden_dim = 64
    num_layers = 2
    num_heads = 4
    lr = 1e-3
    max_seq_len = 200
    batch_size = 1
    C_rated = 1.1

    model_path = model_path.name
    var_path = var_path.name
    csv_path = csv_path.name

    with open(var_path, 'rb') as f:
        min_val, max_val = pickle.load(f)

    test_data = pd.read_csv(csv_path)
    test_data['SOH'] = test_data['cycle capacity'] / test_data['cycle capacity'].values[0]
    # C_val = val_data[attrib_label].values[0]
    # scaler_val = train_dataset.scaler
    dataset = SingleDataset(
        data=test_data,
        attrib_x=attrib_feature,
        attrib_y=attrib_label,
        max_len=max_len,
        C_rated=C_rated,
        min_val=min_val,
        max_val=max_val,
        mode='test')
    features, label = dataset.get_data_by_cycle_index(pos)
    features = torch.unsqueeze(features, dim=0)
    # Build the network
    model = net_BNN.ATBNN_Model(
        input_dim=input_dim,
        output_dim=output_dim,
        hidden_dim=hidden_dim,
        num_layers=num_layers,
        num_heads=num_heads,
        batch_size=batch_size,
        max_seq_len=max_seq_len)

    # model.load_state_dict(torch.load("./model_B/model_epoch634_Trainloss0.00013560_ValLoss0.00107357.pt"))  # LOSS=0
    # model.load_state_dict(torch.load("./model_B/model_epoch2168_Trainloss0.00001729_ValLoss0.00107112.pt"))
    model.load_state_dict(torch.load(model_path, map_location=torch.device("cpu")))

    # model.load_state_dict(torch.load("./model/model_epoch1.pt"))
    device = torch.device('cpu')
    # device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    num_sample = 100
    model.eval()
    with torch.no_grad():
        predictions = []
        # true_labels = []
        cov = []
        # upp = []
        # low = []

        # outputs = model(test_features.to(device))

        # MC sampling: average num_sample stochastic forward passes
        sample_output = torch.zeros(num_sample, label.shape[0]).to(device)
        for i in range(num_sample):
            sample_output[i] = model(features.to(device)).reshape(-1)

        # Rescale SOH back to capacity (Ah) via the rated capacity
        outputs = C_rated * sample_output.mean(dim=0).unsqueeze(1).to(device)
        covs = C_rated * sample_output.std(dim=0).unsqueeze(1).to(device)

        results = "{:.4f}±{:.4f}".format(outputs.item(), covs.item())
        # true_labels.append((test_labels).tolist())

    return results


inputs = [
    File(label="Upload pre-trained model"),
    File(label="Upload parameter file"),
    File(label="Upload CSV test data"),
    Number(label="Select cycle index (1~4004)")
]

outputs = [
    Textbox(label="Maximum available capacity estimate"),
]

gr.Interface(fn=test, inputs=inputs, outputs=outputs, title="ATBNN Model",
             description="Load a pre-trained model and CSV test data, run prediction, and return the maximum available capacity for the selected cycle.").launch(share=True)
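For local debugging, the test function can also be driven without the Gradio UI. A minimal sketch (hypothetical; the SimpleNamespace wrappers mimic the .name attribute that Gradio's File components expose, and the file names are the ones uploaded in this commit):

from types import SimpleNamespace

wrap = lambda p: SimpleNamespace(name=p)
print(test(
    wrap("model_epoch8876_Trainloss0.00001546_ValLoss0.00022519.pt"),
    wrap("min_max_values.pkl"),
    wrap("modified_SNL_18650_LFP_25C_0-100_0.5-3C_b_timeseries.csv"),
    100))  # prints capacity as "mean±std" over 100 MC forward passes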
min_max_values.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e17d679a736b3157cb785199f86e94bc25b48c2d48f617f427c8be6b257d2715
size 259
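This LFS pointer stands in for the actual pickle, which holds the per-feature (min, max) arrays computed on the training set. Loading it mirrors what app.py's test() does:

import pickle

with open("min_max_values.pkl", "rb") as f:
    min_val, max_val = pickle.load(f)  # one entry per feature column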
model_epoch8876_Trainloss0.00001546_ValLoss0.00022519.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:73bd1b279432fb53a459c9f83804b6ff48cd9bcf6524f3a20c86423c58e9b36b
size 2323516
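The checkpoint is a plain state_dict for net_BNN.ATBNN_Model. On a CPU-only Space it loads with an explicit map_location, exactly as app.py does:

import torch

state_dict = torch.load(
    "model_epoch8876_Trainloss0.00001546_ValLoss0.00022519.pt",
    map_location=torch.device("cpu"))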
modified_SNL_18650_LFP_25C_0-100_0.5-3C_b_timeseries.csv
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:20533877941565f1d1ab6ffe7aa8cd4e91f5ace283cb0de1a0c9ee6b55002f24
size 30805653
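The CSV contains per-time-step cycling data for one cell; the scripts derive the SOH label by normalizing each cycle's capacity to the first cycle's, e.g.:

import pandas as pd

df = pd.read_csv("modified_SNL_18650_LFP_25C_0-100_0.5-3C_b_timeseries.csv")
df['SOH'] = df['cycle capacity'] / df['cycle capacity'].values[0]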
net_BNN.py
ADDED
@@ -0,0 +1,276 @@
import math

import pickle

import torch
import torch.nn as nn
from torch.distributions import Normal
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F

class CycleDataset(Dataset):
    def __init__(self, data, attrib_x, attrib_y, max_len, C_rated, min_val=None, max_val=None, mode='train'):
        self.data = data
        self.cycle_indices = data['Cycle_Index'].unique()
        self.attrib_x = attrib_x
        self.attrib_y = attrib_y
        self.C_rated = C_rated
        self.mode = mode
        self.max_len = max_len

        self.data['Current'] /= self.C_rated
        if mode == 'train':
            self.min_val = data[attrib_x].values.min(axis=0)
            self.max_val = data[attrib_x].values.max(axis=0)
            with open('./para_BNN/min_max_values.pkl', 'wb') as f:
                pickle.dump((self.min_val, self.max_val), f)
        else:
            self.min_val = min_val
            self.max_val = max_val

    def get_min_max_values(self):
        if self.mode != 'train':
            return None
        return self.min_val, self.max_val

    def __len__(self):
        return len(self.cycle_indices)

    def __getitem__(self, index):
        cycle_index = self.cycle_indices[index]
        cycle_data = self.data[self.data['Cycle_Index'] == cycle_index].copy()

        # cycle_data['Current'] /= self.C_rated

        # Extract features and label
        features = cycle_data[self.attrib_x].values
        # C_ini = cycle_data[self.attrib_y].values[0]
        label = cycle_data[self.attrib_y].values[0]

        # Min-max normalize the features
        features = (features - self.min_val) / (self.max_val - self.min_val)
        # label = (label - self.y_mean) / self.y_std
        # features = (features - self.min_val) / self.max_val
        pad_len = self.max_len - len(features)

        features = torch.tensor(features, dtype=torch.float32)
        # Pad the tail of features with a constant value (float fill keeps dtypes consistent for cat)
        features = torch.cat([features, torch.full((pad_len, features.shape[1]), 0.)])
        # features = torch.tensor(padded_features, dtype=torch.float32)
        label = torch.tensor(label, dtype=torch.float32)
        # label = label.view(1,1)

        return features, label

    #
    # def pad_collate(self, batch):
    #     # Pad the batch so all sequences share one length
    #     features_batch, labels_batch = zip(*batch)
    #     features_batch = pad_sequence(features_batch, batch_first=True)
    #     labels_batch = torch.stack(labels_batch)
    #
    #     return features_batch, labels_batch


class Transformer_FeatureExtractor(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_dim, num_layers, num_heads, batch_size, max_seq_len):
        super(Transformer_FeatureExtractor, self).__init__()

        self.num_layers = num_layers
        self.hidden_size = hidden_dim
        self.batch_size = batch_size
        self.max_seq_len = max_seq_len
        # self.cls_token = nn.Parameter(torch.randn(self.batch_size, 1, self.hidden_size))
        self.embedding = nn.Linear(input_dim, hidden_dim)
        self.position_encoding = self.create_position_encoding()

        self.transformer_encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model=hidden_dim, nhead=num_heads, dropout=0),
            num_layers=num_layers
        )

    def create_position_encoding(self):
        # Fixed sinusoidal position encoding
        position_encoding = torch.zeros(self.max_seq_len, self.hidden_size)
        position = torch.arange(0, self.max_seq_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, self.hidden_size, 2) * (-math.log(10000.0) / self.hidden_size))
        position_encoding[:, 0::2] = torch.sin(position * div_term)
        position_encoding[:, 1::2] = torch.cos(position * div_term)
        position_encoding = position_encoding.unsqueeze(0)
        return nn.Parameter(position_encoding, requires_grad=False)

    def forward(self, x):
        seq_len = x.shape[1]
        positions = self.position_encoding[:, :seq_len, :]
        x = self.embedding(x)
        x = x + positions
        # x = torch.cat((x, self.cls_token), dim=1)
        # x = torch.cat((self.cls_token, x), dim=1)
        x_layer = self.transformer_encoder(x)
        # Mean-pool over the sequence dimension to get one feature vector per cycle
        feature = torch.mean(x_layer, dim=1)
        return feature

# class BaseVaraitionLayer_(nn.Module):
#     def __init__(self):
#         super().__init__()
#     def kl_div(self, mu_q, sigma_q, mu_p, sigma_p):
#         '''
#         Calculates kl divergence between two gaussians (Q || P)
#         :param mu_q: torch.Tensor -> mu parameter of distribution Q
#         :param sigma_q: torch.Tensor -> sigma parameter of distribution Q
#         :param mu_p: float -> mu parameter of distribution P
#         :param sigma_p: float -> sigma parameter of distribution P
#         :return: torch.Tensor of shape 0
#         '''
#         kl = torch.log(sigma_p) - torch.log(sigma_q)
#         + (sigma_q**2 + (mu_q - mu_p)**2) / (2 * (sigma_p**2)) - 0.5
#         return kl.sum()


class BayesLinear(nn.Module):
    def __init__(self, input_dim, output_dim, prior_mu, prior_sigma):
        super(BayesLinear, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.prior_mu = prior_mu
        self.prior_sigma = prior_sigma

        self.weight_mu = nn.Parameter(torch.Tensor(output_dim, input_dim))
        self.weight_rho = nn.Parameter(torch.Tensor(output_dim, input_dim))
        self.bias_mu = nn.Parameter(torch.Tensor(output_dim))
        self.bias_rho = nn.Parameter(torch.Tensor(output_dim))

        self.weight = None
        self.bias = None

        self.prior = Normal(prior_mu, prior_sigma)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.kaiming_uniform_(self.weight_mu, a=math.sqrt(self.input_dim))
        nn.init.constant_(self.weight_rho, -3.0)
        nn.init.zeros_(self.bias_mu)
        nn.init.constant_(self.bias_rho, -3.0)

    def forward(self, input):
        # Reparameterization trick: sample weights as mu + softplus(rho) * eps
        weight_epsilon = torch.randn_like(self.weight_mu)
        bias_epsilon = torch.randn_like(self.bias_mu)

        weight_sigma = torch.log1p(torch.exp(self.weight_rho))
        bias_sigma = torch.log1p(torch.exp(self.bias_rho))

        self.weight = self.weight_mu + weight_sigma * weight_epsilon
        self.bias = self.bias_mu + bias_sigma * bias_epsilon

        weight_log_prior = self.prior.log_prob(self.weight)
        bias_log_prior = self.prior.log_prob(self.bias)
        self.log_prior = torch.sum(weight_log_prior) + torch.sum(bias_log_prior)

        self.weight_post = Normal(self.weight_mu.data, torch.log(1 + torch.exp(self.weight_rho)))
        self.bias_post = Normal(self.bias_mu.data, torch.log(1 + torch.exp(self.bias_rho)))
        self.log_post = self.weight_post.log_prob(self.weight).sum() + self.bias_post.log_prob(self.bias).sum()

        # output_mean = torch.matmul(input, weight.t()) + bias
        # output_var = torch.matmul(input, weight_sigma.t())**2 + bias_sigma**2
        # output_mean = nn.functional.linear(input, self.weight_mu, self.bias_mu)
        # output_variance = nn.functional.linear(input ** 2, weight_sigma ** 2, bias_sigma ** 2) + 1e-8
        # return output_mean, output_var
        return F.linear(input, self.weight, self.bias)

class BNN_Regression(nn.Module):
    def __init__(self, input_dim, output_dim, noise_tol):
        super(BNN_Regression, self).__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim
        # self.batch_size = batch_size
        self.noise_tol = noise_tol

        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
        # self.bnn1 = BayesLinear(input_dim=input_dim, output_dim=64, prior_mu=0, prior_sigma=1.)
        # self.bnn2 = BayesLinear(input_dim=64, output_dim=32, prior_mu=0, prior_sigma=1.)
        # self.fc = BayesLinear(input_dim=16, output_dim=output_dim, prior_mu=0, prior_sigma=1.)
        self.bnn = BayesLinear(input_dim=input_dim, output_dim=16, prior_mu=0, prior_sigma=1.)

        self.fc = BayesLinear(input_dim=16, output_dim=output_dim, prior_mu=0, prior_sigma=1.)

    def forward(self, x):
        x = self.bnn(x)
        x = self.relu(x)
        predictions = self.fc(x)
        # x = self.bnn1(x)
        # x = self.relu(x)
        # x = self.bnn2(x)
        # x = self.tanh(x)
        # x = self.bnn3(x)
        # x = self.relu(x)
        # predictions = self.fc(x)

        return predictions

    def log_prior(self):
        # calculate the log prior over all the layers
        # return self.bnn1.log_prior + self.bnn2.log_prior + self.bnn3.log_prior + self.fc.log_prior

        return self.bnn.log_prior + self.fc.log_prior

    def log_post(self):
        # calculate the log posterior over all the layers
        # return self.bnn1.log_post + self.bnn2.log_post + self.bnn3.log_post + self.fc.log_post

        return self.bnn.log_post + self.fc.log_post

    def sample_elbo(self, input, target, samples, device):
        # we calculate the negative elbo, which will be our loss function
        # initialize tensors
        outputs = torch.zeros(samples, target.shape[0]).to(device)
        log_priors = torch.zeros(samples)
        log_posts = torch.zeros(samples)
        log_likes = torch.zeros(samples)
        # make predictions and calculate prior, posterior, and likelihood for a given number of samples
        # Monte Carlo approximation
        for i in range(samples):
            outputs[i] = self(input).reshape(-1)  # make predictions
            log_priors[i] = self.log_prior()  # get log prior
            log_posts[i] = self.log_post()  # get log variational posterior
            log_likes[i] = Normal(outputs[i], self.noise_tol).log_prob(target.reshape(-1)).sum()  # calculate the log likelihood
        # calculate monte carlo estimate of prior posterior and likelihood
        log_prior = log_priors.mean()
        log_post = log_posts.mean()
        log_like = log_likes.mean()
        # calculate the negative elbo (which is our loss function)
        loss = log_post - log_prior - log_like
        return loss


class ATBNN_Model(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_dim, num_layers, num_heads, batch_size, max_seq_len):
        super(ATBNN_Model, self).__init__()

        self.feature_extractor = Transformer_FeatureExtractor(input_dim=input_dim,
                                                              output_dim=hidden_dim,
                                                              hidden_dim=hidden_dim,
                                                              num_layers=num_layers,
                                                              num_heads=num_heads,
                                                              batch_size=batch_size,
                                                              max_seq_len=max_seq_len)

        self.bnn_regression = BNN_Regression(input_dim=hidden_dim,
                                             output_dim=output_dim,
                                             noise_tol=0.01)

    def forward(self, x):
        self.features = self.feature_extractor(x)
        predictions = self.bnn_regression(self.features)
        return predictions
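Because BayesLinear samples fresh weights on every forward pass, two calls on the same input return different outputs; this is the stochasticity that the MC loops in app.py and test_BNN.py average over. A quick sanity check (a sketch; the layer sizes are illustrative):

import torch
from net_BNN import BayesLinear

layer = BayesLinear(input_dim=8, output_dim=2, prior_mu=0, prior_sigma=1.)
x = torch.randn(4, 8)
y1, y2 = layer(x), layer(x)  # two independent weight samples
print(torch.equal(y1, y2))   # False: outputs differ from run to run
print(layer.log_prior.item(), layer.log_post.item())  # populated during forward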
paras.py
ADDED
@@ -0,0 +1,14 @@
attrib_feature = ['Current', 'Voltage', 'Environment_Temperature', 'Cell_Temperature', 'SOC']
attrib_label = ['SOH']
max_len = 200
input_dim = len(attrib_feature)
output_dim = 1
hidden_dim = 64
num_layers = 2
num_heads = 4
lr = 1e-3
max_seq_len = 200
batch_size = 5
C_rated = 1.1
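These constants duplicate values hard-coded in app.py, test_BNN.py, and train_BNN.py; importing them instead would keep the scripts in sync, e.g. (a sketch):

import paras
import net_BNN

model = net_BNN.ATBNN_Model(
    input_dim=paras.input_dim, output_dim=paras.output_dim,
    hidden_dim=paras.hidden_dim, num_layers=paras.num_layers,
    num_heads=paras.num_heads, batch_size=paras.batch_size,
    max_seq_len=paras.max_seq_len)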
requirements.txt
ADDED
File without changes
test_BNN.py
ADDED
@@ -0,0 +1,127 @@
import pickle

import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

from torch.utils.data import Dataset, DataLoader

import net_BNN

# Run inference with the already-trained model

def test_plot(dataloader, model, device, criterion):
    total_loss = 0.0
    num_samples = 0
    num_sample = 10
    model.eval()
    with torch.no_grad():
        predictions = []
        true_labels = []
        cov = []
        # upp = []
        # low = []

        for test_features, test_labels in dataloader:
            # outputs = model(test_features.to(device))

            # MC sampling: average num_sample stochastic forward passes
            sample_output = torch.zeros(num_sample, test_labels.shape[0]).to(device)
            for i in range(num_sample):
                sample_output[i] = model(test_features.to(device)).reshape(-1)

            outputs = sample_output.mean(dim=0).unsqueeze(1).to(device)
            covs = sample_output.std(dim=0).unsqueeze(1).to(device)

            loss = criterion(outputs, test_labels.to(device))
            total_loss += loss.item() * test_features.size(0)
            num_samples += test_features.size(0)

            predictions.append(outputs.tolist())
            cov.append(covs.tolist())
            true_labels.append(test_labels.tolist())
            # upp.append(np.percentile(sample_output.cpu().detach().numpy() * float(C_test), 95).tolist())
            # low.append(np.percentile(sample_output.cpu().detach().numpy() * float(C_test), 5).tolist())

    average_loss = total_loss / num_samples
    print('Validation Loss: {:.8f}'.format(average_loss))

    x = range(len(sum(predictions, [])))

    pred_array = np.array(sum(predictions, [])).flatten()
    var_array = np.array(sum(cov, [])).flatten()

    plt.plot(sum(predictions, []), label='Predictions')
    plt.plot(sum(true_labels, []), label='True Labels')
    # plt.fill_between(x, upp, low, alpha=0.5, label='Confidence Interval')
    plt.fill_between(x, pred_array + var_array, pred_array - var_array, alpha=0.5, label='Confidence Interval')

    plt.legend()
    plt.xlabel('Sample Index')
    plt.ylabel('Cycle Capacity/Ah')
    plt.show()


base_path = r'E:\member\ShiJH\Battery Datasets\SNL_18650_LFP Datasets\modified_dataset'
attrib_feature = ['Current', 'Voltage', 'Environment_Temperature', 'Cell_Temperature', 'SOC']
attrib_label = ['SOH']

with open('./min_max_values.pkl', 'rb') as f:
    min_val, max_val = pickle.load(f)

max_len = 200
input_dim = len(attrib_feature)
output_dim = 1
hidden_dim = 64
num_layers = 2
num_heads = 4
lr = 1e-3
max_seq_len = 200
batch_size = 5
C_rated = 1.1


testdata_path = base_path + r'\modified_SNL_18650_LFP_25C_0-100_0.5-3C_b_timeseries.csv'
test_data = pd.read_csv(testdata_path)
test_data['SOH'] = test_data['cycle capacity'] / test_data['cycle capacity'].values[0]
# C_val = val_data[attrib_label].values[0]
# scaler_val = train_dataset.scaler
test_dataset = net_BNN.CycleDataset(
    data=test_data,
    attrib_x=attrib_feature,
    attrib_y=attrib_label,
    max_len=max_len,
    C_rated=C_rated,
    min_val=min_val,
    max_val=max_val,
    mode='test')
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=True)
# Build the network
model = net_BNN.ATBNN_Model(
    input_dim=input_dim,
    output_dim=output_dim,
    hidden_dim=hidden_dim,
    num_layers=num_layers,
    num_heads=num_heads,
    batch_size=batch_size,
    max_seq_len=max_seq_len)

# model.load_state_dict(torch.load("./model_B/model_epoch634_Trainloss0.00013560_ValLoss0.00107357.pt"))  # LOSS=0
# model.load_state_dict(torch.load("./model_B/model_epoch2168_Trainloss0.00001729_ValLoss0.00107112.pt"))
model.load_state_dict(torch.load("./model_epoch8876_Trainloss0.00001546_ValLoss0.00022519.pt"))

# model.load_state_dict(torch.load("./model/model_epoch1.pt"))

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model.to(device)
criterion = nn.MSELoss(reduction='mean')


test_plot(test_dataloader, model, device, criterion)
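The shaded band that test_plot draws is mean ± one MC standard deviation. Assuming the predictive samples are roughly Gaussian, an approximate 95% band only rescales the same arrays (a sketch using the variables already defined in test_plot):

plt.fill_between(x, pred_array + 1.96 * var_array,
                 pred_array - 1.96 * var_array,
                 alpha=0.3, label='~95% Interval')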
train_BNN.py
ADDED
@@ -0,0 +1,246 @@
import random

import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from torch.utils.data import Dataset, DataLoader

import net_BNN


def train(n_epochs, dataloader, val_dataloader, model, criterion, optimizer, device):

    num_batches = len(dataloader)
    num_sample = 10

    model.train()  # Important: set training mode
    for epoch in range(n_epochs):
        total_loss = 0
        total_MSEloss = 0
        for inputs, labels in dataloader:
            inputs, labels = inputs.to(device), labels.to(device)  # move to GPU
            optimizer.zero_grad()

            # MC sampling
            # sample_output = torch.zeros(num_sample, labels.shape[0]).to(device)
            # for i in range(num_sample):
            #     sample_output[i] = model(inputs).reshape(-1)
            #
            # outputs = torch.Tensor(sample_output.mean(dim=0).unsqueeze(1))

            outputs = model(inputs)
            features = model.features
            loss = model.bnn_regression.sample_elbo(features, labels, 10, device)

            MSEloss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            total_MSEloss += MSEloss.item()

        train_loss = total_loss / num_batches
        train_MSEloss = total_MSEloss / num_batches

        val_loss, val_MSEloss = val(val_dataloader, model, criterion, device)

        # torch.save(model.state_dict(), ".\model.pth")
        torch.save(model.state_dict(), f"./model_B/model_epoch{epoch + 1}_Trainloss{train_MSEloss:.8f}_ValLoss{val_MSEloss:.8f}.pt")
        print('Epoch [{}/{}], Train_Loss: {:.8f}, Val_Loss: {:.8f}'.format(epoch + 1, n_epochs, train_loss, val_loss), end=' ')
        print('Train_MSE_Loss: {:.8f}, Val_MSE_Loss: {:.8f}'.format(train_MSEloss, val_MSEloss))


def val(dataloader, model, criterion, device):
    val_loss = 0
    num_batches = len(dataloader)
    val_MSEloss = 0
    num_sample = 10
    model.eval()  # Important: set eval mode
    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs = inputs.to(device)
            labels = labels.to(device)  # move to GPU

            # # MC sampling
            # sample_output = torch.zeros(num_sample, labels.shape[0]).to(device)
            # for i in range(num_sample):
            #     sample_output[i] = model(inputs).reshape(-1)
            #
            # outputs = torch.Tensor(sample_output.mean(dim=0).unsqueeze(1))
            outputs = model(inputs)
            features = model.features
            loss = model.bnn_regression.sample_elbo(features, labels, 1, device)
            # outputs = model(inputs)
            # features = model.features.to(device)
            # loss = model.bnn_regression.sample_elbo(features, labels, 1, device)
            MSEloss = criterion(outputs, labels)
            val_loss += loss.item()
            val_MSEloss += MSEloss.item()
    val_loss = val_loss / num_batches
    val_MSEloss = val_MSEloss / num_batches

    return val_loss, val_MSEloss


def test_plot(dataloader, model, device, criterion):
    # NOTE: this variant expects a model that returns (mean, variance); it is
    # unused here (the call at the bottom of the file is commented out).
    total_loss = 0.0
    num_samples = 0
    model.eval()
    with torch.no_grad():
        predictions = []
        true_labels = []
        var = []
        for test_features, test_labels in dataloader:
            outputs, vars = model(test_features.to(device))
            loss = criterion(outputs, test_labels.to(device))
            total_loss += loss.item() * test_features.size(0)
            num_samples += test_features.size(0)

            predictions.append(outputs.tolist())
            var.append(vars.tolist())
            true_labels.append(test_labels.tolist())

    average_loss = total_loss / num_samples
    print('Validation Loss: {:.8f}'.format(average_loss))
    #
    # predictions = [(np.array(x) * y_std + y_mean).tolist() for x in predictions]
    # true_labels = [(np.array(x) * y_std + y_mean).tolist() for x in true_labels]
    x = range(len(sum(predictions, [])))

    pred_array = np.array(sum(predictions, [])).flatten()
    var_array = np.array(sum(var, [])).flatten()

    plt.plot(sum(predictions, []), label='Predictions')
    plt.plot(sum(true_labels, []), label='True Labels')
    plt.fill_between(x, pred_array + var_array, pred_array - var_array, alpha=0.5)

    plt.legend()
    plt.xlabel('Sample Index')
    plt.ylabel('Cycle Capacity')
    plt.show()


def setup_seed(seed):
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True

# Set random seeds for reproducibility
setup_seed(20)

base_path = r'E:\member\ShiJH\Battery Datasets\SNL_18650_LFP Datasets\modified_dataset'
# csv_files_list = [base_path + str('\modified_SNL_18650_LFP_25C_0-100_0.5-1C_a_timeseries.csv'),
#                   # base_path + str('\modified_SNL_18650_LFP_25C_0-100_0.5-1C_b_timeseries.csv'),
#                   base_path + str('\modified_SNL_18650_LFP_25C_0-100_0.5-3C_a_timeseries.csv')]
# train_data = pd.DataFrame()
# cycle_index = 0
# index_max = 0
# for csv_file in csv_files_list:
#     df = pd.read_csv(csv_file)
#     C_ini = df['cycle capacity'].values[0]
#     df['SOH'] = df['cycle capacity'] / C_ini
#     index_max = df['Cycle_Index'].max()
#     df['Cycle_Index'] = df['Cycle_Index'] + cycle_index
#     cycle_index += index_max
#     train_data = pd.concat([train_data, df], ignore_index=True)
traindata_path = base_path + r'\modified_SNL_18650_LFP_25C_0-100_0.5-3C_a_timeseries.csv'
train_data = pd.read_csv(traindata_path)
train_data['SOH'] = train_data['cycle capacity'] / train_data['cycle capacity'].values[0]

# attrib_feature = ['Test_Time','Charge_Capacity','Discharge_Capacity','Voltage','Environment_Temperature','Cell_Temperature']
attrib_feature = ['Current', 'Voltage', 'Environment_Temperature', 'Cell_Temperature', 'SOC']
attrib_label = ['SOH']
max_len = 200  # initial value was 100
C_rated = 1.1
C_train = train_data[attrib_label].values[0]
train_dataset = net_BNN.CycleDataset(
    data=train_data,
    attrib_x=attrib_feature,
    attrib_y=attrib_label,
    max_len=max_len,
    C_rated=C_rated,
    mode='train')
min_val, max_val = train_dataset.get_min_max_values()


batch_size = 30
# train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
#                               collate_fn=train_dataset.pad_collate, drop_last=True)

train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)

valdata_path = base_path + r'\modified_SNL_18650_LFP_25C_0-100_0.5-3C_b_timeseries.csv'
val_data = pd.read_csv(valdata_path)
val_data['SOH'] = val_data['cycle capacity'] / val_data['cycle capacity'].values[0]
# C_val = val_data[attrib_label].values[0]
# scaler_val = train_dataset.scaler
val_dataset = net_BNN.CycleDataset(
    data=val_data,
    attrib_x=attrib_feature,
    attrib_y=attrib_label,
    max_len=max_len,
    C_rated=C_rated,
    min_val=min_val,
    max_val=max_val,
    mode='val')

# val_dataloader = DataLoader(tar_dataset, batch_size=batch_size, shuffle=False, collate_fn=tar_dataset.pad_collate, drop_last=True)
val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, drop_last=True)


# testdata_path = base_path + str('\modified_SNL_18650_LFP_25C_0-100_0.5-1C_c_timeseries.csv')
# test_data = pd.read_csv(testdata_path)
# C_test = test_data[attrib_label].values[0]
# test_dataset = net.CycleDataset(test_data, attrib_feature, attrib_label, C_test, max_len, C_rated,
#                                 min_val=min_val, max_val=max_val, mode='test')
# # test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=test_dataset.pad_collate, drop_last=True)
# test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=True)


# Initialize the Transformer model
input_dim = len(attrib_feature)
output_dim = 1
hidden_dim = 64
num_layers = 2
num_heads = 4
lr = 1e-4
max_seq_len = 200


# ATBNN_model = net.ATBNN_Model(input_dim, output_dim, hidden_dim, num_layers, num_heads, batch_size, max_seq_len)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Build the network
ATBNN_model = net_BNN.ATBNN_Model(
    input_dim=input_dim,
    output_dim=output_dim,
    hidden_dim=hidden_dim,
    num_layers=num_layers,
    num_heads=num_heads,
    batch_size=batch_size,
    max_seq_len=max_seq_len)
# ATBNN_model.load_state_dict(torch.load("./model_BNN_new/model_epoch185_Trainloss0.01930173_ValLoss0.01682474.pt"))
ATBNN_model.to(device)

optimizer = torch.optim.Adam(ATBNN_model.parameters(), lr=lr)
# optimizer = torch.optim.Adadelta(ATBNN_model.parameters(), lr=1.0, rho=0.9, eps=1e-6, weight_decay=0)
# optimizer = torch.optim.SGD(ATBNN_model.parameters(), lr=lr)
# scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
criterion = nn.MSELoss(reduction='mean')
num_epochs = 10000


ATBNN_model.to(device)
train(num_epochs, train_dataloader, val_dataloader, ATBNN_model, criterion, optimizer, device)
# test_plot(test_dataloader, ATBNN_model, device, criterion)
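The loss minimized in train is the negative ELBO from net_BNN.BNN_Regression.sample_elbo: the mean over MC weight samples of log q(w) - log p(w) - log p(y|x, w), with a N(0, 1) prior and a N(prediction, noise_tol) likelihood. A minimal end-to-end step on synthetic data (a sketch; the dimensions are illustrative and far smaller than the real run):

import torch
import net_BNN

model = net_BNN.ATBNN_Model(input_dim=5, output_dim=1, hidden_dim=16,
                            num_layers=1, num_heads=2, batch_size=4,
                            max_seq_len=200)
x = torch.randn(4, 200, 5)   # (batch, padded seq, features), as the dataset yields
y = torch.rand(4, 1)         # synthetic SOH targets
out = model(x)               # stores model.features as a side effect
loss = model.bnn_regression.sample_elbo(model.features, y, samples=3,
                                        device=torch.device('cpu'))
loss.backward()              # negative ELBO drives both encoder and BNN head
print(out.shape, loss.item())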