I wanted to add an LSTM model to someone else's code to test the results, but after adding it I ran into a problem.
This is the LSTM model:

import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np


class LSTM(nn.Module):
    def __init__(self, input_num, hid_num, layers_num, out_num, batch_first=True):
        super().__init__()
        self.l1 = nn.LSTM(input_size=input_num, hidden_size=hid_num, num_layers=layers_num, batch_first=batch_first)
        self.out = nn.Linear(hid_num, out_num)

    def forward(self, data):
        flow_x = data['flow_x']  # B * T * D
        l_out, (h_n, c_n) = self.l1(flow_x, None)  # None: the initial hidden state is zeros
        print(l_out[:, -1, :])
        out = self.out(l_out[:, -1, :])
        return out
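Just for reference, a minimal standalone check (the shapes below are made up, not from the original project) of what this forward expects: a single dict argument whose "flow_x" entry is a 3-D tensor of shape [B, T, D], with D equal to input_num.

import torch

model = LSTM(input_num=6, hid_num=6, layers_num=3, out_num=1)
dummy = {"flow_x": torch.randn(4, 12, 6)}   # B=4, T=12, D=6 (arbitrary)
print(model(dummy).shape)                   # torch.Size([4, 1])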
This is my code:

import os
import time
import h5py
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from traffic_dataset import LoadData
from utils import Evaluation
from utils import visualize_result
from chebnet import ChebNet
from gat import GATNet
from lstm import LSTM


class Baseline(nn.Module):
    def __init__(self, in_c, out_c):
        super(Baseline, self).__init__()
        self.layer = nn.Linear(in_c, out_c)

    def forward(self, data, device):
        flow_x = data["flow_x"].to(device)  # [B, N, H, D]
        B, N = flow_x.size(0), flow_x.size(1)
        flow_x = flow_x.view(B, N, -1)      # [B, N, H * D], H = 6, D = 1
        output = self.layer(flow_x)         # [B, N, Out_C], Out_C = D
        return output.unsqueeze(2)          # [B, N, 1, D]
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    # Loading Dataset
    train_data = LoadData(data_path=["PEMS08/PEMS08.csv", "PEMS08/PEMS08.npz"], num_nodes=170, divide_days=,
                          time_interval=5, history_length=6,
                          train_mode="train")
    train_loader = DataLoader(train_data, batch_size=64, shuffle=True, num_workers=8)

    test_data = LoadData(data_path=["PEMS08/PEMS08.csv", "PEMS08/PEMS08.npz"], num_nodes=170, divide_days=,
                         time_interval=5, history_length=6,
                         train_mode="test")
    test_loader = DataLoader(test_data, batch_size=64, shuffle=False, num_workers=8)
    # Loading Model
    # my_net = GATNet(in_c=6 * 1, hid_c=6, out_c=1, n_heads=2)
    # my_net = GATNet(in_c=6 * 1, hid_c=6, out_c=1, n_heads=2, lstm_hidden_dim=1)
    # my_net = GCN(in_c=6, hid_c=6, out_c=1)
    # my_net = ChebNet(in_c=6, hid_c=6, out_c=1, K=5)
    my_net = LSTM(input_num=6, hid_num=6, layers_num=3, out_num=1)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    my_net = my_net.to(device)

    criterion = nn.MSELoss()
    optimizer = optim.Adam(params=my_net.parameters())

    # Train model
    Epoch = 100
    my_net.train()
    for epoch in range(Epoch):
        epoch_loss = 0.0
        start_time = time.time()
        for data in train_loader:  # data: {"graph": ..., "flow_x": ..., "flow_y": ...}
            my_net.zero_grad()
            predict_value = my_net(data, device).to(torch.device("cpu"))  # move back to the CPU before the loss
            loss = criterion(predict_value, data["flow_y"])
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
        end_time = time.time()
        print("Epoch: {:04d}, Loss: {:02.4f}, Time: {:02.2f} mins".format(epoch, 1000 * epoch_loss / len(train_data),
                                                                          (end_time - start_time) / 60))
    # Test Model
    my_net.eval()
    with torch.no_grad():
        MAE, MAPE, RMSE = [], [], []
        Target = np.zeros([170, 1, 1])  # assumed shape: one row per node (num_nodes=170); the first column is deleted below
        Predict = np.zeros_like(Target)

        total_loss = 0.0
        for data in test_loader:
            predict_value = my_net(data, device).to(torch.device("cpu"))  # [B, N, 1, D]
            loss = criterion(predict_value, data["flow_y"])
            total_loss += loss.item()

            predict_value = predict_value.transpose(0, 2).squeeze(0)  # [B, N, 1, D] -> [1, N, B, D] -> [N, B, D]
            target_value = data["flow_y"].transpose(0, 2).squeeze(0)  # [B, N, 1, D] -> [1, N, B, D] -> [N, B, D]

            performance, data_to_save = compute_performance(predict_value, target_value, test_loader)

            # indexing assumed: compute_performance returns ([mae, mape, rmse], [prediction, target])
            Predict = np.concatenate([Predict, data_to_save[0]], axis=1)
            Target = np.concatenate([Target, data_to_save[1]], axis=1)

            MAE.append(performance[0])
            MAPE.append(performance[1])
            RMSE.append(performance[2])

        print("Test Loss: {:02.4f}".format(1000 * total_loss / len(test_data)))

    print("Performance: MAE {:2.2f}  MAPE {:2.2f}%  RMSE {:2.2f}".format(np.mean(MAE), np.mean(MAPE) * 100, np.mean(RMSE)))

    Predict = np.delete(Predict, 0, axis=1)
    Target = np.delete(Target, 0, axis=1)

    result_file = "GAT_result.h5"
    file_obj = h5py.File(result_file, "w")
    file_obj["predict"] = Predict
    file_obj["target"] = Target
def compute_performance(prediction, target, data):
    try:
        dataset = data.dataset  # when a DataLoader is passed
    except AttributeError:
        dataset = data          # when a Dataset is passed

    # assumes dataset.flow_norm holds the normalization pair used by LoadData.recover_data
    prediction = LoadData.recover_data(dataset.flow_norm[0], dataset.flow_norm[1], prediction.numpy())
    target = LoadData.recover_data(dataset.flow_norm[0], dataset.flow_norm[1], target.numpy())

    mae, mape, rmse = Evaluation.total(target.reshape(-1), prediction.reshape(-1))

    performance = [mae, mape, rmse]
    recovered_data = [prediction, target]
    return performance, recovered_data
if __name__ == '__main__':
    main()

    # visualize_result(h5_file="GAT_result.h5",
    #                  nodes_id=120,
    #                  time_se=,
    #                  visualize_file="gat_node_120")
This is the error I end up with:
Traceback (most recent call last):
File "D:\traffic\traffic_prediction\traffic_prediction.py", line 196, in <module>
main()
File "D:\traffic\traffic_prediction\traffic_prediction.py", line 120, in main
predict_value = my_net(data, device).to(torch.device("cpu"))# -> recover
File "D:\anaconda\envs\traffic_pred\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
TypeError: forward() takes 2 positional arguments but 3 were given
Process finished with exit code 1
What I want to know is what I should do to fix this. I hope someone can walk me through it; I have never studied anything like this before and this is my first time working with it.

Reply: The problem is that the forward method of your LSTM class takes only one argument, but in main you call my_net(data, device), which passes two. One way around this is to pass the data and the device together as a single tuple:

predict_value = my_net((data, device)).to(torch.device("cpu"))

and then unpack the tuple inside the LSTM's forward method:

def forward(self, data_device_tuple):
    data, device = data_device_tuple
    flow_x = data['flow_x'].to(device)
    ...

That should take care of the error.

OP: That did not solve it. It is probably a problem with how I process the data. It is frustrating, but there is nothing I can do about it.
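A note for anyone who lands here with the same error: instead of packing everything into a tuple, another option is to give the LSTM the same forward(data, device) signature that Baseline and the other models in this script use, so the existing call my_net(data, device) works unchanged. The sketch below is only a starting point, not the original author's code: it assumes flow_x comes out of LoadData with shape [B, N, H, D] like it does for the other models, folds the node dimension into the batch before nn.LSTM, and reshapes the output to match flow_y; under this layout input_num should be the per-step feature size D (1 here), not 6.

import torch
import torch.nn as nn


class LSTM(nn.Module):
    def __init__(self, input_num, hid_num, layers_num, out_num, batch_first=True):
        super().__init__()
        self.l1 = nn.LSTM(input_size=input_num, hidden_size=hid_num,
                          num_layers=layers_num, batch_first=batch_first)
        self.out = nn.Linear(hid_num, out_num)

    def forward(self, data, device):                 # same calling convention as Baseline
        flow_x = data["flow_x"].to(device)           # assumed shape [B, N, H, D]
        B, N, H, D = flow_x.size()
        flow_x = flow_x.reshape(B * N, H, D)         # fold nodes into the batch -> [B*N, H, D]
        l_out, (h_n, c_n) = self.l1(flow_x, None)    # None: initial hidden/cell states are zeros
        out = self.out(l_out[:, -1, :])              # last time step -> [B*N, out_num]
        return out.reshape(B, N, 1, -1)              # back to [B, N, 1, out_num] to match flow_y


# e.g. my_net = LSTM(input_num=1, hid_num=6, layers_num=3, out_num=1)  # input_num = D under this layout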