鱼C论坛


I want to add an LSTM model to someone else's code to test the results, but after adding it a problem occurred.

Posted on 2023-9-27 19:15:08

This is the LSTM model (lstm.py):
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np

class LSTM(nn.Module):
    def __init__(self, input_num, hid_num, layers_num, out_num, batch_first=True):
        super().__init__()
        self.l1 = nn.LSTM(input_size=input_num, hidden_size=hid_num, num_layers=layers_num, batch_first=batch_first)
        self.out = nn.Linear(hid_num, out_num)

    def forward(self, data):
        flow_x = data['flow_x']  # [B, T, D]
        l_out, (h_n, c_n) = self.l1(flow_x, None)  # None means the initial hidden/cell state is zero
        print(l_out[:, -1, :])
        out = self.out(l_out[:, -1, :])
        return out

This is my code:
import os
import time
import h5py
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader

from traffic_dataset import LoadData
from utils import Evaluation
from utils import visualize_result
from chebnet import ChebNet
from gat import GATNet
from lstm import LSTM

class Baseline(nn.Module):
    def __init__(self, in_c, out_c):
        super(Baseline, self).__init__()
        self.layer = nn.Linear(in_c, out_c)

    def forward(self, data, device):
        flow_x = data["flow_x"].to(device)  # [B, N, H, D]

        B, N = flow_x.size(0), flow_x.size(1)

        flow_x = flow_x.view(B, N, -1)  # [B, N, H*D]  H = 6, D = 1

        output = self.layer(flow_x)  # [B, N, Out_C], Out_C = D

        return output.unsqueeze(2)  # [B, N, 1, D=Out_C]


def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    # Loading Dataset

    train_data = LoadData(data_path=["PEMS08/PEMS08.csv", "PEMS08/PEMS08.npz"], num_nodes=170, divide_days=[46, 16],
                          time_interval=5, history_length=6,
                          train_mode="train")
    train_loader = DataLoader(train_data, batch_size=64, shuffle=True, num_workers=8)

    test_data = LoadData(data_path=["PEMS08/PEMS08.csv", "PEMS08/PEMS08.npz"], num_nodes=170, divide_days=[46, 16],
                         time_interval=5, history_length=6,
                         train_mode="test")
    test_loader = DataLoader(test_data, batch_size=64, shuffle=False, num_workers=8)

    # Loading Model
    # my_net = GATNet(in_c=6 * 1, hid_c=6, out_c=1, n_heads=2)
    # my_net = GATNet(in_c=6 * 1, hid_c=6, out_c=1, n_heads=2, lstm_hidden_dim=1)
    # my_net = GCN(in_c=6,hid_c=6,out_c=1)
    # my_net = ChebNet(in_c=6, hid_c=6, out_c=1, K=5)
    my_net = LSTM(input_num=6,hid_num=6,layers_num=3,out_num=1)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    my_net = my_net.to(device)

    criterion = nn.MSELoss()

    optimizer = optim.Adam(params=my_net.parameters())

    # Train model
    Epoch = 100

    my_net.train()
    for epoch in range(Epoch):
        epoch_loss = 0.0
        start_time = time.time()
        for data in train_loader:  # ["graph": [B, N, N] , "flow_x": [B, N, H, D], "flow_y": [B, N, 1, D]]
            my_net.zero_grad()

            predict_value = my_net(data, device).to(torch.device("cpu"))  # [0, 1] -> recover

            loss = criterion(predict_value, data["flow_y"])

            epoch_loss += loss.item()

            loss.backward()

            optimizer.step()
        end_time = time.time()

        print("Epoch: {:04d}, Loss: {:02.4f}, Time: {:02.2f} mins".format(epoch, 1000 * epoch_loss / len(train_data),
                                                                          (end_time-start_time)/60))

    # Test Model
    my_net.eval()
    with torch.no_grad():
        MAE, MAPE, RMSE = [], [], []
        # Target = np.zeros([307, 1, 1]) # [N, 1, D]
        Target = np.zeros([170, 1, 1]) # [N, 1, D]

        Predict = np.zeros_like(Target)  #[N, T, D]

        total_loss = 0.0
        for data in test_loader:

            predict_value = my_net(data, device).to(torch.device("cpu"))  # [B, N, 1, D]  -> [1, N, B(T), D]

            loss = criterion(predict_value, data["flow_y"])

            total_loss += loss.item()

            predict_value = predict_value.transpose(0, 2).squeeze(0)  # [1, N, B(T), D] -> [N, B(T), D] -> [N, T, D]
            target_value = data["flow_y"].transpose(0, 2).squeeze(0)  # [1, N, B(T), D] -> [N, B(T), D] -> [N, T, D]

            performance, data_to_save = compute_performance(predict_value, target_value, test_loader)

            Predict = np.concatenate([Predict, data_to_save[0]], axis=1)
            Target = np.concatenate([Target, data_to_save[1]], axis=1)

            MAE.append(performance[0])
            MAPE.append(performance[1])
            RMSE.append(performance[2])

        print("Test Loss: {:02.4f}".format(1000 * total_loss / len(test_data)))

    print("Performance:  MAE {:2.2f}   MAPE {:2.2f}%  RMSE  {:2.2f}".format(np.mean(MAE), np.mean(MAPE * 100), np.mean(RMSE)))

    Predict = np.delete(Predict, 0, axis=1)
    Target = np.delete(Target, 0, axis=1)

    result_file = "GAT_result.h5"
    file_obj = h5py.File(result_file, "w")

    file_obj["predict"] = Predict
    file_obj["target"] = Target


def compute_performance(prediction, target, data):
    try:
        dataset = data.dataset  # dataloader
    except:
        dataset = data  # dataset

    prediction = LoadData.recover_data(dataset.flow_norm[0], dataset.flow_norm[1], prediction.numpy())
    target = LoadData.recover_data(dataset.flow_norm[0], dataset.flow_norm[1], target.numpy())

    mae, mape, rmse = Evaluation.total(target.reshape(-1), prediction.reshape(-1))

    performance = [mae, mape, rmse]
    recovered_data = [prediction, target]

    return performance, recovered_data


if __name__ == '__main__':
    main()
    # visualize_result(h5_file="GAT_result.h5",
    #                  nodes_id=120,
    #                  time_se=[0, 24 * 12 * 2],
    #                  visualize_file="gat_node_120")

This is the error I end up with:
Traceback (most recent call last):
  File "D:\traffic\traffic_prediction\traffic_prediction.py", line 196, in <module>
    main()
  File "D:\traffic\traffic_prediction\traffic_prediction.py", line 120, in main
    predict_value = my_net(data, device).to(torch.device("cpu"))  # [0, 1] -> recover
  File "D:\anaconda\envs\traffic_pred\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
TypeError: forward() takes 2 positional arguments but 3 were given

Process finished with exit code 1


What I want to know is what I should do to fix this problem. I hope someone can walk me through it; I have never studied anything like this before and this is my first time encountering it.
Reply posted on 2023-9-27 19:15:45
According to the error message, forward() accepts two positional arguments, but the call my_net(data, device) passes three. In other words, the LSTM model's forward() only takes the single data argument, while you call it with both data and device. One way to fix this is to pass device to the LSTM constructor, use it inside forward(), and then call the model with my_net(data) only. The relevant changes to main() are shown below:

```
    # Loading Model
    # Create the device before the model and pass it to the LSTM constructor,
    # so forward() no longer needs a device argument
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    my_net = LSTM(input_num=6, hid_num=6, layers_num=3, out_num=1, device=device)

    my_net = my_net.to(device)

    # ... dataset loading, criterion, optimizer and the rest of main() stay unchanged ...

    # In the training loop, call the model with data only:
    predict_value = my_net(data).to(torch.device("cpu"))  # [0, 1] -> recover

    # And likewise in the test loop:
    predict_value = my_net(data).to(torch.device("cpu"))  # [B, N, 1, D] -> [1, N, B(T), D]
```
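For this to work, lstm.py itself also has to change: the LSTM constructor must accept device and forward() must move flow_x onto that device. The reply does not show that file, so the following is only a minimal sketch of what the adjusted class could look like; the device parameter and the .to(self.device) call are the assumed additions:

```
import torch.nn as nn


class LSTM(nn.Module):
    def __init__(self, input_num, hid_num, layers_num, out_num, device, batch_first=True):
        super().__init__()
        self.device = device  # remembered here so forward() can move inputs onto it
        self.l1 = nn.LSTM(input_size=input_num, hidden_size=hid_num,
                          num_layers=layers_num, batch_first=batch_first)
        self.out = nn.Linear(hid_num, out_num)

    def forward(self, data):
        # move the input onto the stored device instead of taking the device as an argument
        flow_x = data['flow_x'].to(self.device)  # [B, T, D]
        l_out, (h_n, c_n) = self.l1(flow_x, None)  # None -> zero initial hidden/cell state
        out = self.out(l_out[:, -1, :])  # predict from the last time step only
        return out
```

An equally simple alternative is to leave the constructor as it was and declare forward(self, data, device) instead, matching the Baseline model in the original script, so the existing my_net(data, device) calls keep working.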
