|

楼主 |
发表于 2024-10-22 22:21:03
|
显示全部楼层
- #dnn模型(train_test_split)
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
- #定义神经网络模型
- dropout1, dropout2 = 0.3, 0.6
- class SimpleNN(nn.Module):
- def __init__(self):
- super(SimpleNN, self).__init__() # 继承需要用 SimpleNN
- self.dense = nn.Sequential(
- nn.Flatten(),
- nn.Linear(12, 128),
- nn.ReLU(),
- nn.Dropout(dropout1),
- nn.Linear(128, 256),
- nn.ReLU(),
- nn.Dropout(dropout2),
- nn.Linear(256, 1),
- )
-
- def forward(self, X):
- x = self.dense(X)
- output = torch.sigmoid(x)
- return output
-
- #初始化模型和优化器
- simple_dnn_model = SimpleNN()
- loss = nn.BCELoss() #定义损失函数
- optimizer = optim.Adam(nn_model.parameters(),lr=0.0001) #定义优化器
- #初始化列表
- acc_list = []
- loss_list = []
- # 开始训练
- batch_size = 99
- num_epochs = 1000
-
- #创建数据集
- train_dataset = data.TensorDataset(X_train, y_train)
- valid_dataset = data.TensorDataset(X_valid, y_valid)
- # 获取一个数据迭代器
- train_iter = DataLoader(dataset=train_dataset,batch_size=batch_size,shuffle=True,num_workers=2)#shuffle=True相当于sampler=RandomSampler(dataset)
- valid_iter = DataLoader(dataset=valid_dataset,batch_size=batch_size,shuffle=True,num_workers=2)
- #开始迭代
- for epoch in range(num_epochs):
- train_loss = 0
- num_right = 0
- for tensor_x, tensor_y in train_iter:#训练集执行梯度更新
- tensor_x = tensor_x.float()
- tensor_y = tensor_y.float().reshape(-1, 1)
- optimizer.zero_grad() #梯度清零
- pre_train = simple_dnn_model(tensor_x)
- train_l = loss(pre_train, tensor_y) #损失应避免与全局变量loss重名
- train_l.backward()#前向传播
- optimizer.step()#梯度下降
- train_loss += train_l.item() * len(tensor_x)#批量损失
- result = [1 if out >= 0.5 else 0 for out in pre_train]
- num_right += np.sum(np.array(result) == tensor_y.numpy().reshape(-1))
- train_loss = train_loss / len(train_iter.dataset)
- train_accuracy = num_right / len(train_iter.dataset)
- if epoch % 200 == 0:
- print('Loss: {} Accuracy: {} Epoch:{}'.format(train_loss, train_accuracy, epoch))
- with torch.no_grad():
- valid_loss = 0
- num_right = 0
- for tensor_x, tensor_y in valid_iter:
- tensor_x = tensor_x.float()
- tensor_y = tensor_y.float().reshape(-1, 1)
- pre_valid = simple_dnn_model(tensor_x)
- valid_l = loss(pre_valid, tensor_y)
- valid_loss += valid_l.item() * len(tensor_x)
- result = [1 if out >= 0.5 else 0 for out in pre_valid]
- num_right += np.sum(np.array(result) == tensor_y.numpy().reshape(-1))
- valid_loss = valid_loss / len(valid_iter.dataset)
- valid_accuracy = num_right / len(valid_iter.dataset)
- if epoch % 200 == 0:
- print('Valid Loss: {} Accuracy: {} Epoch:{}'.format(valid_loss, valid_accuracy, epoch))
- #将每次迭代的结果写入列表
- loss_list.append(valid_loss)
- acc_list.append(valid_accuracy)
- print('Training Ended')
- print('Average Loss: {} Average Accuracy: {}'.format(np.mean(loss_list), np.mean(acc_list)))
复制代码
数据的特征和标签实际上是 pandas 的 DataFrame/Series(从下方报错和 .values 用法可以看出,它们并不是 tensor),转换为 numpy 后形状分别是 X_train.values.shape, y_train.values.shape, X_valid.values.shape, y_valid.values.shape
复制代码
((569, 12), (569,), (143, 12), (143,))
训练模型代码报错如下- ---------------------------------------------------------------------------
- TypeError Traceback (most recent call last)
- Cell In[63], line 47
- 43 num_epochs = 1000
- 46 #创建数据集
- ---> 47 train_dataset = data.TensorDataset(X_train, y_train)
- 48 valid_dataset = data.TensorDataset(X_valid, y_valid)
- 50 # 获取一个数据迭代器
- File /opt/conda/lib/python3.10/site-packages/torch/utils/data/dataset.py:204, in TensorDataset.__init__(self, *tensors)
- 203 def __init__(self, *tensors: Tensor) -> None:
- --> 204 assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors), "Size mismatch between tensors"
- 205 self.tensors = tensors
- File /opt/conda/lib/python3.10/site-packages/torch/utils/data/dataset.py:204, in <genexpr>(.0)
- 203 def __init__(self, *tensors: Tensor) -> None:
- --> 204 assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors), "Size mismatch between tensors"
- 205 self.tensors = tensors
- TypeError: 'int' object is not callable
复制代码 |
|