Model training code (using a DNN model)
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
from torch import optim
# Define the neural network model
dropout1, dropout2 = 0.3, 0.6

class SimpleNN(nn.Module):
    def __init__(self):
        super(SimpleNN, self).__init__()  # pass the subclass name SimpleNN to super()
        self.dense = nn.Sequential(
            nn.Flatten(),
            nn.Linear(12, 128),
            nn.ReLU(),
            nn.Dropout(dropout1),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Dropout(dropout2),
            nn.Linear(256, 1),
        )

    def forward(self, X):
        x = self.dense(X)
        output = torch.sigmoid(x)
        return output
# Initialize the model and optimizer
nn_model = SimpleNN()
loss = nn.BCELoss()  # define the loss function
optimizer = optim.Adam(nn_model.parameters(), lr=0.0001)  # define the optimizer
# Initialize the result lists
acc_list = []
loss_list = []
# Select the training and validation sets via k-fold cross-validation
def get_k_fold_data(k, i, X, y):
    assert k > 1
    fold_size = len(X) // k
    X_train, y_train = None, None
    for j in range(k):
        start = j * fold_size
        end = (j + 1) * fold_size
        if j == i:
            X_valid, y_valid = X.iloc[start:end], y.iloc[start:end]
        elif X_train is None:
            X_train, y_train = X.iloc[:start], y.iloc[start:end]
        else:
            X_train = pd.concat([X_train, X.iloc[start:end]], ignore_index=True)
            y_train = pd.concat([y_train, y.iloc[start:end]], ignore_index=True)
    return X_train, y_train, X_valid, y_valid
# Start training
k = 5
batch_size = 99
num_epochs = 1000
for i in range(k):
    X_train, y_train, X_valid, y_valid = get_k_fold_data(k, i, X, y)
    print(f'FOLD {i}')
    print('--------------------------------')
    # Convert the DataFrame data to NumPy arrays, then to PyTorch tensors
    X_train = torch.tensor(X_train.astype(np.float32).values, dtype=torch.float32)
    y_train = torch.tensor(y_train.astype(np.float32).values, dtype=torch.float32)
    X_valid = torch.tensor(X_valid.astype(np.float32).values, dtype=torch.float32)
    y_valid = torch.tensor(y_valid.astype(np.float32).values, dtype=torch.float32)
    # Create the datasets
    train_dataset = data.TensorDataset(X_train, y_train)
    valid_dataset = data.TensorDataset(X_valid, y_valid)
    # Get a data iterator
    train_iter = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)  # shuffle=True is equivalent to sampler=RandomSampler(dataset)
    valid_iter = DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
    # Start iterating over epochs
    for epoch in range(num_epochs):
        train_loss = 0
        num_right = 0
        for tensor_x, tensor_y in train_iter:  # gradient updates on the training set
            tensor_x = tensor_x.float()
            tensor_y = tensor_y.float().reshape(-1, 1)
            optimizer.zero_grad()  # zero the gradients
            pre_train = nn_model(tensor_x)
            train_l = loss(pre_train, tensor_y)  # use a different name to avoid shadowing the global loss
            train_l.backward()  # backpropagation
            optimizer.step()  # gradient descent step
            train_loss += train_l.item() * len(tensor_x)
            result = [1 if out >= 0.5 else 0 for out in pre_train]
            num_right += np.sum(np.array(result) == tensor_y.numpy().reshape(-1))
        train_loss = train_loss / len(train_iter.dataset)
        train_accuracy = num_right / len(train_iter.dataset)
        if epoch % 200 == 0:
            print('Loss: {} Accuracy: {} Epoch:{}'.format(train_loss, train_accuracy, epoch))
        with torch.no_grad():
            valid_loss = 0
            num_right = 0
            for tensor_x, tensor_y in valid_iter:
                tensor_x = tensor_x.float()
                tensor_y = tensor_y.float().reshape(-1, 1)
                pre_valid = nn_model(tensor_x)
                valid_l = loss(pre_valid, tensor_y)
                valid_loss += valid_l.item() * len(tensor_x)
                result = [1 if out >= 0.5 else 0 for out in pre_valid]
                num_right += np.sum(np.array(result) == tensor_y.numpy().reshape(-1))
            valid_loss = valid_loss / len(valid_iter.dataset)
            valid_accuracy = num_right / len(valid_iter.dataset)
            if epoch % 200 == 0:
                print('Valid Loss: {} Accuracy: {} Epoch:{}'.format(valid_loss, valid_accuracy, epoch))
        # Record the results of each epoch in the lists
        loss_list.append(valid_loss)
        acc_list.append(valid_accuracy)
print('Training Ended')
print('Average Loss: {} Average Accuracy: {}'.format(np.mean(loss_list), np.mean(acc_list)))
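Before looking at the traceback below, a quick way to check whether the k-fold splits line up is to print the number of rows each fold returns. This is a hypothetical debugging snippet, not part of the original code; it only assumes X and y are the same pandas objects passed to get_k_fold_data above:

# Hypothetical check: print the split sizes per fold before building any TensorDataset
for i in range(k):
    X_tr, y_tr, X_va, y_va = get_k_fold_data(k, i, X, y)
    print(f'fold {i}: X_train={len(X_tr)} y_train={len(y_tr)} '
          f'X_valid={len(X_va)} y_valid={len(y_va)}')

If X_train and y_train report different lengths for some fold, TensorDataset will refuse to pair them, which is exactly the error shown next.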
Error traceback ---------------------------------------------------------------------------
AssertionError                            Traceback (most recent call last)
Cell In[28], line 75
     72 y_valid = torch.tensor(y_valid.astype(np.float32).values, dtype=torch.float32)
     74 # Create the datasets
---> 75 train_dataset = data.TensorDataset(X_train, y_train)
     76 valid_dataset = data.TensorDataset(X_valid, y_valid)
     78 # Get a data iterator

File /opt/conda/lib/python3.10/site-packages/torch/utils/data/dataset.py:204, in TensorDataset.__init__(self, *tensors)
    203 def __init__(self, *tensors: Tensor) -> None:
--> 204     assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors), "Size mismatch between tensors"
    205     self.tensors = tensors

AssertionError: Size mismatch between tensors
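The assertion fires because TensorDataset requires every tensor to have the same length along dimension 0, and here X_train and y_train end up with a different number of rows. In get_k_fold_data, the branch that first initializes the training split slices the features and the labels differently (X.iloc[:start] versus y.iloc[start:end]): whenever the held-out fold is not fold 0, the feature slice is empty while the label slice contains a whole fold, and even when the lengths happen to agree the rows no longer correspond. A minimal sketch of one possible fix, assuming X and y are a pandas DataFrame and Series of equal length, is to slice both with the same indices:

import pandas as pd

def get_k_fold_data(k, i, X, y):
    """Return the i-th fold as validation data and the remaining folds as training data."""
    assert k > 1
    fold_size = len(X) // k
    X_train, y_train = None, None
    for j in range(k):
        start, end = j * fold_size, (j + 1) * fold_size
        X_part, y_part = X.iloc[start:end], y.iloc[start:end]  # slice X and y identically
        if j == i:
            X_valid, y_valid = X_part, y_part
        elif X_train is None:
            X_train, y_train = X_part, y_part
        else:
            X_train = pd.concat([X_train, X_part], ignore_index=True)
            y_train = pd.concat([y_train, y_part], ignore_index=True)
    return X_train, y_train, X_valid, y_valid

With identical slices, the training features and labels always keep the same number of rows, so data.TensorDataset(X_train, y_train) passes its size check.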