OP | Posted on 2024-10-22 22:04:01
from mlxtend.regressor import StackingRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import numpy as np
import matplotlib.pyplot as plt

# Initialize the base models
models = [LR_model, dt_model, svm_model, rf_model, knn_model, simple_dnn_model]

print('base model')
for model in models:
    model.fit(X_train, y_train)
    pred = model.predict(X_valid)
    print("loss is {}".format(mean_squared_error(y_valid, pred)))

# The meta-regressor must be an instance, not the class itself
sclf = StackingRegressor(regressors=models, meta_regressor=LinearRegression())

# Train the stacked regressor
sclf.fit(X_train, y_train)
pred = sclf.predict(X_valid)

print('stacking model')
print("loss is {}".format(mean_squared_error(y_valid, pred)))

plt.scatter(np.arange(len(pred)), pred)
plt.plot(np.arange(len(y_valid)), y_valid)
plt.show()
 
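A caveat about this cell, separate from the traceback below: mlxtend's StackingRegressor drives every base model through the scikit-learn fit/predict interface, and a raw PyTorch nn.Module provides neither method, so even with the NameError resolved the loop would fail at model.fit for simple_dnn_model. A minimal sketch of an adapter, assuming standard scikit-learn conventions (the class name TorchRegressorWrapper, the epoch count, and the learning rate are illustrative, not from the original post):

import numpy as np
import torch
from sklearn.base import BaseEstimator, RegressorMixin

class TorchRegressorWrapper(BaseEstimator, RegressorMixin):
    # Hypothetical adapter: gives a PyTorch module the fit/predict
    # interface that mlxtend and scikit-learn expect.
    def __init__(self, module, epochs=100, lr=1e-4):
        self.module = module
        self.epochs = epochs
        self.lr = lr

    def fit(self, X, y):
        X_t = torch.as_tensor(np.asarray(X), dtype=torch.float32)
        y_t = torch.as_tensor(np.asarray(y), dtype=torch.float32).reshape(-1, 1)
        optimizer = torch.optim.Adam(self.module.parameters(), lr=self.lr)
        criterion = torch.nn.MSELoss()  # regression loss, matching the MSE evaluation
        self.module.train()
        for _ in range(self.epochs):  # full-batch updates, enough for a sketch
            optimizer.zero_grad()
            fit_loss = criterion(self.module(X_t), y_t)
            fit_loss.backward()
            optimizer.step()
        return self

    def predict(self, X):
        X_t = torch.as_tensor(np.asarray(X), dtype=torch.float32)
        self.module.eval()
        with torch.no_grad():
            return self.module(X_t).numpy().ravel()

With such a wrapper the network would enter the list as TorchRegressorWrapper(simple_dnn_model). Note also that SimpleNN ends in torch.sigmoid, so its outputs are confined to (0, 1), which only suits a regression target in that range.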
 
Running the stacking cell reports that simple_dnn_model is not defined:

---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
Cell In[43], line 5
      2 from sklearn.metrics import mean_squared_error
      4 # Initialize the base models
----> 5 models = [LR_model, dt_model, svm_model, rf_model,knn_model,simple_dnn_model]
      7 print('base model')
      8 for model in models:

NameError: name 'simple_dnn_model' is not defined
 
But this model was already defined and run earlier:

# DNN model (train_test_split)
import numpy as np
import torch
import torch.nn as nn
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
from torch import optim

# Define the neural network model
dropout1, dropout2 = 0.3, 0.6

class SimpleNN(nn.Module):
    def __init__(self):
        super(SimpleNN, self).__init__()  # old-style super() call names the subclass explicitly
        self.dense = nn.Sequential(
            nn.Flatten(),
            nn.Linear(12, 128),
            nn.ReLU(),
            nn.Dropout(dropout1),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Dropout(dropout2),
            nn.Linear(256, 1),
        )

    def forward(self, X):
        x = self.dense(X)
        output = torch.sigmoid(x)
        return output

# Initialize the model, loss, and optimizer
simple_dnn_model = SimpleNN()
loss = nn.BCELoss()  # define the loss function
optimizer = optim.Adam(simple_dnn_model.parameters(), lr=0.0001)  # define the optimizer
 
  
# Initialize the metric lists
acc_list = []
loss_list = []

# Training settings
batch_size = 99
num_epochs = 1000

# Build the datasets
train_dataset = data.TensorDataset(X_train, y_train)
valid_dataset = data.TensorDataset(X_valid, y_valid)

# Get data iterators; shuffle=True is equivalent to sampler=RandomSampler(dataset)
train_iter = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
valid_iter = DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
 
  
# Training loop
for epoch in range(num_epochs):
    train_loss = 0
    num_right = 0
    for tensor_x, tensor_y in train_iter:  # gradient updates on the training set
        tensor_x = tensor_x.float()
        tensor_y = tensor_y.float().reshape(-1, 1)
        optimizer.zero_grad()  # clear the gradients
        pre_train = simple_dnn_model(tensor_x)
        train_l = loss(pre_train, tensor_y)  # named train_l to avoid shadowing the global loss
        train_l.backward()  # backpropagation
        optimizer.step()  # gradient descent step

        train_loss += train_l.item() * len(tensor_x)  # accumulate the batch loss
        result = [1 if out >= 0.5 else 0 for out in pre_train]
        num_right += np.sum(np.array(result) == tensor_y.numpy().reshape(-1))

    # Average over the full training set once the epoch is done
    train_loss = train_loss / len(train_iter.dataset)
    train_accuracy = num_right / len(train_iter.dataset)

    if epoch % 200 == 0:
        print('Loss: {} Accuracy: {} Epoch:{}'.format(train_loss, train_accuracy, epoch))
 
  
    # Evaluate on the validation set without tracking gradients
    simple_dnn_model.eval()  # disable dropout during evaluation
    with torch.no_grad():
        valid_loss = 0
        num_right = 0
        for tensor_x, tensor_y in valid_iter:
            tensor_x = tensor_x.float()
            tensor_y = tensor_y.float().reshape(-1, 1)
            pre_valid = simple_dnn_model(tensor_x)
            valid_l = loss(pre_valid, tensor_y)
            valid_loss += valid_l.item() * len(tensor_x)
            result = [1 if out >= 0.5 else 0 for out in pre_valid]
            num_right += np.sum(np.array(result) == tensor_y.numpy().reshape(-1))

        valid_loss = valid_loss / len(valid_iter.dataset)
        valid_accuracy = num_right / len(valid_iter.dataset)

        if epoch % 200 == 0:
            print('Valid Loss: {} Accuracy: {} Epoch:{}'.format(valid_loss, valid_accuracy, epoch))
    simple_dnn_model.train()  # back to training mode for the next epoch
 
  
    # Record each epoch's validation metrics
    loss_list.append(valid_loss)
    acc_list.append(valid_accuracy)

print('Training Ended')
print('Average Loss: {} Average Accuracy: {}'.format(np.mean(loss_list), np.mean(acc_list)))
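As for the NameError itself: it means that at the moment Cell In[43] ran, the name simple_dnn_model was not in the kernel's namespace, regardless of what the cells above contain; typical causes are a kernel restart or the defining cell stopping on an earlier exception before finishing. A quick check to run in a fresh cell (an illustrative diagnostic, not part of the original code):

# Check whether the name survived into the current kernel.
try:
    simple_dnn_model
except NameError:
    print('simple_dnn_model is missing: re-run the cell that defines SimpleNN')
else:
    print(type(simple_dnn_model))  # should report SimpleNN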
 