如何添加绘制损失函数曲线
# Use a DNN model (k-fold cross-validation)
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch import optim
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
#定义神经网络模型
class SimpleNN(nn.Module):
    """Fully connected regression network: 154 input features -> 1 output.

    Three hidden layers (154 -> 1024 -> 1024 -> 256), each followed by a
    ReLU activation and dropout(p=0.2); all linear weights use
    Xavier-uniform initialization.
    """

    def __init__(self):
        super(SimpleNN, self).__init__()
        self.hidden_layer1 = nn.Linear(154, 1024)
        self.hidden_layer2 = nn.Linear(1024, 1024)
        self.hidden_layer3 = nn.Linear(1024, 256)
        self.output_layer = nn.Linear(256, 1)
        self.dropout = nn.Dropout(p=0.2)
        # Xavier-uniform init keeps activation variance stable across layers.
        nn.init.xavier_uniform_(self.hidden_layer1.weight)
        nn.init.xavier_uniform_(self.hidden_layer2.weight)
        nn.init.xavier_uniform_(self.hidden_layer3.weight)
        nn.init.xavier_uniform_(self.output_layer.weight)

    def forward(self, x):
        out = torch.nn.functional.relu(self.hidden_layer1(x))
        out = self.dropout(out)
        out = torch.nn.functional.relu(self.hidden_layer2(out))
        out = self.dropout(out)
        out = torch.nn.functional.relu(self.hidden_layer3(out))
        out = self.dropout(out)
        # NOTE(review): the final ReLU clamps predictions to >= 0 and zeroes
        # gradients whenever the pre-activation is negative; for a strictly
        # positive regression target consider predicting in log-space instead.
        return torch.relu(self.output_layer(out))
# Hyperparameters
k = 5  # number of cross-validation folds
batch_size = 64
num_epochs = 1000
weight_decay = 0.001  # L2 penalty passed to AdamW
# Initialize the model and optimizer.
# NOTE(review): `device` is not defined in this chunk — presumably a
# torch.device set earlier in the notebook; confirm before running.
# NOTE(review): the model and optimizer are created once and reused across
# all k folds below, so later folds start from already-trained weights,
# biasing the cross-validation estimate.
dnn_model = SimpleNN().to(device)# move the model to the GPU
optimizer = optim.AdamW(dnn_model.parameters(),lr=0.0001,weight_decay=weight_decay) # AdamW optimizer with weight decay
#k折交叉验证选取训练集与验证集
def get_k_fold_data(k, i, X, y):
    """Split (X, y) into train/validation sets for fold ``i`` of ``k``.

    The rows are cut into ``k`` contiguous folds of equal size; fold ``i``
    becomes the validation set and the remaining folds are concatenated
    into the training set. Trailing rows beyond ``k * fold_size`` are
    dropped.

    Args:
        k: total number of folds (must be > 1).
        i: index of the validation fold (0 <= i < k).
        X: feature DataFrame.
        y: target Series/DataFrame, row-aligned with ``X``.

    Returns:
        Tuple ``(X_train, y_train, X_valid, y_valid)``.
    """
    assert k > 1
    fold_size = len(X) // k
    X_train, y_train = None, None
    for j in range(k):
        start, end = j * fold_size, (j + 1) * fold_size
        X_part, y_part = X.iloc[start:end], y.iloc[start:end]
        if j == i:
            X_valid, y_valid = X_part, y_part
        elif X_train is None:
            X_train, y_train = X_part, y_part
        else:
            # ignore_index=True relabels rows 0..n-1 so the later .values
            # conversion does not depend on the original index.
            X_train = pd.concat([X_train, X_part], ignore_index=True)
            y_train = pd.concat([y_train, y_part], ignore_index=True)
    return X_train, y_train, X_valid, y_valid
# Per-fold final-epoch losses; averaged after all folds for the CV estimate.
# NOTE(review): X, y, device and MSLE_loss are defined outside this chunk.
train_ls, valid_ls = [], []
for i in range(k):
    X_train, y_train, X_valid, y_valid = get_k_fold_data(k, i, X, y)
    print(f'FOLD {i}')
    print('--------------------------------')
    # DataFrame -> NumPy float32 -> torch tensor, moved to the training device.
    X_train = torch.tensor(X_train.astype(np.float32).values, dtype=torch.float32).to(device)
    y_train = torch.tensor(y_train.astype(np.float32).values, dtype=torch.float32).to(device)
    X_valid = torch.tensor(X_valid.astype(np.float32).values, dtype=torch.float32).to(device)
    y_valid = torch.tensor(y_valid.astype(np.float32).values, dtype=torch.float32).to(device)
    # Build datasets and loaders (shuffle=True is equivalent to a RandomSampler).
    train_ds = data.TensorDataset(X_train, y_train)
    valid_ds = data.TensorDataset(X_valid, y_valid)
    train_iter = DataLoader(dataset=train_ds, batch_size=batch_size, shuffle=True, num_workers=0)
    # Validation order does not affect the summed loss, so no shuffling needed.
    valid_iter = DataLoader(dataset=valid_ds, batch_size=batch_size, shuffle=False, num_workers=0)
    for epoch in range(num_epochs):
        dnn_model.train()  # enable dropout for training batches
        train_loss = 0.0
        for tensor_x, tensor_y in train_iter:
            tensor_x = tensor_x.float()
            tensor_y = tensor_y.float().reshape(-1, 1)
            optimizer.zero_grad()                     # clear stale gradients
            pre_train = dnn_model(tensor_x)
            train_l = MSLE_loss(pre_train, tensor_y)  # distinct name avoids shadowing any global `loss`
            train_l.backward()                        # backpropagate
            optimizer.step()                          # update parameters
            # Weight the batch-mean loss by batch size for a correct epoch mean.
            train_loss += train_l.item() * len(tensor_x)
        train_loss /= len(train_ds)  # mean training loss for this epoch
        if epoch % 50 == 0:
            print('Loss: {} Epoch: {}'.format(train_loss, epoch))
        # Evaluation: no gradient tracking and dropout disabled
        # (the original kept dropout active, inflating validation loss).
        dnn_model.eval()
        with torch.no_grad():
            valid_loss = 0.0
            for tensor_x, tensor_y in valid_iter:
                tensor_x = tensor_x.float()
                tensor_y = tensor_y.float().reshape(-1, 1)
                pre_valid = dnn_model(tensor_x)
                valid_l = MSLE_loss(pre_valid, tensor_y)
                valid_loss += valid_l.item() * len(tensor_x)
            valid_loss /= len(valid_ds)
        if epoch % 50 == 0:
            print('Valid Loss: {} Epoch: {}'.format(valid_loss, epoch))
    # Record this fold's final-epoch losses.
    train_ls.append(train_loss)
    valid_ls.append(valid_loss)
print('Training Ended')
print('Train Average Loss: {} Valid Average Loss: {}'.format(np.mean(train_ls), np.mean(valid_ls)))
如何在上述代码中绘制损失函数曲线?训练损失和验证损失画在同一张图 要在训练过程中绘制损失函数曲线,你可以利用`matplotlib`库来绘制损失随训练迭代次数变化的图。我们可以在每个epoch结束时记录训练和验证损失,然后在所有epochs结束后再绘制这些数据。
以下是如何修改你的代码以绘制损失曲线的步骤:
1. 在代码最前面添加`matplotlib`库的导入:
import matplotlib.pyplot as plt
2. 在每个epoch结束时,记录下当前epoch的训练和验证损失:
# 初始化列表来存储每个epoch的损失
epoch_train_losses = []
epoch_valid_losses = []
for epoch in range(num_epochs):
train_loss = 0
...
train_loss /= len(train_ds)
epoch_train_losses.append(train_loss)# 记录训练损失
with torch.no_grad():
valid_loss = 0
...
valid_loss /= len(valid_ds)
epoch_valid_losses.append(valid_loss)# 记录验证损失
3. 在所有k折训练完成后,绘制损失曲线:
# 训练结束后绘制损失曲线
plt.plot(range(num_epochs), epoch_train_losses, label='Train Loss')
plt.plot(range(num_epochs), epoch_valid_losses, label='Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Loss Curve')
plt.legend()
plt.show()
注意:
- 确保`matplotlib`库已经安装,你可以通过运行`pip install matplotlib`来安装它。
- 上述代码在每个epoch记录损失,并在所有训练结束后绘图。根据需要,你可以调整记录频率,比如每隔几个epoch记录一次以减少数据量。
- 如果你希望在每折k折结束时绘制其损失曲线,可以在k折内部引入这样的绘图代码,然后为不同folds追加在一起绘图。
以上回复来自 FishC AI 小助理,如未能正确解答您的问题,请继续追问。 FishC 发表于 2024-10-29 10:16
要在训练过程中绘制损失函数曲线,你可以利用`matplotlib`库来绘制损失随训练迭代次数变化的图。我们可以在 ...
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In, line 2
1 # 训练结束后绘制损失曲线
----> 2 plt.plot(range(num_epochs), epoch_train_losses, label='Train Loss')
3 plt.plot(range(num_epochs), epoch_valid_losses, label='Validation Loss')
4 plt.xlabel('Epoch')
File /opt/conda/lib/python3.10/site-packages/matplotlib/pyplot.py:2812, in plot(scalex, scaley, data, *args, **kwargs)
2810 @_copy_docstring_and_deprecators(Axes.plot)
2811 def plot(*args, scalex=True, scaley=True, data=None, **kwargs):
-> 2812 return gca().plot(
2813 *args, scalex=scalex, scaley=scaley,
2814 **({"data": data} if data is not None else {}), **kwargs)
File /opt/conda/lib/python3.10/site-packages/matplotlib/axes/_axes.py:1688, in Axes.plot(self, scalex, scaley, data, *args, **kwargs)
1445 """
1446 Plot y versus x as lines and/or markers.
1447
(...)
1685 (``'green'``) or hex strings (``'#008000'``).
1686 """
1687 kwargs = cbook.normalize_kwargs(kwargs, mlines.Line2D)
-> 1688 lines = [*self._get_lines(*args, data=data, **kwargs)]
1689 for line in lines:
1690 self.add_line(line)
File /opt/conda/lib/python3.10/site-packages/matplotlib/axes/_base.py:311, in _process_plot_var_args.__call__(self, data, *args, **kwargs)
309 this += args,
310 args = args
--> 311 yield from self._plot_args(
312 this, kwargs, ambiguous_fmt_datakey=ambiguous_fmt_datakey)
File /opt/conda/lib/python3.10/site-packages/matplotlib/axes/_base.py:504, in _process_plot_var_args._plot_args(self, tup, kwargs, return_kwargs, ambiguous_fmt_datakey)
501 self.axes.yaxis.update_units(y)
503 if x.shape != y.shape:
--> 504 raise ValueError(f"x and y must have same first dimension, but "
505 f"have shapes {x.shape} and {y.shape}")
506 if x.ndim > 2 or y.ndim > 2:
507 raise ValueError(f"x and y can be no greater than 2D, but have "
508 f"shapes {x.shape} and {y.shape}")
ValueError: x and y must have same first dimension, but have shapes (350,) and (1750,)
上述代码修改后报错 Terence888 发表于 2024-10-29 10:47
-------------------------------------------------- ...
报错的原因是`x`和`y`的长度不匹配,可能是由于在每个epoch都进行了多个k-fold的累积。
要解决这个问题,可以在每个k-fold的最后阶段计算平均损失。这是如何做的:
1. 在每个fold开始时,初始化一个临时的存储器:
fold_train_losses = []
fold_valid_losses = []
2. 在epoch循环内记录损失:
fold_train_losses.append(train_loss)
fold_valid_losses.append(valid_loss)
3. 在k-fold结束时,累加这些损失:
if i == 0:
    epoch_train_losses = fold_train_losses
    epoch_valid_losses = fold_valid_losses
else:
    epoch_train_losses = [a + b for a, b in zip(epoch_train_losses, fold_train_losses)]
    epoch_valid_losses = [a + b for a, b in zip(epoch_valid_losses, fold_valid_losses)]
4. 在k-fold循环结束后,计算平均损失:
epoch_train_losses = [loss_sum / k for loss_sum in epoch_train_losses]
epoch_valid_losses = [loss_sum / k for loss_sum in epoch_valid_losses]
5. 然后绘制图形:
plt.plot(range(num_epochs), epoch_train_losses, label='Train Loss')
plt.plot(range(num_epochs), epoch_valid_losses, label='Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Loss Curve')
plt.legend()
plt.show()
这样可以确保`x`和`y`的长度一致,并且每个epoch的损失值是所有fold平均后的值。
以上回复来自 FishC AI 小助理,如未能正确解答您的问题,请继续追问。 FishC 发表于 2024-10-29 11:04
报错的原因是`x`和`y`的长度不匹配,可能是由于在每个epoch都进行了多个k-fold的累积。
要解决这个 ...
#使用dnn模型(k折交叉验证)
import torch
import torch.nn as nn
from torch.utils import data
from torch.utils.data import Dataset,DataLoader
from torch import optim
#定义神经网络模型
class SimpleNN(nn.Module):
    """Three-hidden-layer MLP for regression: 154 features -> 1 output.

    Hidden sizes 1024 -> 1024 -> 256 with ReLU + dropout(p=0.2) after each
    hidden layer; weights are Xavier-uniform initialized.
    """

    def __init__(self):
        super(SimpleNN, self).__init__()
        self.hidden_layer1 = nn.Linear(154, 1024)
        self.hidden_layer2 = nn.Linear(1024, 1024)
        self.hidden_layer3 = nn.Linear(1024, 256)
        self.output_layer = nn.Linear(256, 1)
        self.dropout = nn.Dropout(p=0.2)
        # Xavier-uniform initialization on every linear layer's weight.
        for layer in (self.hidden_layer1, self.hidden_layer2,
                      self.hidden_layer3, self.output_layer):
            nn.init.xavier_uniform_(layer.weight)

    def forward(self, x):
        out = torch.nn.functional.relu(self.hidden_layer1(x))
        out = self.dropout(out)
        out = torch.nn.functional.relu(self.hidden_layer2(out))
        out = self.dropout(out)
        out = torch.nn.functional.relu(self.hidden_layer3(out))
        out = self.dropout(out)
        # NOTE(review): final ReLU clamps predictions to >= 0 — fine only if
        # the regression target is non-negative; confirm against the data.
        return torch.relu(self.output_layer(out))
# Hyperparameters
k = 5  # number of cross-validation folds
batch_size = 112
num_epochs = 350
weight_decay = 0.001  # L2 penalty passed to Adam
# Initialize the model and optimizer.
# NOTE(review): `device` is not defined in this chunk — presumably a
# torch.device set earlier in the notebook; confirm before running.
# NOTE(review): model/optimizer are shared across all k folds below, so
# later folds continue training already-fitted weights.
dnn_model = SimpleNN().to(device)# move the model to the GPU
optimizer = optim.Adam(dnn_model.parameters(),lr=0.0001,weight_decay=weight_decay) # Adam optimizer
#k折交叉验证选取训练集与验证集
def get_k_fold_data(k, i, X, y):
    """Return the train/validation split for fold ``i`` of ``k``.

    Rows are partitioned into ``k`` contiguous, equal-size folds; fold ``i``
    is the validation set, the rest are concatenated into the training set.
    Rows beyond ``k * fold_size`` are dropped.

    Args:
        k: total number of folds (must be > 1).
        i: validation-fold index (0 <= i < k).
        X: feature DataFrame.
        y: target Series/DataFrame aligned with ``X``.

    Returns:
        ``(X_train, y_train, X_valid, y_valid)``.
    """
    assert k > 1
    fold_size = len(X) // k
    X_train, y_train = None, None
    for j in range(k):
        start, end = j * fold_size, (j + 1) * fold_size
        X_part, y_part = X.iloc[start:end], y.iloc[start:end]
        if j == i:
            X_valid, y_valid = X_part, y_part
        elif X_train is None:
            X_train, y_train = X_part, y_part
        else:
            # ignore_index=True gives the training frame a clean 0..n-1 index.
            X_train = pd.concat([X_train, X_part], ignore_index=True)
            y_train = pd.concat([y_train, y_part], ignore_index=True)
    return X_train, y_train, X_valid, y_valid
# Per-fold final-epoch losses; averaged after all folds for the CV estimate.
# NOTE(review): X, y, device, MSLE_loss and d2l are defined outside this chunk.
train_ls, valid_ls = [], []
for i in range(k):
    X_train, y_train, X_valid, y_valid = get_k_fold_data(k, i, X, y)
    print(f'FOLD {i}')
    print('--------------------------------')
    # DataFrame -> NumPy float32 -> torch tensor, moved to the training device.
    X_train = torch.tensor(X_train.astype(np.float32).values, dtype=torch.float32).to(device)
    y_train = torch.tensor(y_train.astype(np.float32).values, dtype=torch.float32).to(device)
    X_valid = torch.tensor(X_valid.astype(np.float32).values, dtype=torch.float32).to(device)
    y_valid = torch.tensor(y_valid.astype(np.float32).values, dtype=torch.float32).to(device)
    # Build datasets and loaders (shuffle=True is equivalent to a RandomSampler).
    train_ds = data.TensorDataset(X_train, y_train)
    valid_ds = data.TensorDataset(X_valid, y_valid)
    train_iter = DataLoader(dataset=train_ds, batch_size=batch_size, shuffle=True, num_workers=0)
    valid_iter = DataLoader(dataset=valid_ds, batch_size=batch_size, shuffle=False, num_workers=0)
    # Loss history for every epoch of THIS fold. These lists have length
    # num_epochs, matching the x-axis below — the original passed the
    # fold-level lists (length i+1) to d2l.plot, which raised
    # "x and y must have same first dimension ... (350,) and (2,)".
    epoch_train_ls, epoch_valid_ls = [], []
    for epoch in range(num_epochs):
        dnn_model.train()  # enable dropout for training batches
        train_loss = 0.0
        for tensor_x, tensor_y in train_iter:
            tensor_x = tensor_x.float()
            tensor_y = tensor_y.float().reshape(-1, 1)
            optimizer.zero_grad()                     # clear stale gradients
            pre_train = dnn_model(tensor_x)
            train_l = MSLE_loss(pre_train, tensor_y)  # distinct name avoids shadowing any global `loss`
            train_l.backward()                        # backpropagate
            optimizer.step()                          # update parameters
            train_loss += train_l.item() * len(tensor_x)
        train_loss /= len(train_ds)  # mean training loss for this epoch
        if epoch % 50 == 0:
            print('Loss: {} Epoch: {}'.format(train_loss, epoch))
        # Evaluation: no gradient tracking and dropout disabled.
        dnn_model.eval()
        with torch.no_grad():
            valid_loss = 0.0
            for tensor_x, tensor_y in valid_iter:
                tensor_x = tensor_x.float()
                tensor_y = tensor_y.float().reshape(-1, 1)
                pre_valid = dnn_model(tensor_x)
                valid_l = MSLE_loss(pre_valid, tensor_y)
                valid_loss += valid_l.item() * len(tensor_x)
            valid_loss /= len(valid_ds)
        if epoch % 50 == 0:
            print('Valid Loss: {} Epoch: {}'.format(valid_loss, epoch))
        epoch_train_ls.append(train_loss)
        epoch_valid_ls.append(valid_loss)
    if i == 0:
        # Plot train/valid loss curves for the first fold: both y-series now
        # have exactly num_epochs points, matching the x-axis.
        d2l.plot(list(range(1, num_epochs + 1)), [epoch_train_ls, epoch_valid_ls],
                 xlabel='epoch', ylabel='loss', xlim=[1, num_epochs],
                 legend=['train', 'valid'], yscale='log')
    # Record this fold's final-epoch losses.
    train_ls.append(train_loss)
    valid_ls.append(valid_loss)
print('Training Ended')
print('Train Average Loss: {} Valid Average Loss: {}'.format(np.mean(train_ls), np.mean(valid_ls)))
绘制图像时报错---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In, line 133
130 print('Valid Loss: {}Epoch:{}'.format(valid_loss, epoch))
132 if i == 0:
--> 133 d2l.plot(list(range(1, num_epochs + 1)), ,
134 xlabel='epoch', ylabel='rmse', xlim=,
135 legend=['train', 'valid'], yscale='log')
136 #将每折的损失添加到列表中
137 train_ls.append(train_loss)
Cell In, line 101, in plot(X, Y, xlabel, ylabel, legend, xlim, ylim, xscale, yscale, fmts, figsize, axes)
99 for x, y, fmt in zip(X, Y, fmts):
100 if len(x):
--> 101 axes.plot(x, y, fmt)
102 else:
103 axes.plot(y, fmt)
File /opt/conda/lib/python3.10/site-packages/matplotlib/axes/_axes.py:1688, in Axes.plot(self, scalex, scaley, data, *args, **kwargs)
1445 """
1446 Plot y versus x as lines and/or markers.
1447
(...)
1685 (``'green'``) or hex strings (``'#008000'``).
1686 """
1687 kwargs = cbook.normalize_kwargs(kwargs, mlines.Line2D)
-> 1688 lines = [*self._get_lines(*args, data=data, **kwargs)]
1689 for line in lines:
1690 self.add_line(line)
File /opt/conda/lib/python3.10/site-packages/matplotlib/axes/_base.py:311, in _process_plot_var_args.__call__(self, data, *args, **kwargs)
309 this += args,
310 args = args
--> 311 yield from self._plot_args(
312 this, kwargs, ambiguous_fmt_datakey=ambiguous_fmt_datakey)
File /opt/conda/lib/python3.10/site-packages/matplotlib/axes/_base.py:504, in _process_plot_var_args._plot_args(self, tup, kwargs, return_kwargs, ambiguous_fmt_datakey)
501 self.axes.yaxis.update_units(y)
503 if x.shape != y.shape:
--> 504 raise ValueError(f"x and y must have same first dimension, but "
505 f"have shapes {x.shape} and {y.shape}")
506 if x.ndim > 2 or y.ndim > 2:
507 raise ValueError(f"x and y can be no greater than 2D, but have "
508 f"shapes {x.shape} and {y.shape}")
ValueError: x and y must have same first dimension, but have shapes (350,) and (2,)
如何修改 Terence888 发表于 2024-10-29 11:34
#使用dnn模型(k折交叉验证)
import torch
import ...
你遇到的错误是因为`x`和`y`列表在绘图时的维度不匹配。具体地说,是绘图函数`d2l.plot`中所传的`y`数据有问题。结合你的代码,我来帮你理清思路,并给出完整、正确的损失曲线绘制方法。
首先,我们需要在每次迭代过程中记录下训练和验证损失,并确保它们和`x`轴所表示的`epoch`数量是一致的。以下是一些清晰的步骤及代码示例:
1. 确保导入了`matplotlib`库:
import matplotlib.pyplot as plt
2. 初始化列表来存储每个epoch的损失:
train_ls = []
valid_ls = []
3. 在训练循环内部添加记录损失的逻辑:
for epoch in range(num_epochs):
train_loss = 0
...
train_loss /= len(train_ds)
fold_train_losses.append(train_loss)# 记录训练损失
valid_loss = 0
...
valid_loss /= len(valid_ds)
fold_valid_losses.append(valid_loss)# 记录验证损失
4. 在k-fold循环外进行绘图:
# 初始化列表以记录平均损失
avg_train_losses = [0.0] * num_epochs
avg_valid_losses = [0.0] * num_epochs
# 对于每个Fold,计算损失(fold_train_losses/fold_valid_losses 为每折记录的长度为 num_epochs 的列表)
for epoch in range(num_epochs):
    avg_train_losses[epoch] += fold_train_losses[epoch] / k
    avg_valid_losses[epoch] += fold_valid_losses[epoch] / k
# 画图
plt.plot(range(1, num_epochs + 1), avg_train_losses, label='Train Loss')
plt.plot(range(1, num_epochs + 1), avg_valid_losses, label='Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Loss Curve')
plt.legend()
plt.show()
这里,我们在每个Epoch的基础上累加来自不同Folds的损失,并计算其平均值。这会确保绘图时数据维度是一致的。`avg_train_losses`和`avg_valid_losses`都是长度为`num_epochs`的列表,这样可以保证`x`和`y`在绘图时的维度相同。
希望这能解决你在绘图过程中遇到的问题!如果在某个地方有不清楚或需要进一步解释的细节,请随时告知!
以上回复来自 FishC AI 小助理,如未能正确解答您的问题,请继续追问。
页:
[1]