Training with the ResNet and DenseNet network models

Posted on 2023-4-23 16:40:51 | Reward: 20 鱼币
Please modify the provided training code train.py, prediction code predict.py, and evaluation code evaluate.py according to the multi-label-classification ResNet model and DenseNet model below, so that the code works with each of the two networks.

The ResNet model code is as follows:
import torch
import torch.nn as nn
import torch.nn.functional as F


# ResNet architecture (CIFAR-style: the 3x3 stem and fixed 4x4 average pooling are sized for 32x32 inputs)
class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class ResNet(nn.Module):
    def __init__(self, block, num_blocks, num_classes):
        super(ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        out = torch.sigmoid(out)
        return out


def ResNet18(num_classes):
    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes)


The DenseNet model code is as follows:
import torch
import torch.nn as nn
import torch.nn.functional as F

# DenseNet architecture

class BasicBlock(nn.Module):
    def __init__(self, inplanes, outplanes):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(outplanes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(outplanes, outplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(outplanes)
        self.relu2 = nn.ReLU(inplace=True)

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)

        out = self.conv2(out)
        out = self.bn2(out)

        out += residual
        out = self.relu2(out)

        return out

class DenseBlock(nn.Module):
    def __init__(self, inplanes, outplanes, num_layers):
        super(DenseBlock, self).__init__()
        self.blocks = nn.ModuleList([BasicBlock(inplanes + i * outplanes, outplanes) for i in range(num_layers)])

    def forward(self, x):
        features = [x]
        for block in self.blocks:
            y = block(torch.cat(features, dim=1))
            features.append(y)

        return torch.cat(features, dim=1)

class TransitionToFeatureMap(nn.Sequential):
    def __init__(self, inplanes, outplanes):
        super(TransitionToFeatureMap, self).__init__(
            nn.Conv2d(inplanes, outplanes, kernel_size=1),
            nn.BatchNorm2d(outplanes),
            nn.AvgPool2d(kernel_size=2, stride=2))

class DenseNet(nn.Module):
    def __init__(self, num_classes, growth_rate, layers_num):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)

        self.layer1 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            DenseBlock(64, growth_rate, layers_num[0]))

        self.inplanes = 64 + growth_rate * layers_num[0]
        self.layer2 = nn.Sequential(
            TransitionToFeatureMap(self.inplanes, 128),
            DenseBlock(128, growth_rate, layers_num[1]))

        self.inplanes += growth_rate * layers_num[1]
        self.layer3 = nn.Sequential(
            TransitionToFeatureMap(self.inplanes, 256),
            DenseBlock(256, growth_rate, layers_num[2]))

        self.inplanes += growth_rate * layers_num[2]
        self.layer4 = nn.Sequential(
            TransitionToFeatureMap(self.inplanes, 512),
            DenseBlock(512, growth_rate, layers_num[3]))

        self.inplanes += growth_rate * layers_num[3]
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(self.inplanes, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        x = torch.sigmoid(x)

        return x

def DenseNet121(num_classes):
    return DenseNet(num_classes=num_classes,
                    growth_rate=32,
                    layers_num=[6, 12, 24, 16])


Training code train.py:
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision import transforms
from model import ResNet18
import matplotlib.pyplot as plt

# random seed
torch.manual_seed(0)

# hyperparameters and data path
lr = 0.0001
num_epochs = 20
batch_size = 8
data_path = '/home/a504/mjw/Code/data_set/plantvillage_demo1'


def main():
    # data preprocessing
    transform_train = transforms.Compose([transforms.RandomResizedCrop(224),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ToTensor()])
    train_dataset = ImageFolder(root=data_path + '/train/', transform=transform_train)
    train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)

    transform_val = transforms.Compose([transforms.Resize(256),
                                        transforms.CenterCrop(224),
                                        transforms.ToTensor()])
    val_dataset = ImageFolder(root=data_path + '/val/', transform=transform_val)
    val_loader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False)

    # initialize the model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = ResNet18(num_classes=len(train_dataset.classes)).to(device)

    # define loss function and optimizer
    criterion = nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # train the model
    train_losses = []
    val_losses = []
    for epoch in range(num_epochs):
        # training
        train_loss = 0.0
        model.train()
        for images, labels in train_loader:
            images, labels = images.to(device), labels.to(device)
            labels = nn.functional.one_hot(labels, num_classes=len(train_dataset.classes)).float()
            optimizer.zero_grad()

            # forward pass
            outputs = model(images)
            loss = criterion(outputs, labels)

            # backward pass and optimize
            loss.backward()
            optimizer.step()

            train_loss += loss.item() * images.size(0)
        train_loss = train_loss / len(train_loader.dataset)
        train_losses.append(train_loss)

        # validation
        val_loss = 0.0
        model.eval()
        with torch.no_grad():
            for images, labels in val_loader:
                images, labels = images.to(device), labels.to(device)
                labels = nn.functional.one_hot(labels, num_classes=len(val_dataset.classes)).float()
                outputs = model(images)
                loss = criterion(outputs, labels)
                val_loss += loss.item() * images.size(0)
            val_loss = val_loss / len(val_loader.dataset)
            val_losses.append(val_loss)

        print('Epoch [{}/{}], Train Loss: {:.4f}, Val Loss: {:.4f}'.
              format(epoch + 1, num_epochs, train_loss, val_loss))

    # save the model weights
    torch.save(model.state_dict(), 'resnet18_multi_label_classification.pth')

    # visualize the training process
    plt.plot(range(num_epochs), train_losses, '-b', label='train')
    plt.plot(range(num_epochs), val_losses, '-r', label='validation')
    plt.legend(loc='lower right')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show()


if __name__ == '__main__':
    main()


Prediction code predict.py:
import torch
import cv2
import numpy as np
from model import ResNet18
import matplotlib.pyplot as plt

# load the model
num_classes = 4
model = ResNet18(num_classes)
model.load_state_dict(torch.load('vgg16_multi_label_classification.pth'))
model.eval()

# load and preprocess the image
image_path = 'OIP-C.jpeg'
image = cv2.imread(image_path)
image = cv2.resize(image, (224, 224))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_normalized = (image / 255.0 - 0.5) / 0.5
input_tensor = torch.from_numpy(image_normalized.transpose(2, 0, 1)).float().unsqueeze(0)

# predict and visualize the result
with torch.no_grad():
    output = model(input_tensor).squeeze().numpy()

class_names = ['Apple___Apple_scab', 'Apple___Black_rot', 'Apple___Cedar_apple_rust', 'Apple___healthy']

# acquire top-2 classes and probabilities
top_indices = output.argsort()[-2:][::-1]
top_probabilities = output[top_indices]
top_class_names = [class_names[i] for i in top_indices]

# annotate the image with the predicted classes and probabilities
fig, ax = plt.subplots()
ax.imshow(image)
for idx, (class_name, prob) in enumerate(zip(top_class_names, top_probabilities)):
    plt.text(5, 15 + 20 * idx, f'{class_name}: {prob:.2f}', fontsize=12, color='red')

plt.show()


Evaluation code evaluate.py:
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision import transforms
from tqdm import tqdm
from model import ResNet18

# constants and paths
num_classes = 4
model_path = './vgg16_multi_label_classification.pth'
test_path = '/home/a504/mjw/Code/data_set/plantvillage_demo1/val'

# data
transform_test = transforms.Compose([transforms.Resize(256),
                                     transforms.CenterCrop(224),
                                     transforms.ToTensor()])
test_dataset = ImageFolder(test_path, transform=transform_test)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

# model
model = ResNet18(num_classes)
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
model.eval()

# define loss function
criterion = nn.BCELoss()

# evaluate the model
hamming_loss = []
accuracy = []
precision = []
recall = []
f_beta = []

with torch.no_grad():
    for idx, (images, labels) in tqdm(enumerate(test_loader), total=len(test_loader)):
        # convert integer class labels to multi-hot vectors so they match the sigmoid outputs
        labels = nn.functional.one_hot(labels, num_classes=num_classes)
        outputs = model(images)
        predicted = (outputs > 0.5).float()  # threshold = 0.5

        hamming_loss.append((predicted != labels.float()).sum().item() / (num_classes * len(labels)))

        tp = ((predicted == 1) & (labels.float() == 1)).sum().item()
        fp = ((predicted == 1) & (labels.float() == 0)).sum().item()
        tn = ((predicted == 0) & (labels.float() == 0)).sum().item()
        fn = ((predicted == 0) & (labels.float() == 1)).sum().item()

        accuracy.append((tp + tn) / (num_classes * len(labels)))
        precision.append(tp / (tp + fp + 1e-7))
        recall.append(tp / (tp + fn + 1e-7))
        f_beta.append(((1 + 0.5 ** 2) * precision[-1] * recall[-1]) / ((0.5 ** 2) * precision[-1] + recall[-1] + 1e-7))

print("Hamming Loss: {:.4f}".format(sum(hamming_loss) / len(hamming_loss)))
print("Accuracy: {:.4f}".format(sum(accuracy) / len(accuracy)))
print("Precision: {:.4f}".format(sum(precision) / len(precision)))
print("Recall: {:.4f}".format(sum(recall) / len(recall)))
print("F-beta Score: {:.4f}".format(sum(f_beta) / len(f_beta)))

Reply posted on 2023-4-23 16:57:35
To adapt the training, prediction, and evaluation code to the ResNet and DenseNet models, you need to import the appropriate model class and change the model instantiation according to the chosen model. The modified code is provided below:

ResNet training code (train_resnet.py):
from model_resnet import ResNet18  # change this line

# the rest of the code stays the same

# initialize the model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = ResNet18(num_classes=len(train_dataset.classes)).to(device)  # change this line


DenseNet training code (train_densenet.py):
from model_densenet import DenseNet121  # change this line

# the rest of the code stays the same

# initialize the model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = DenseNet121(num_classes=len(train_dataset.classes)).to(device)  # change this line


ResNet prediction code (predict_resnet.py):
from model_resnet import ResNet18  # change this line

# the rest of the code stays the same

# load the model
num_classes = 4
model = ResNet18(num_classes)  # change this line
model.load_state_dict(torch.load('resnet18_multi_label_classification.pth'))
model.eval()


DenseNet prediction code (predict_densenet.py):
from model_densenet import DenseNet121  # change this line

# the rest of the code stays the same

# load the model
num_classes = 4
model = DenseNet121(num_classes)  # change this line
model.load_state_dict(torch.load('densenet121_multi_label_classification.pth'))
model.eval()


ResNet evaluation code (evaluate_resnet.py):
from model_resnet import ResNet18  # change this line

# the rest of the code stays the same

# model
model = ResNet18(num_classes)  # change this line
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
model.eval()


DenseNet evaluation code (evaluate_densenet.py):
from model_densenet import DenseNet121  # change this line

# the rest of the code stays the same

# model
model = DenseNet121(num_classes)  # change this line
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
model.eval()


Note: depending on the chosen model, rename `train.py`, `predict.py`, and `evaluate.py` to `train_resnet.py`, `predict_resnet.py`, `evaluate_resnet.py` or `train_densenet.py`, `predict_densenet.py`, `evaluate_densenet.py`. Also make sure the corresponding `model_resnet.py` and `model_densenet.py` contain the code of the selected network.

OP | Posted on 2023-4-23 17:04:30
Last edited by 黎明丿晓小 on 2023-4-23 17:08
Quoting isdkz (2023-4-23 16:57): "To adapt the training, prediction, and evaluation code to the ResNet and DenseNet models, you need to import the appropriate model class according to the chosen model and ..."


Modifying the ResNet training code this way reports an error:
Traceback (most recent call last):
  File "/home/a504/mjw/Code/Multi-label_classification/ResNet_ML/train.py", line 94, in <module>
    main()
  File "/home/a504/mjw/Code/Multi-label_classification/ResNet_ML/train.py", line 54, in main
    outputs = model(images)
  File "/home/a504/mjw/envs/pytorch/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/a504/mjw/Code/Multi-label_classification/ResNet_ML/model.py", line 61, in forward
    out = self.linear(out)
  File "/home/a504/mjw/envs/pytorch/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/a504/mjw/envs/pytorch/lib/python3.8/site-packages/torch/nn/modules/linear.py", line 103, in forward
    return F.linear(input, self.weight, self.bias)
  File "/home/a504/mjw/envs/pytorch/lib/python3.8/site-packages/torch/nn/functional.py", line 1848, in linear
    return torch._C._nn.linear(input, weight, bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (8x25088 and 512x4)

Reply posted on 2023-4-23 17:06:06
Quoting 黎明丿晓小 (2023-4-23 17:04): "Modifying the training code this way reports an error:
Traceback (most recent call last):
  File "/home/a504/mjw/Code/Multi-labe ..."

This error is caused by mismatched matrix shapes in a matrix multiplication. In the error message, mat1 has shape (8x25088) and mat2 has shape (512x4). By the rules of matrix multiplication, the number of columns of mat1 must equal the number of rows of mat2, which is not the case here.

Check your code, especially the parts involving these two matrices, and make sure the shapes are correct. Since you define a linear layer at line 61 of ResNet_ML/model.py, check that the number of incoming features matches that layer's input size. Also check the convolution and pooling layers in your model to make sure they reduce the feature map to the expected size before it reaches the linear layer.

If you can provide more details about the model structure, I can help you further.
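
Concretely, with the posted CIFAR-style ResNet the mismatch comes from feeding it 224x224 images: after layer4 the feature map is 28x28, so `F.avg_pool2d(out, 4)` leaves 512 x 7 x 7 = 25088 features while `self.linear` expects 512 * block.expansion = 512, exactly the (8x25088) vs (512x4) shapes in the traceback. A minimal sketch of one possible fix, assuming the rest of the posted model.py (and its existing torch / F imports) stays unchanged, is to switch the pooling in `ResNet.forward` to global average pooling:

# sketch: in ResNet.forward (model.py), replace the fixed 4x4 pooling with global average pooling
# assumption: everything else in the posted model stays the same
def forward(self, x):
    out = F.relu(self.bn1(self.conv1(x)))
    out = self.layer1(out)
    out = self.layer2(out)
    out = self.layer3(out)
    out = self.layer4(out)
    out = F.adaptive_avg_pool2d(out, (1, 1))  # 1x1 spatially, regardless of input size
    out = out.view(out.size(0), -1)           # shape: (batch, 512 * block.expansion)
    out = self.linear(out)
    out = torch.sigmoid(out)
    return out

Alternatively, keep `F.avg_pool2d(out, 4)` and feed 32x32 inputs, which is the input size this stem (3x3 convolution, no initial max pooling) was written for.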

OP | Posted on 2023-4-23 17:10:33
Quoting isdkz (2023-4-23 17:06): "This error is caused by mismatched matrix shapes in a matrix multiplication. In the error message, mat1 has shape (8x25088), mat2 ..."

The ResNet model details are already provided above. In addition, running the DenseNet training code reports this error:
Traceback (most recent call last):
  File "/home/a504/mjw/Code/Multi-label_classification/DenseNet_ML/train.py", line 94, in <module>
    main()
  File "/home/a504/mjw/Code/Multi-label_classification/DenseNet_ML/train.py", line 54, in main
    outputs = model(images)
  File "/home/a504/mjw/envs/pytorch/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/a504/mjw/Code/Multi-label_classification/DenseNet_ML/model.py", line 88, in forward
    x = self.layer1(x)
  File "/home/a504/mjw/envs/pytorch/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/a504/mjw/envs/pytorch/lib/python3.8/site-packages/torch/nn/modules/container.py", line 141, in forward
    input = module(input)
  File "/home/a504/mjw/envs/pytorch/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/a504/mjw/Code/Multi-label_classification/DenseNet_ML/model.py", line 39, in forward
    y = block(torch.cat(features,dim=1))
  File "/home/a504/mjw/envs/pytorch/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/a504/mjw/Code/Multi-label_classification/DenseNet_ML/model.py", line 26, in forward
    out += residual
RuntimeError: The size of tensor a (32) must match the size of tensor b (64) at non-singleton dimension 1

Reply posted on 2023-4-23 17:14:59
Quoting 黎明丿晓小 (2023-4-23 17:10): "The ResNet model details are already provided above. In addition, running the DenseNet training code reports this error:
Traceback (most recent call last):
..."

This error occurs because the two tensors have mismatched shapes during an element-wise addition. In the error message, tensor a has size 32 and tensor b has size 64; their sizes must match in every non-singleton dimension (here, dimension 1).

In your code the error is raised by the line "out += residual". To resolve it, make sure that out and residual have matching shapes before the element-wise addition.

Check line 26 of your DenseNet_ML/model.py and all related code. This problem usually means a residual connection was created without correctly adjusting the feature-map size or the number of channels; in a DenseNet, the feature-map size and channel count are normally adjusted with convolution or pooling layers.

If you can provide more details about your DenseNet model structure, I can help you further.
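
Looking at the posted model, the mismatch comes from `BasicBlock` adding its input (for example 64 channels) back onto its output (`growth_rate` = 32 channels). Inside a dense block the layers are already combined by concatenation in `DenseBlock.forward`, so the additive shortcut is not needed there. A minimal sketch of one possible fix, assuming the rest of the posted model stays unchanged, is to drop the addition and let `BasicBlock` return only the newly produced feature maps:

# sketch: dense layer without the additive shortcut
# assumption: DenseBlock still concatenates the outputs of these layers, as in the posted code
import torch.nn as nn

class BasicBlock(nn.Module):
    def __init__(self, inplanes, outplanes):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(outplanes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(outplanes, outplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(outplanes)
        self.relu2 = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # no "out += residual": inside a dense block, earlier features are reused
        # through concatenation rather than an identity shortcut
        return self.relu2(out)

If an additive shortcut is really wanted, the alternative is a 1x1 convolution that projects the input to `outplanes` channels before the addition. Also note that once this error is gone, the channel bookkeeping downstream deserves a second look: after each transition layer the channel count is reset to 128/256/512, so `self.inplanes` should be reset to that value before adding `growth_rate * layers_num[i]`, otherwise the next transition layer will expect the wrong number of input channels.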

OP | Posted on 2023-4-23 17:18:52
Quoting isdkz (2023-4-23 17:14): "This error occurs because the two tensors have mismatched shapes during an element-wise addition. In the error message, tensor a has size 32 and ..."

The ResNet model structure and the DenseNet model structure have already been provided above.