import torch
from torch import nn
from d2l import torch as d2l
# LeNet-5 (Sigmoid + average-pooling variant) for single-channel 28x28 images.
net = nn.Sequential(
    # Conv block 1: 1x28x28 -> 6x28x28 (padding=2 preserves spatial size),
    # then average-pool down to 6x14x14.
    nn.Conv2d(1, 6, kernel_size=5, padding=2),
    nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    # Conv block 2: 6x14x14 -> 16x10x10 (no padding), then pool to 16x5x5.
    nn.Conv2d(6, 16, kernel_size=5),
    nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    # Classifier head: flatten the 16*5*5 = 400 features, then 400 -> 120 -> 84 -> 10.
    nn.Flatten(),
    nn.Linear(16 * 5 * 5, 120),
    nn.Sigmoid(),
    nn.Linear(120, 84),
    nn.Sigmoid(),
    nn.Linear(84, 10),
)
print(net)
# Mini-batch size shared by the training and test data loaders.
batch_size = 8
# Fashion-MNIST loaders from d2l (downloads the dataset on first use).
# Presumably yields (images, labels) batches of 1x28x28 images, matching the
# single-channel 28x28 input the network above expects — confirm against d2l docs.
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size)
def evaluate_accuracy_gpu(net, data_iter, device=None): #@save
"""使用GPU计算模型在数据集上的精度"""
if isinstance(net, nn.Module):
net.eval() # 设置为评估模式
if not device:
device = next(iter(net.parameters())).device
# 正确预测的数量,总预测的
# NOTE(review): the two lines below were blog-page text accidentally scraped
# into the source ("LeNet + after-class exercises + quick Q&A", posted
# 2023-08-07 13:12:42). Commented out so the file parses as Python.
# LeNet + after-class exercises + quick Q&A
# Latest recommended article posted 2023-08-07 13:12:42