The output layer of softmax regression is a fully connected layer, so a single linear module is all we need. Because each batch of samples x returned by the data loader has shape (batch_size, 1, 28, 28), we first have to use view() to reshape x to (batch_size, 784) before feeding it into the fully connected layer.
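For example, a quick sketch of this reshape (the tensor below is a dummy batch used purely for illustration):

import torch
x = torch.ones(256, 1, 28, 28)           # a dummy batch of 256 single-channel 28x28 images
print(x.view(x.shape[0], -1).shape)      # torch.Size([256, 784])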
import torch
from torch import nn
from torch.nn import init
import numpy as np
import sys
sys.path.append("..")
import d2lzh_pytorch as d2l
from collections import OrderedDict
# Obtain and read the data -----------------------------------------------
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
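# d2l.load_data_fashion_mnist is, roughly, a thin wrapper around
# torchvision's FashionMNIST dataset and DataLoader. A hedged sketch of
# an equivalent (the root path './Datasets' is illustrative, not the
# package's actual code):
# import torchvision
# import torchvision.transforms as transforms
# mnist_train = torchvision.datasets.FashionMNIST(root='./Datasets', train=True,
#     download=True, transform=transforms.ToTensor())
# mnist_test = torchvision.datasets.FashionMNIST(root='./Datasets', train=False,
#     download=True, transform=transforms.ToTensor())
# train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
# test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False)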
# Define and initialize the model ---------------------------------------------
num_inputs = 784
num_outputs = 10
class LinearNet(nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):  # x shape: (batch, 1, 28, 28)
        # flatten x to (batch, 784) before the fully connected layer
        y = self.linear(x.view(x.shape[0], -1))
        return y
net = LinearNet(num_inputs, num_outputs)
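# Quick sanity check (an illustrative addition, not in the original post):
# pass a dummy batch through the net and confirm the output shape.
X = torch.ones(batch_size, 1, 28, 28)
print(net(X).shape)  # torch.Size([256, 10])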
# This function is saved in the d2lzh_pytorch package for later use.
# Wrap the shape conversion of x in a custom FlattenLayer.
class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):  # x shape: (batch, *, *, ...)
        return x.view(x.shape[0], -1)
net = nn.Sequential(
    # FlattenLayer(),
    # nn.Linear(num_inputs, num_outputs)
    # (the two commented lines above are the unnamed, positional alternative)
    OrderedDict([
        ('flatten', FlattenLayer()),
        ('linear', nn.Linear(num_inputs, num_outputs))
    ])
)
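# Because the layers were given names via the OrderedDict, they can be
# accessed as attributes, e.g. net.flatten and net.linear (used below).
print(net)  # shows the named submodules of the Sequential container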
# Randomly initialize the model's weights from a normal distribution with
# mean 0 and standard deviation 0.01, and set the bias to zero.
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)
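# Verify the initialization (an illustrative check, not in the original):
print(net.linear.weight.data.std())      # roughly 0.01
print(net.linear.bias.data.abs().sum())  # tensor(0.)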
# Softmax and cross-entropy loss function ---------------------------------------
loss = nn.CrossEntropyLoss()
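# Note: nn.CrossEntropyLoss fuses log-softmax and negative log-likelihood
# into one numerically stable operation, so the net outputs raw logits
# rather than probabilities. A tiny illustration with dummy values
# (not part of the original post):
y_hat = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # logits for 2 samples, 2 classes
y = torch.tensor([1, 0])                        # ground-truth class labels
print(loss(y_hat, y))                           # mean cross-entropy, a scalar tensor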
# Define the optimization algorithm -----------------------------------------------
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
# Train the model -----------------------------------------------------
num_epochs = 10
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
As for the d2lzh_pytorch package, I will keep documenting it in a separate blog post.
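For reference, here is a minimal sketch of what a training loop matching train_ch3's call signature could look like. This is an assumption reconstructed from the call above, not the package's actual code (the real function also reports accuracy on test_iter each epoch):

def train_ch3_sketch(net, train_iter, test_iter, loss, num_epochs, batch_size,
                     params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y)        # mean cross-entropy over the batch
            optimizer.zero_grad()     # clear gradients from the previous step
            l.backward()              # backpropagate
            optimizer.step()          # update weights and bias
            train_l_sum += l.item() * y.shape[0]
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        print('epoch %d, loss %.4f, train acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n))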