1. Preface
- A multilayer perceptron (MLP) obtains a nonlinear model by stacking hidden layers with nonlinear activation functions, which overcomes the single-layer perceptron's inability to classify XOR; in theory, an MLP with enough hidden units can approximate any continuous function (universal approximation).
- Commonly used nonlinear activation functions are Sigmoid, Tanh, and ReLU (see the first sketch after this list).
- For multi-class classification, a Softmax follows the output layer (see the forward-pass sketch after this list).
- The hyperparameters are the number of hidden layers and the size of each hidden layer.
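As a quick illustration of these activations (a minimal sketch; the tensor values are arbitrary and not from the original code), the built-in torch.sigmoid, torch.tanh and torch.relu can be applied elementwise:

import torch

x = torch.linspace(-3.0, 3.0, 7)   # a few sample inputs
print(torch.sigmoid(x))            # squashes values into (0, 1)
print(torch.tanh(x))               # squashes values into (-1, 1)
print(torch.relu(x))               # max(0, x), zeroes out negative values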
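And a minimal sketch of the forward pass of a one-hidden-layer MLP with a Softmax output (the sizes 784/256/10 match the network built in the implementation below; the random inputs and weights are only for illustration): the hidden layer computes H = ReLU(XW1 + b1), the output layer computes O = HW2 + b2, and Softmax turns each row of O into class probabilities. In the training code below, nn.CrossEntropyLoss applies the Softmax internally, so the network itself only outputs raw scores.

import torch

num_inputs, num_hiddens, num_outputs = 784, 256, 10   # same sizes as the network below
X = torch.randn(2, num_inputs)                        # a fake mini-batch of 2 flattened images
W1 = torch.randn(num_inputs, num_hiddens) * 0.01
b1 = torch.zeros(num_hiddens)
W2 = torch.randn(num_hiddens, num_outputs) * 0.01
b2 = torch.zeros(num_outputs)

H = torch.relu(X @ W1 + b1)        # hidden layer: affine transform + ReLU
O = H @ W2 + b2                    # output layer: raw scores (logits)
P = torch.softmax(O, dim=1)        # Softmax: class probabilities, each row sums to 1
print(P.sum(dim=1))                # every row sums to 1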
2. Implementation
import torch
from torch import nn
from d2l import torch as d2l
import torchvision
from torch.utils import data
from torchvision import transforms
def load_data_fashion_mnist(batch_size, resize=None):
    """
    Download the Fashion-MNIST dataset to the given directory and wrap it in
    DataLoaders that yield mini-batches.
    :param batch_size: number of samples per mini-batch
    :param resize: optionally resize the images (enlarge or shrink) to this size
    :return: (train_iter, test_iter)
    """
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))
    trans = transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(
        root=r'../data',
        train=True,
        transform=trans,
        download=True
    )
    mnist_test = torchvision.datasets.FashionMNIST(
        root=r'../data',
        train=False,
        transform=trans,
        download=True
    )
    train_iter = data.DataLoader(mnist_train, batch_size, shuffle=True)
    test_iter = data.DataLoader(mnist_test, batch_size, shuffle=False)
    return train_iter, test_iter
def init_weights(m):
    # Initialize the weights of every nn.Linear layer from N(0, 0.01)
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)
def accuracy(y_hat, y):
    """
    Count the number of correct predictions in a batch.
    :param y_hat: 2-D tensor of predicted scores, e.g. torch.tensor([[0.1, 0.2, 0.7], [0.8, 0.1, 0.1], ...]) for a 3-class problem
    :param y: 1-D tensor of true labels, e.g. torch.tensor([2, 0, ...])
    :return: number of correct predictions
    """
    y_hat = y_hat.argmax(axis=1)
    cmp = y_hat.type(y.dtype) == y
    return float(cmp.type(y.dtype).sum())
class Accumulator():
    """Accumulate sums over n variables, e.g. the number of correct predictions and the total count."""
    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)

    def __getitem__(self, item):
        return self.data[item]
def evaluate_accuracy(net, data_iter):
    """Compute the model's accuracy on the given dataset."""
    if isinstance(net, torch.nn.Module):
        net.eval()  # switch to evaluation mode
    metric = Accumulator(2)  # number of correct predictions, total number of samples
    with torch.no_grad():  # no gradients are needed for evaluation
        for X, y in data_iter:
            metric.add(accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]
from IPython import display
class Animator():
    """Plot data in animation."""
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        if legend is None:
            legend = []
        d2l.use_svg_display()
        self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # Capture the axis configuration in a lambda so it can be re-applied after each redraw
        self.config_axes = lambda: d2l.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):
        # Append the new points to the stored curves and redraw the figure
        if not hasattr(y, "__len__"):
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        d2l.plt.draw()
        d2l.plt.pause(0.001)
        display.clear_output(wait=True)
def train_epoch_ch3(net, train_iter, loss, updater):
    """Train the model for one epoch; return the average training loss and accuracy."""
    if isinstance(net, torch.nn.Module):
        net.train()  # switch to training mode
    metric = Accumulator(3)  # sum of losses, number of correct predictions, number of samples
    for X, y in train_iter:
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):
            # Built-in PyTorch optimizer; nn.CrossEntropyLoss returns the mean loss of the batch
            updater.zero_grad()
            l.backward()
            updater.step()
            metric.add(float(l) * len(y), accuracy(y_hat, y), y.numel())
        else:
            # Custom updater: the loss is a vector, so sum it before backpropagating
            l.sum().backward()
            updater(X.shape[0])
            metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    return metric[0] / metric[2], metric[1] / metric[2]
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    """Train the model and plot the training loss, training accuracy and test accuracy after each epoch."""
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                        legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
if __name__ == '__main__':
    # MLP: flatten the 28x28 images, one hidden layer with 256 units and ReLU, 10 output classes
    net = nn.Sequential(
        nn.Flatten(),
        nn.Linear(784, 256),
        nn.ReLU(),
        nn.Linear(256, 10)
    )
    net.apply(init_weights)
    # CrossEntropyLoss applies Softmax internally, so the network outputs raw logits
    loss = nn.CrossEntropyLoss()
    updater = torch.optim.SGD(net.parameters(), lr=0.1)
    num_epochs, batch_size = 10, 256
    train_iter, test_iter = load_data_fashion_mnist(batch_size)
    print(evaluate_accuracy(net, test_iter))  # accuracy before training, roughly 0.1 (random guessing over 10 classes)
    train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)