softmax训练过程:
代码:
def train_epoch_ch3(net, train_iter, loss, updater):
    """Train the model for a single epoch and return its average metrics.

    Returns a tuple ``(mean loss, mean accuracy)`` over all examples seen
    in this epoch.
    """
    # Switch an nn.Module into training mode (enables dropout/batchnorm updates).
    if isinstance(net, torch.nn.Module):
        net.train()
    # Running totals: summed loss, number of correct predictions, example count.
    stats = Accumulator(3)
    for features, labels in train_iter:
        predictions = net(features)
        batch_loss = loss(predictions, labels)
        if isinstance(updater, torch.optim.Optimizer):
            # Built-in PyTorch optimizer: backprop the mean loss, then step.
            updater.zero_grad()
            batch_loss.mean().backward()
            updater.step()
        else:
            # Custom updater: sum the per-example losses before backprop,
            # then pass the batch size so the update can normalize itself.
            batch_loss.sum().backward()
            updater(features.shape[0])
        stats.add(float(batch_loss.sum()),
                  accuracy(predictions, labels),
                  labels.numel())
    # stats[0] = total loss, stats[1] = total correct, stats[2] = total examples.
    return stats[0] / stats[2], stats[1] / stats[2]
上面这段代码定义了一次迭代的过程。
为方便观看,用动画实现:
代码:
class Animator:  #@save
    """Incrementally draw one or more curves in an animated figure."""

    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        # Prepare a figure whose curves will be extended point by point.
        if legend is None:
            legend = []
        d2l.use_svg_display()
        self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            # Normalize to a list so self.axes[0] is valid in every case.
            self.axes = [self.axes, ]
        # A closure that re-applies the axis settings after each redraw.
        self.config_axes = lambda: d2l.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):
        """Append the data points in (x, y) and redraw all curves."""
        # Promote scalars to lists: one y-list entry per curve.
        if not hasattr(y, "__len__"):
            y = [y]
        num_curves = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * num_curves
        # Lazily create per-curve storage on the first call.
        if not self.X:
            self.X = [[] for _ in range(num_curves)]
        if not self.Y:
            self.Y = [[] for _ in range(num_curves)]
        for idx, (xv, yv) in enumerate(zip(x, y)):
            if xv is not None and yv is not None:
                self.X[idx].append(xv)
                self.Y[idx].append(yv)
        # Clear and replot everything, then refresh the notebook output.
        self.axes[0].cla()
        for xs, ys, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(xs, ys, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)
Animator 类基于 matplotlib 做增量绘图,属于辅助工具,了解其接口(构造参数与 add 方法)即可,不必深究实现细节。
训练函数:
代码:
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    """Train a model for ``num_epochs`` epochs, plotting progress live."""
    # Animated plot of train loss, train accuracy and test accuracy per epoch.
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                        legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        # (mean training loss, mean training accuracy) for this epoch.
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        # Fraction of correctly classified examples on the test set.
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
    train_loss, train_acc = train_metrics
    # Sanity checks on the final epoch's metrics: training should have
    # converged to a reasonable loss and accuracy.
    assert train_loss < 0.5, train_loss
    assert 0.7 < train_acc <= 1, train_acc
    assert 0.7 < test_acc <= 1, test_acc
小批量随机梯度下降:
代码:
lr = 0.1  # learning rate for minibatch stochastic gradient descent


def updater(batch_size):
    """Apply one minibatch SGD step to the model parameters w and b."""
    params = [w, b]
    return d2l.sgd(params, lr, batch_size)
开始训练,10个周期:
代码:
# Number of full passes over the training data.
num_epochs = 10
# Train softmax regression with the hand-written cross-entropy loss
# and the SGD-based updater defined above.
train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, updater)
10次结果:
20次结果: