4.3 Automatic Gradient Computation
Although the modular approach lets us assemble neural networks fairly cleanly, computing gradients for each module by hand is still tedious and error-prone. Deep learning frameworks already encapsulate automatic gradient computation, so we can focus on the model architecture instead of spending effort deriving gradients.
torch provides the torch.nn.Module class to make it easy to implement custom layers and models. Both models and layers can be built by extending torch.nn.Module; a model is simply a special kind of layer. An operator that inherits from torch.nn.Module can directly call other operators that also inherit from torch.nn.Module; the torch framework automatically recognizes such nested operators, computes their gradients, and updates their parameters during optimization.
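To make this concrete, here is a minimal autograd sketch, independent of the classes in this section: once a tensor is created with requires_grad=True, calling backward() on a scalar derived from it fills in its .grad attribute with no manual derivation.

import torch

# A toy parameter for which torch should track gradients
w = torch.tensor([2.0, 3.0], requires_grad=True)
# A scalar loss built from differentiable operations
loss = (w * w).sum()
# Automatic gradient computation: fills in w.grad
loss.backward()
print(w.grad)  # tensor([4., 6.]), i.e. d(sum(w^2))/dw = 2w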
4.3.1 Reimplementing the Feedforward Network with Predefined Operators
We reimplement the binary classification task using torch's predefined operators, relying mainly on torch.nn.Module.
1. Reimplement the binary classification task with PyTorch's predefined operators.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import normal_, constant_

class Model_MLP_L2_V2(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(Model_MLP_L2_V2, self).__init__()
        # First fully connected layer: weights ~ N(0, 1), biases zero
        self.fc1 = nn.Linear(input_size, hidden_size)
        normal_(self.fc1.weight, mean=0., std=1.)
        constant_(self.fc1.bias, val=0.0)
        # Second fully connected layer
        self.fc2 = nn.Linear(hidden_size, output_size)
        normal_(self.fc2.weight, mean=0., std=1.)
        constant_(self.fc2.bias, val=0.0)
        # Sigmoid activation
        self.act_fn = torch.sigmoid

    # Forward pass
    def forward(self, inputs):
        z1 = self.fc1(inputs)
        a1 = self.act_fn(z1)
        z2 = self.fc2(a1)
        a2 = self.act_fn(z2)
        return a2
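As a quick sanity check (a hypothetical usage sketch, not part of the original task code), the model can be instantiated and run on a random batch:

# Hypothetical smoke test: 4 random samples with 2 features each
model = Model_MLP_L2_V2(input_size=2, hidden_size=5, output_size=1)
x = torch.rand(4, 2)
y_hat = model(x)
print(y_hat.shape)  # torch.Size([4, 1]); values lie in (0, 1) after the sigmoid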
2. Add another hidden layer of 3 neurons and implement the binary classification again.
class Model_MLP_L2_V2(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(Model_MLP_L2_V2, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        normal_(self.fc1.weight, mean=0., std=1.)
        constant_(self.fc1.bias, val=0.0)
        # Extra hidden layer
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        normal_(self.fc2.weight, mean=0., std=1.)
        constant_(self.fc2.bias, val=0.0)
        # Output layer
        self.fc3 = nn.Linear(hidden_size, output_size)
        normal_(self.fc3.weight, mean=0., std=1.)
        constant_(self.fc3.bias, val=0.0)
        # Sigmoid activation, shared by all layers
        self.act_fn = torch.sigmoid

    # Forward pass
    def forward(self, inputs):
        z1 = self.fc1(inputs)
        a1 = self.act_fn(z1)
        z2 = self.fc2(a1)
        a2 = self.act_fn(z2)
        z3 = self.fc3(a2)
        a3 = self.act_fn(z3)
        return a3
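The automatic parameter tracking described at the start of this section can be verified directly: the nn.Linear submodules assigned in __init__ are registered by nn.Module, so all of their weights and biases appear in named_parameters() without any extra bookkeeping. A minimal check:

model = Model_MLP_L2_V2(input_size=2, hidden_size=3, output_size=1)
# All nested nn.Linear parameters are registered automatically
for name, param in model.named_parameters():
    print(name, tuple(param.shape), param.requires_grad)
# fc1.weight (3, 2) True
# fc1.bias (3,) True
# ... and likewise for fc2 and fc3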
4.3.2 完善Runner类
基于上一节实现的 RunnerV2_1
类,本节的 RunnerV2_2 类在训练过程中使用自动梯度计算;模型保存时,使用state_dict
方法获取模型参数;模型加载时,使用set_state_dict
方法加载模型参数.
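Before reading the full Runner, the save/load mechanics can be seen in isolation. A minimal sketch of the state_dict round trip (the file name here is illustrative):

model_a = Model_MLP_L2_V2(input_size=2, hidden_size=3, output_size=1)
# state_dict() returns an ordered mapping from parameter names to tensors
torch.save(model_a.state_dict(), "demo_model.pdparams")  # illustrative path
# load_state_dict() copies the saved tensors back into a model of the same architecture
model_b = Model_MLP_L2_V2(input_size=2, hidden_size=3, output_size=1)
model_b.load_state_dict(torch.load("demo_model.pdparams"))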
class RunnerV2_2(object):
    def __init__(self, model, optimizer, metric, loss_fn, **kwargs):
        self.model = model
        self.optimizer = optimizer
        self.loss_fn = loss_fn
        self.metric = metric
        # Record how the evaluation metric changes during training
        self.train_scores = []
        self.dev_scores = []
        # Record how the loss changes during training
        self.train_loss = []
        self.dev_loss = []

    def train(self, train_set, dev_set, **kwargs):
        # Number of training epochs; defaults to 0 if not given
        num_epochs = kwargs.get("num_epochs", 0)
        # Logging frequency; defaults to 100 if not given
        log_epochs = kwargs.get("log_epochs", 100)
        # Model save path; defaults to "best_model.pdparams" if not given
        save_path = kwargs.get("save_path", "best_model.pdparams")
        # Custom logging function; defaults to None if not given
        custom_print_log = kwargs.get("custom_print_log", None)
        # Track the best metric seen so far
        best_score = 0
        # Train for num_epochs epochs
        for epoch in range(num_epochs):
            # Switch the model to training mode
            # (evaluate() below switches it back to eval mode each epoch)
            self.model.train()
            X, y = train_set
            # Get model predictions
            logits = self.model(X)
            # Compute the cross-entropy loss
            trn_loss = self.loss_fn(logits, y)
            self.train_loss.append(trn_loss.item())
            # Compute the evaluation metric
            trn_score = self.metric(logits, y).item()
            self.train_scores.append(trn_score)
            # Automatically compute parameter gradients
            trn_loss.backward()
            if custom_print_log is not None:
                # Print the gradients of each layer
                custom_print_log(self)
            # Update the parameters
            self.optimizer.step()
            # Clear the gradients
            self.optimizer.zero_grad()
            dev_score, dev_loss = self.evaluate(dev_set)
            # If the current metric is the best so far, save the model
            if dev_score > best_score:
                self.save_model(save_path)
                print(f"[Evaluate] best accuracy performance has been updated: {best_score:.5f} --> {dev_score:.5f}")
                best_score = dev_score
            if log_epochs and epoch % log_epochs == 0:
                print(f"[Train] epoch: {epoch}/{num_epochs}, loss: {trn_loss.item()}")

    # Evaluation: use 'torch.no_grad()' so gradients are neither computed nor stored
    @torch.no_grad()
    def evaluate(self, data_set):
        # Switch the model to evaluation mode
        self.model.eval()
        X, y = data_set
        # Compute the model output
        logits = self.model(X)
        # Compute the loss
        loss = self.loss_fn(logits, y).item()
        self.dev_loss.append(loss)
        # Compute the evaluation metric
        score = self.metric(logits, y).item()
        self.dev_scores.append(score)
        return score, loss

    def predict(self, X):
        # Switch the model to evaluation mode
        self.model.eval()
        return self.model(X)

    # Save the model parameters obtained with 'model.state_dict()'
    def save_model(self, saved_path):
        torch.save(self.model.state_dict(), saved_path)

    # Load the model parameters with 'model.load_state_dict()'
    def load_model(self, model_path):
        state_dict = torch.load(model_path)
        self.model.load_state_dict(state_dict)
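The custom_print_log hook above receives the runner itself, but the text does not define it. The following is only an assumed sketch of a gradient-printing hook, consistent with how train() calls it after backward():

# Assumed implementation of the gradient-printing hook (not from the original text)
def print_grads(runner):
    print("The gradient of the layers:")
    for name, param in runner.model.named_parameters():
        if param.grad is not None:
            print(f"{name}: grad norm = {param.grad.norm().item():.6f}")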
4.3.3 Model Training
Instantiate the RunnerV2_2 class and pass in the training configuration. The code is as follows:
1. Reimplement the binary classification task with PyTorch's predefined operators.
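The metric used below, accuracy, is assumed to carry over from the previous chapter. To keep this section runnable, here is a minimal sketch consistent with how it is called (on sigmoid outputs, returning a tensor so .item() works):

# Assumed accuracy metric for binary classification (carried over from the previous chapter)
def accuracy(preds, labels):
    # preds are sigmoid outputs in (0, 1); threshold at 0.5
    return ((preds >= 0.5).float() == labels).float().mean()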
# Configure the model
input_size = 2
hidden_size = 5
output_size = 1
model = Model_MLP_L2_V2(input_size=input_size, hidden_size=hidden_size, output_size=output_size)
# Set the loss function
loss_fn = F.binary_cross_entropy
# Set the optimizer
learning_rate = 0.2
optimizer = torch.optim.SGD(model.parameters(), learning_rate)
# Set the evaluation metric
metric = accuracy
# Other parameters
epoch_num = 1000
saved_path = 'best_model.pdparams'
# Instantiate the RunnerV2_2 class and pass in the training configuration
# (X_train, y_train, X_dev, y_dev come from the dataset preparation in the previous section)
runner = RunnerV2_2(model, optimizer, metric, loss_fn)
runner.train([X_train, y_train], [X_dev, y_dev], num_epochs=epoch_num, log_epochs=50, save_path=saved_path)
Output:
[Evaluate] best accuracy performance has been updated: 0.00000 --> 0.45000
[Train] epoch: 0/1000, loss: 0.8735500574111938
[Evaluate] best accuracy performance has been updated: 0.45000 --> 0.46250
[Evaluate] best accuracy performance has been updated: 0.46250 --> 0.56250
[Evaluate] best accuracy performance has been upda
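Since RunnerV2_2 records train_loss, dev_loss, train_scores, and dev_scores, the training curves can be visualized after training. A minimal illustrative sketch with matplotlib:

import matplotlib.pyplot as plt

# Visualize the loss curves recorded by RunnerV2_2 (illustrative sketch)
plt.plot(runner.train_loss, label="train loss")
plt.plot(runner.dev_loss, label="dev loss")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()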