"""
Example: defining a two-layer network by subclassing torch.nn.Module,
overriding forward() and (as a no-op) backward().
"""
import torch
from torch.autograd import Variable
batch_n = 64
input_data = 1000
hidden_layer = 100
output_data = 10
'''
除了可以采用自动梯度方法,还可以通过构建一个继承了torch.nn.Module的新类,来完成对前向传播函数和后向传播函数的重写。
新类中,使用forward作为前向传播函数的关键字,backward作为后向传播函数的关键字。
'''
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, input, w1, w2):
x = torch.mm(input, w1)
x = torch.clamp(x, min=0)
x = torch.mm(x, w2)
return x
def backward(self):
pass
'''
这里的变量赋值、训练次数和学习速率的定义,以及模型训练和参数优化使用的代码,和上一节autograd中使用的代码没有太大差异。
不同的是,我们的模型通过 y_pred=model(x,w1,w2) 来完成对模型预测值的输出,并且整个训练部分的代码都被简化了。
'''
model = Model()
x = Variable(torch.randn(batch_n, input_data), requires_grad=False)
y = Variable(torch.randn(batch_n, output_data), requires_grad=False)
w1 = Variable(torch.randn(input_data, hidden_layer), requires_grad=True)
w2 = Variable(torch.randn(hidden_layer, output_data), requires_grad=True)
epoch_n = 20
learning_rate = 1e-6
for epoch in range(epoch_n):
y_pred = model(x, w1, w2)
loss = (y_pred - y).pow(2).sum()
print("Epoch:{},Loss:{:.4f}".format(epoch, loss.data[0]))
loss.backword()
w1 -= learning_rate * w1.grad.data
w2 -= learning_rate * w1.grad.data
w1.grad.data.zero_()
w2.grad.data.zero_()
'''
20次训练后,20个loss值的打印输出如下:
Epoch:0,Loss:42159108.0000
Epoch:1,Loss:66748248.0000
Epoch:2,Loss:233537776.0000
Epoch:3,Loss:654288896.0000
Epoch:4,Loss:289585344.0000
Epoch:5,Loss:11103424.0000
Epoch:6,Loss:6646614.0000
Epoch:7,Loss:4412858.0000
Epoch:8,Loss:3143432.2500
Epoch:9,Loss:2366286.5000
Epoch:10,Loss:1864644.0000
Epoch:11,Loss:1525999.5000
Epoch:12,Loss:1288064.8750
Epoch:13,Loss:1114480.5000
Epoch:14,Loss:983444.3125
Epoch:15,Loss:880959.1875
Epoch:16,Loss:798234.3750
Epoch:17,Loss:729681.0000
Epoch:18,Loss:671618.0625
Epoch:19,Loss:621518.6250
'''