Neural Networks
In PyTorch, neural networks are built with the torch.nn module.
An nn.Module contains the layers of the network, and forward(input) returns the output. This chapter records the steps for building a neural network and setting up its training; the training process itself is covered in detail in later chapters.
1. Defining the network
import torch
import torch.nn as nn
import torch.nn.functional as F

# Define the network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)   # 1 input channel, 6 output channels, 5x5 kernel
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Fully connected layers; for a 32x32 input, two rounds of
        # 5x5 convolution + 2x2 pooling leave 16 feature maps of size 5x5
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    # Forward pass
    def forward(self, x):
        # Convolution + ReLU + 2x2 max pooling
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # A single number suffices when the pooling window is square
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

net = Net()
print(net)
# Output
Net(
(conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
(conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
(fc1): Linear(in_features=400, out_features=120, bias=True)
(fc2): Linear(in_features=120, out_features=84, bias=True)
(fc3): Linear(in_features=84, out_features=10, bias=True)
)
Inspect the model's learnable parameters:
params = list(net.parameters())
print(len(params))
print(params[0].size()) # conv1's .weight
# Output
10
torch.Size([6, 1, 5, 5])
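To see which layer each entry belongs to, net.named_parameters() pairs every parameter tensor with its name; a small sketch:

for name, param in net.named_parameters():
    print(name, param.size())  # e.g. conv1.weight torch.Size([6, 1, 5, 5])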
2. Feeding an input and calling backward
Randomly generate a 32x32 input (the input size this network expects), feed it through the network, and get the output.
input = torch.randn(1, 1, 32, 32)
out = net(input)
print(out)
# Output
tensor([[-0.0233, 0.0159, -0.0249, 0.1413, 0.0663, 0.0297, -0.0940, -0.0135,
0.1003, -0.0559]], grad_fn=<AddmmBackward>)
Zero the gradient buffers of all parameters, then backpropagate with random gradients:
net.zero_grad()
out.backward(torch.randn(1, 10))
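Note that torch.nn only supports mini-batches: nn.Conv2d takes a 4D tensor of nSamples x nChannels x Height x Width, which is why the input above has shape (1, 1, 32, 32). For a single sample, unsqueeze(0) adds a fake batch dimension; a minimal sketch:

single = torch.randn(1, 32, 32)  # one sample: channels x height x width
batched = single.unsqueeze(0)    # insert a batch dimension at position 0
print(batched.size())            # torch.Size([1, 1, 32, 32])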
3. Loss function
A loss function takes a pair of inputs, the model's output and the target value, and computes a loss that estimates how far the output is from the target.
output = net(input)
target = torch.randn(10) # a dummy target, for example
target = target.view(1, -1) # make it the same shape as output
criterion = nn.MSELoss()
loss = criterion(output, target)
print(loss)
# Output
tensor(1.3389, grad_fn=<MseLossBackward>)
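Because loss carries a grad_fn, a few steps of the autograd graph can be walked backwards through .next_functions; a small sketch (the exact node names printed depend on the PyTorch version):

print(loss.grad_fn)                       # the MSE loss node, e.g. MseLossBackward
print(loss.grad_fn.next_functions[0][0])  # one step further back in the graph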
4. Backpropagation
net.zero_grad()  # first clear the existing gradients
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
# Output
conv1.bias.grad before backward
tensor([0., 0., 0., 0., 0., 0.])
conv1.bias.grad after backward
tensor([-0.0054, 0.0011, 0.0012, 0.0148, -0.0186, 0.0087])
5. Updating the network parameters
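The simplest update rule is plain stochastic gradient descent: weight = weight - learning_rate * gradient. As a hand-rolled sketch (assuming loss.backward() has already populated the gradients):

learning_rate = 0.01
for f in net.parameters():
    f.data.sub_(f.grad.data * learning_rate)  # in-place: weight -= lr * grad

In practice, torch.optim packages this and other update rules: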
import torch.optim as optim
# stochastic gradient descent
optimizer = optim.SGD(net.parameters(), lr=0.01)
# In the training loop:
optimizer.zero_grad() # zero the gradient buffers
output = net(input)
loss = criterion(output, target)
loss.backward()
optimizer.step() # Does the update
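Putting the steps together, each training iteration zeroes the gradients, runs the forward pass, computes the loss, backpropagates, and steps the optimizer. A sketch using dummy data (a real loop would draw batches from a DataLoader):

for epoch in range(2):                   # loop over the data multiple times
    inputs = torch.randn(4, 1, 32, 32)   # dummy batch of 4 samples
    targets = torch.randn(4, 10)         # dummy targets

    optimizer.zero_grad()                # clear gradients from the previous step
    outputs = net(inputs)                # forward pass
    loss = criterion(outputs, targets)   # compute the loss
    loss.backward()                      # backpropagate
    optimizer.step()                     # update the parameters
    print(f'epoch {epoch}: loss = {loss.item():.4f}')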