Building a Neural Network
A typical training procedure for a neural network is as follows:
1. Define a neural network that has some learnable parameters
2. Iterate over a dataset of inputs
3. Process the input through the network
4. Compute the loss
5. Propagate the error backward through the network
6. Update the network's parameters, typically with a simple rule: weight = weight - learning_rate * gradient (see the sketch after this list)
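As a roadmap, here is one full training step condensed into a runnable sketch; the tiny nn.Linear stand-in model, the MSE loss, and the learning rate are placeholder assumptions for illustration only:

import torch
import torch.nn as nn
import torch.nn.functional as F

model = nn.Linear(4, 2)                            # step 1: a tiny stand-in network
x, target = torch.randn(8, 4), torch.randn(8, 2)   # step 2: one batch of random inputs
output = model(x)                                  # step 3: forward pass
loss = F.mse_loss(output, target)                  # step 4: compute the loss (assumed criterion)
model.zero_grad()                                  # clear any old gradients
loss.backward()                                    # step 5: backpropagate the error
learning_rate = 0.01                               # assumed value
with torch.no_grad():
    for p in model.parameters():                   # step 6: weight = weight - learning_rate * gradient
        p -= learning_rate * p.grad

The rest of this section builds a real convolutional network and walks through these steps one by one.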
import torch
import torch.nn as nn
import torch.nn.functional as F
# Build a convolutional neural network
class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        # convolution layer 1:
        # input channels = 1, output channels = 6, kernel size = 5x5
        self.conv1 = nn.Conv2d(1, 6, 5)
        # convolution layer 2:
        # input channels = 6, output channels = 16, kernel size = 5x5
        self.conv2 = nn.Conv2d(6, 16, 5)
        # fully connected (affine) layers: y = Wx + b
        # input = 16 * 5 * 5, output = 120
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    # Define the forward pass of the network
    def forward(self, x):
        # Pooling has no learnable parameters, so torch.nn.functional.max_pool2d can be
        # applied directly in the forward pass; the same goes for the ReLU activation
        # (see the module-form sketch after this class).
        # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the size is a square you can specify just a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # Flatten the multi-dimensional tensor into a vector before the fully connected layers
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
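Because pooling and ReLU carry no learnable parameters, the functional calls above are interchangeable with their module counterparts; a minimal sketch of the module-style equivalents, continuing the script above (the names pool and act are illustrative):

pool = nn.MaxPool2d(2)  # module form of F.max_pool2d(x, 2)
act = nn.ReLU()         # module form of F.relu(x)
# pool(act(conv_output)) computes the same result as the functional version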
# Instantiate the network
net = Net()
print(net)
The structure of the network is printed as follows (note that fc1's in_features=400 is exactly the flattened 16 * 5 * 5 convolutional output):
Net(
(conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
(conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
(fc1): Linear(in_features=400, out_features=120, bias=True)
(fc2): Linear(in_features=120, out_features=84, bias=True)
(fc3): Linear(in_features=84, out_features=10, bias=True)
)
The learnable parameters of a model are returned by calling net.parameters():
params = list(net.parameters())
print(len(params))
print(params[0].size()) # conv1's .weight
output:
10
torch.Size([6, 1, 5, 5])
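The count of 10 comes from the five layers each contributing a weight tensor and a bias tensor. A quick way to verify this is a sketch that prints each parameter's name and shape:

for name, p in net.named_parameters():
    print(name, tuple(p.size()))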
Feed a random 32x32 input to the network. Note that torch.nn only supports mini-batches, so the input shape is nSamples x nChannels x Height x Width; this architecture expects 32x32 images, which two 5x5 convolutions and two 2x2 poolings reduce to 16 feature maps of 5x5.
input = torch.randn(1, 1, 32, 32)
out = net(input)
print(out)
output:
tensor([[ 0.0524, 0.0926, -0.0605, 0.0834, -0.0337, -0.1213, 0.0878, 0.0792,
0.0281, -0.0800]], grad_fn=<AddmmBackward>)
# Zero the gradient buffers of all parameters, then backprop with random gradients
net.zero_grad()
out.backward(torch.randn(1, 10))
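The code below calls loss.backward(), but no loss has been constructed so far. Here is a minimal sketch that produces one; the dummy target and the nn.MSELoss criterion are illustrative assumptions:

output = net(input)
target = torch.randn(1, 10)       # a dummy target with the same shape as the output (assumption)
criterion = nn.MSELoss()          # assumed criterion for illustration
loss = criterion(output, target)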
To backpropagate the loss, we first need to clear the existing gradients; otherwise the new gradients will be accumulated onto them. Now we call loss.backward() and look at conv1's bias gradients before and after the backward pass.
net.zero_grad() # zeroes the gradient buffers of all parameters
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
output:
conv1.bias.grad before backward
tensor([0., 0., 0., 0., 0., 0.])
conv1.bias.grad after backward
tensor([ 0.0074, 0.0025, -0.0273, -0.0070, -0.0109, -0.0132])
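The only step of the opening list still missing is the parameter update. It can be written by hand with the rule weight = weight - learning_rate * gradient, or, more commonly, through torch.optim; a minimal sketch reusing the names above (the SGD choice and lr=0.01 are assumptions):

import torch.optim as optim

optimizer = optim.SGD(net.parameters(), lr=0.01)  # implements weight -= lr * gradient

optimizer.zero_grad()                 # zero the gradient buffers
output = net(input)
loss = criterion(output, target)      # criterion and target from the sketch above
loss.backward()
optimizer.step()                      # apply the update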