1. Fundamentals
Simple network structures
Networks like LeNet-5, used in the earlier hands-on deep learning notes, have a simple serial (sequential) structure. AlexNet and VGG are likewise serial networks.
Complex network structures
GoogLeNet
Inception Module
When it is unclear which kernel size works best, use several in parallel and concatenate the outputs along the channel dimension.
For the concatenation to work, every branch must produce the same W and H. MaxPooling with its default stride keeps the channel count C but halves W and H, so the pooling branch instead uses average pooling, with padding and stride chosen (kernel 3, stride 1, padding 1) so that W and H are the same before and after pooling.
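A quick check of this shape preservation (a minimal sketch; the 10-channel 12×12 tensor size is made up for illustration):

import torch
import torch.nn.functional as F

x = torch.randn(1, 10, 12, 12)                           # (batch, channels, H, W)
y = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)  # stride 1 + padding 1
print(y.shape)  # torch.Size([1, 10, 12, 12]): C, W, H all unchanged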
1×1 convolution kernels
Information fusion: each output element combines, at one spatial position, the pixel values from all input channels (e.g., the center element of the output aggregates the center pixels of the original three channels).
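To make this fusion concrete, here is a small sketch (the 3-channel input and its size are illustrative assumptions): a 1×1 convolution computes, at every position, a weighted sum of that position's values across the input channels.

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 1, kernel_size=1, bias=False)
x = torch.randn(1, 3, 5, 5)
y = conv(x)
# The center output element is a weighted sum of the three center input pixels:
w = conv.weight.view(3)                        # one weight per input channel
manual = (w * x[0, :, 2, 2]).sum()
print(torch.allclose(y[0, 0, 2, 2], manual))   # True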
What 1×1 convolutions are for
They change the number of channels, which reduces the amount of computation and saves time.
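As a rough illustration of the savings (the 192→32 channel counts and the 28×28 feature-map size below are assumptions for the example, not taken from the network in this post): compare a direct 5×5 convolution with inserting a 1×1 reduction to 16 channels first.

# multiplications for a conv layer ~= k*k * W*H * C_in * C_out
direct = 5 * 5 * 28 * 28 * 192 * 32       # 5x5 directly: 120,422,400
reduced = (1 * 1 * 28 * 28 * 192 * 16     # 1x1 reduction to 16 channels
           + 5 * 5 * 28 * 28 * 16 * 32)   # then the 5x5: total 12,443,648
print(direct, reduced, direct / reduced)  # roughly a 10x reduction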
Implementing the Inception Module
Concatenate along the channel dimension (dim=1).
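For example (the shapes here are illustrative):

import torch
a = torch.zeros(1, 16, 12, 12)
b = torch.zeros(1, 24, 12, 12)
print(torch.cat([a, b], dim=1).shape)  # torch.Size([1, 40, 12, 12]): channels add up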
Code implementation (encapsulating the Inception Module as a class to reduce code redundancy)
class InceptionA(nn.Module):
    def __init__(self, in_channels):
        super(InceptionA, self).__init__()
        # Branch 1: a single 1x1 convolution
        self.branch1x1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        # Branch 2: 1x1 reduction followed by a 5x5 convolution (padding keeps W, H)
        self.branch5x5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)
        # Branch 3: 1x1 reduction followed by two 3x3 convolutions
        self.branch3x3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)
        # Branch 4: average pooling followed by a 1x1 convolution
        self.branch_pool = nn.Conv2d(in_channels, 24, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)
        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch3x3 = self.branch3x3_3(branch3x3)
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)
        outputs = [branch1x1, branch5x5, branch3x3, branch_pool]
        return torch.cat(outputs, dim=1)  # (b, c, w, h): channels are dim=1
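A quick sanity check of the class above (a sketch, assuming torch and the class are available as in the full program below; the 10-channel 12×12 input mirrors what conv1 in the full network produces):

x = torch.randn(1, 10, 12, 12)
print(InceptionA(in_channels=10)(x).shape)
# torch.Size([1, 88, 12, 12]): 16 + 24 + 24 + 24 = 88 channels, W and H preserved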
2. Full code implementation
Code notes:
1. The Inception Module is first wrapped in a class.
2. The network is: one convolution stage (conv, maxpooling, relu), then an InceptionA block (output channels: 16 + 24 + 24 + 24 = 88), another convolution stage (conv, maxpooling, relu), another InceptionA block, and finally a fully connected layer (fc).
3. The value 1408 can be read from x.shape after x = x.view(in_size, -1). Tracing the shapes: 28×28 → conv (5×5) → 24×24 → pool → 12×12 → InceptionA (88 channels, W and H unchanged) → conv (5×5) → 8×8 → pool → 4×4, so 88 × 4 × 4 = 1408. A way to discover it without hand-tracing is shown below.
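A hedged way to find the 1408 automatically (a sketch that assumes the Net class defined in the full code below): run a dummy batch through everything before the fully connected layer and print the flattened shape.

import torch
net = Net()                      # the Net defined below
x = torch.zeros(1, 1, 28, 28)    # one dummy MNIST-sized image
x = net.incep1(torch.relu(net.mp(net.conv1(x))))
x = net.incep2(torch.relu(net.mp(net.conv2(x))))
print(x.view(1, -1).shape)       # torch.Size([1, 1408])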
import torch
import torch.nn as nn
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt

# prepare dataset
batch_size = 64
# ToTensor plus normalization with the MNIST mean and std
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])

train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)

# design model using class
class InceptionA(nn.Module):
    def __init__(self, in_channels):
        super(InceptionA, self).__init__()
        self.branch1x1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)
        self.branch3x3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)
        self.branch_pool = nn.Conv2d(in_channels, 24, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)
        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch3x3 = self.branch3x3_3(branch3x3)
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)
        outputs = [branch1x1, branch5x5, branch3x3, branch_pool]
        return torch.cat(outputs, dim=1)  # (b, c, w, h): channels are dim=1

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(88, 20, kernel_size=5)  # 88 = 24*3 + 16
        self.incep1 = InceptionA(in_channels=10)  # matches the 10 output channels of conv1
        self.incep2 = InceptionA(in_channels=20)  # matches the 20 output channels of conv2
        self.mp = nn.MaxPool2d(2)
        self.fc = nn.Linear(1408, 10)  # 1408 = 88 * 4 * 4

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = self.incep1(x)
        x = F.relu(self.mp(self.conv2(x)))
        x = self.incep2(x)
        x = x.view(in_size, -1)
        x = self.fc(x)
        return x

model = Net()

# construct loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

# training cycle: forward, backward, update
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    # print('accuracy on test set: %d %% ' % (100 * correct / total))
    return correct / total

if __name__ == '__main__':
    epoch_list = []
    acc_list = []
    for epoch in range(10):
        train(epoch)
        acc = test()
        epoch_list.append(epoch)
        acc_list.append(acc)
    plt.plot(epoch_list, acc_list)
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.show()
G:\python_files\DeepLearning\Scripts\python.exe G:/python_files/DeepLearningProgram/Inception网络.py
[1, 300] loss: 0.787
[1, 600] loss: 0.188
[1, 900] loss: 0.140
[2, 300] loss: 0.104
[2, 600] loss: 0.099
[2, 900] loss: 0.089
[3, 300] loss: 0.074
[3, 600] loss: 0.074
[3, 900] loss: 0.071
[4, 300] loss: 0.062
[4, 600] loss: 0.068
[4, 900] loss: 0.057
[5, 300] loss: 0.056
[5, 600] loss: 0.056
[5, 900] loss: 0.055
[6, 300] loss: 0.051
[6, 600] loss: 0.049
[6, 900] loss: 0.049
[7, 300] loss: 0.046
[7, 600] loss: 0.047
[7, 900] loss: 0.043
[8, 300] loss: 0.038
[8, 600] loss: 0.043
[8, 900] loss: 0.044
[9, 300] loss: 0.039
[9, 600] loss: 0.038
[9, 900] loss: 0.038
[10, 300] loss: 0.036
[10, 600] loss: 0.034
[10, 900] loss: 0.035
3. Very deep networks: solving the vanishing-gradient problem
Underlying principle
1. The problem to solve: vanishing gradients. In very deep plain networks, the gradients reaching the early layers shrink toward zero, so those layers stop learning.
2. Skip connection: H(x) = F(x) + x. The tensor sizes must match, i.e. the input x must have the same shape as the residual branch's output F(x). Activate only after the addition, and do not apply pooling before adding, or the shapes will no longer match. Because dH/dx = dF/dx + 1, the identity term keeps the local gradient close to 1 even when dF/dx is small, which is what counters vanishing gradients.
Network model
Code implementation
Encapsulating the residual block
Since the residual block is called repeatedly, it is best encapsulated as a class of its own.
As noted above, H(x) = F(x) + x requires that x and F(x) have the same shape, so both convolutions below preserve the channel count and spatial size; the addition happens before the final activation, and no pooling is done inside the block.
class ResidualBlock(nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        # Both convolutions keep channels and spatial size, so x and F(x) can be added
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        return F.relu(x + y)  # add first, then activate
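A quick check that the block really is shape-preserving (a sketch, assuming the imports from the full program below; the 16-channel 12×12 input is an arbitrary choice):

x = torch.randn(1, 16, 12, 12)
print(ResidualBlock(16)(x).shape == x.shape)  # True: F(x) and x can be added elementwise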
Full code implementation
import torch
import torch.nn as nn
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim

# prepare dataset
batch_size = 64
# ToTensor plus normalization with the MNIST mean and std
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])

train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)

# design model using class
class ResidualBlock(nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        return F.relu(x + y)

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5)
        self.rblock1 = ResidualBlock(16)
        self.rblock2 = ResidualBlock(32)
        self.mp = nn.MaxPool2d(2)
        # 512 = 32 * 4 * 4: 28x28 -> conv (5x5) -> 24x24 -> pool -> 12x12
        # -> residual block (unchanged) -> conv (5x5) -> 8x8 -> pool -> 4x4
        self.fc = nn.Linear(512, 10)

    def forward(self, x):
        in_size = x.size(0)
        x = self.mp(F.relu(self.conv1(x)))
        x = self.rblock1(x)
        x = self.mp(F.relu(self.conv2(x)))
        x = self.rblock2(x)
        x = x.view(in_size, -1)
        x = self.fc(x)
        return x

model = Net()

# construct loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

# training cycle: forward, backward, update
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('accuracy on test set: %d %% ' % (100 * correct / total))

if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test()
G:\python_files\DeepLearning\Scripts\python.exe G:/python_files/DeepLearningProgram/残差网络.py
[1, 300] loss: 0.563
[1, 600] loss: 0.160
[1, 900] loss: 0.107
accuracy on test set: 97 %
[2, 300] loss: 0.088
[2, 600] loss: 0.076
[2, 900] loss: 0.073
accuracy on test set: 98 %
[3, 300] loss: 0.060
[3, 600] loss: 0.058
[3, 900] loss: 0.056
accuracy on test set: 98 %
[4, 300] loss: 0.047
[4, 600] loss: 0.044
[4, 900] loss: 0.047
accuracy on test set: 98 %
[5, 300] loss: 0.038
[5, 600] loss: 0.044
[5, 900] loss: 0.038
accuracy on test set: 98 %
[6, 300] loss: 0.035
[6, 600] loss: 0.033
[6, 900] loss: 0.038
accuracy on test set: 99 %
[7, 300] loss: 0.030
[7, 600] loss: 0.032
[7, 900] loss: 0.031
accuracy on test set: 99 %
[8, 300] loss: 0.026
[8, 600] loss: 0.026
[8, 900] loss: 0.030
accuracy on test set: 99 %
[9, 300] loss: 0.021
[9, 600] loss: 0.026
[9, 900] loss: 0.027
accuracy on test set: 99 %
[10, 300] loss: 0.021
[10, 600] loss: 0.024
[10, 900] loss: 0.021
accuracy on test set: 99 %