CNN Basics
A CNN handles two tasks: feature extraction and classification.
An image is split into three channels (R, G, B); each patch keeps the same number of channels as the original image, but its width and height differ.
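A patch keeps all of the image's channels but covers a smaller spatial region. A minimal sketch (the shapes here are chosen purely for illustration):

import torch

img = torch.randn(3, 32, 32)  # an RGB image stored as a C x H x W tensor
patch = img[:, 8:13, 8:13]    # a 5x5 patch: all 3 channels kept, width and height reduced
print(patch.shape)            # torch.Size([3, 5, 5])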
The sliding window of the convolution operation
Convolution with a single-channel input
A 3×3 kernel makes the output one full ring smaller than the input, i.e., both height and width shrink by 2 (e.g., a 5×5 input yields a 3×3 output).
Convolution with a multi-channel input
- The only constraint on the kernel is that its channel count match the number of input channels.
- Common kernel sizes are 1×1, 3×3, and 5×5; odd sizes are the norm, but even sizes are also possible.
Convolution with a multi-channel output
Each kernel applied to the input produces one output channel, so applying multiple kernels to the input produces multiple output channels.
Kernel tensor shape
- The kernel tensor is 4-dimensional: the m output channels, the n input channels, and the kernel's own width w and height h.
Code example 1
import torch

in_channels, out_channels = 5, 10  # 5 input channels, 10 output channels
width, height = 100, 100
kernel_size = 3  # 3x3 kernel
batch_size = 1

# torch.randn returns a tensor of shape (batch_size, in_channels, width, height)
# whose entries are sampled from a standard normal distribution (mean 0, variance 1)
input = torch.randn(batch_size,
                    in_channels,
                    width,
                    height)

# 2D convolution
conv_layer = torch.nn.Conv2d(in_channels,
                             out_channels,
                             kernel_size=kernel_size)
output = conv_layer(input)

print(input.shape)              # input:  1, 5, 100, 100
print(output.shape)             # output: 1, 10, 98, 98 -- the 3x3 kernel shrinks height and width by 2 each
print(conv_layer.weight.shape)  # kernel: 10, 5, 3, 3
torch.Size([1, 5, 100, 100])
torch.Size([1, 10, 98, 98])
torch.Size([10, 5, 3, 3])
padding
- padding is the number of rings of cells added around the input; e.g., padding=1 adds one ring, and the added cells are filled with zeros (the general output-size formula is sketched below).
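A quick way to predict the output size for any combination of kernel size, padding, and stride (a minimal sketch; the helper conv_out_size is our own, not a PyTorch function): for each spatial dimension, output = (input + 2*padding - kernel_size) // stride + 1.

def conv_out_size(size, kernel_size, padding=0, stride=1):
    # standard Conv2d output-size formula (dilation = 1)
    return (size + 2 * padding - kernel_size) // stride + 1

print(conv_out_size(100, 3))           # 98 -- matches code example 1 above
print(conv_out_size(5, 3, padding=1))  # 5  -- matches code example 2 below
print(conv_out_size(5, 3, stride=2))   # 2  -- matches code example 3 below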
Code example 2
import torch

input = [3, 4, 6, 5, 7,
         2, 4, 6, 8, 2,
         1, 6, 7, 8, 4,
         9, 7, 4, 6, 2,
         3, 7, 5, 4, 1]
input = torch.Tensor(input).view(1, 1, 5, 5)

conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False)
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)
conv_layer.weight.data = kernel.data

output = conv_layer(input)
print(output)
tensor([[[[ 91., 168., 224., 215., 127.],
          [114., 211., 295., 262., 149.],
          [192., 259., 282., 214., 122.],
          [194., 251., 253., 169.,  86.],
          [ 96., 112., 110.,  68.,  31.]]]], grad_fn=<ConvolutionBackward0>)
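Sanity check of the top-left entry: with one ring of zeros around the input, the first 3×3 window is [[0, 0, 0], [0, 3, 4], [0, 2, 4]]; multiplying it elementwise with the kernel [[1, 2, 3], [4, 5, 6], [7, 8, 9]] and summing gives 3·5 + 4·6 + 2·8 + 4·9 = 91, matching the output above.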
Code example 3
import torch

input = [3, 4, 6, 5, 7,
         2, 4, 6, 8, 2,
         1, 6, 7, 8, 4,
         9, 7, 4, 6, 2,
         3, 7, 5, 4, 1]
input = torch.Tensor(input).view(1, 1, 5, 5)

conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, stride=2, bias=False)  # stride=2: the window jumps 2 cells per step
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)
conv_layer.weight.data = kernel.data

output = conv_layer(input)
print(output)
tensor([[[[211., 262.],
          [251., 169.]]]], grad_fn=<ConvolutionBackward0>)
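With stride=2 and no padding, the output side length is (5 - 3) // 2 + 1 = 2, hence the 2×2 result. Checking the top-left entry: the first window [[3, 4, 6], [2, 4, 6], [1, 6, 7]] multiplied elementwise with the kernel sums to 3·1 + 4·2 + 6·3 + 2·4 + 4·5 + 6·6 + 1·7 + 6·8 + 7·9 = 211.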
Max pooling layer
- Take the maximum within each region and assemble the maxima into a new matrix (MaxPool2d's stride defaults to its kernel_size, so a 2×2 pool halves width and height).
import torch

input = [3, 4, 6, 5,
         2, 4, 6, 8,
         1, 6, 7, 8,
         9, 7, 4, 6]
input = torch.Tensor(input).view(1, 1, 4, 4)

maxpooling_layer = torch.nn.MaxPool2d(kernel_size=2)
output = maxpooling_layer(input)
print(output)
tensor([[[[4., 8.],
          [9., 8.]]]])
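Pooling is applied to each channel independently, so the channel count never changes; only width and height shrink. A minimal check (shapes chosen for illustration):

import torch

x = torch.randn(1, 16, 8, 8)
print(torch.nn.MaxPool2d(2)(x).shape)  # torch.Size([1, 16, 4, 4]) -- 16 channels preserved, spatial dims halved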
The full CNN pipeline (diagram)
Code (CPU version)
import torch
import torch.nn.functional as F  # for F.relu below
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms  # ToTensor converts an H x W x C image to C x H x W, moving channels to the front

batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))  # MNIST mean and std, precomputed; just reuse these two numbers
])

train_dataset = datasets.MNIST(root='./dataset/mnist/',
                               train=True,
                               download=True,
                               transform=transform)
train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)
test_dataset = datasets.MNIST(root='./dataset/mnist/',
                              train=False,
                              download=True,
                              transform=transform)
test_loader = DataLoader(test_dataset,
                         shuffle=False,
                         batch_size=batch_size)

#--------------------------------------------------- CNN below -------------------------------------------------------------------------#
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        x = x.view(batch_size, -1)
        x = self.fc(x)
        return x

model = Net()
#--------------------------------------------------- CNN above -------------------------------------------------------------------------#

criterion = torch.nn.CrossEntropyLoss()  # cross-entropy loss
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)  # momentum speeds up convergence and helps escape local optima

def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        optimizer.zero_grad()
        # forward + backward + update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed at test time
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim=1)  # _ holds the max values, which we ignore; dim=1 takes the max across each row (over the 10 classes)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set: %d %%' % (100 * correct / total))

if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test()
[1, 300] loss: 0.593
[1, 600] loss: 0.192
[1, 900] loss: 0.148
Accuracy on test set: 96 %
[2, 300] loss: 0.115
[2, 600] loss: 0.097
[2, 900] loss: 0.098
Accuracy on test set: 97 %
[3, 300] loss: 0.080
[3, 600] loss: 0.083
[3, 900] loss: 0.070
Accuracy on test set: 97 %
[4, 300] loss: 0.064
[4, 600] loss: 0.069
[4, 900] loss: 0.068
Accuracy on test set: 98 %
[5, 300] loss: 0.059
[5, 600] loss: 0.057
[5, 900] loss: 0.057
Accuracy on test set: 98 %
[6, 300] loss: 0.052
[6, 600] loss: 0.049
[6, 900] loss: 0.055
Accuracy on test set: 98 %
[7, 300] loss: 0.050
[7, 600] loss: 0.047
[7, 900] loss: 0.045
Accuracy on test set: 98 %
[8, 300] loss: 0.049
[8, 600] loss: 0.041
[8, 900] loss: 0.041
Accuracy on test set: 98 %
[9, 300] loss: 0.039
[9, 600] loss: 0.038
[9, 900] loss: 0.045
Accuracy on test set: 98 %
[10, 300] loss: 0.039
[10, 600] loss: 0.039
[10, 900] loss: 0.037
Accuracy on test set: 98 %
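Before moving on to the GPU version, a quick check of why self.fc takes 320 inputs. A minimal shape trace reusing the model defined above (F.relu is omitted because it does not change shapes):

import torch

x = torch.randn(1, 1, 28, 28)      # one MNIST-sized image
x = model.pooling(model.conv1(x))  # conv1: 10 x 24 x 24, pool: 10 x 12 x 12
print(x.shape)                     # torch.Size([1, 10, 12, 12])
x = model.pooling(model.conv2(x))  # conv2: 20 x 8 x 8, pool: 20 x 4 x 4
print(x.shape)                     # torch.Size([1, 20, 4, 4]) -> flatten to 20 * 4 * 4 = 320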
Code (GPU version)
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms  # ToTensor converts an H x W x C image to C x H x W, moving channels to the front
import matplotlib.pyplot as plt

batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))  # MNIST mean and std, precomputed; just reuse these two numbers
])

train_dataset = datasets.MNIST(root='./dataset/mnist/',
                               train=True,
                               download=True,
                               transform=transform)
train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)
test_dataset = datasets.MNIST(root='./dataset/mnist/',
                              train=False,
                              download=True,
                              transform=transform)
test_loader = DataLoader(test_dataset,
                         shuffle=False,
                         batch_size=batch_size)

#--------------------------------------------------- CNN below -------------------------------------------------------------------------#
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        x = x.view(batch_size, -1)
        x = self.fc(x)
        return x

model = Net()
#********************** GPU change below *******************************#
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
#********************** GPU change above *******************************#
print(device)
#--------------------------------------------------- CNN above -------------------------------------------------------------------------#

criterion = torch.nn.CrossEntropyLoss()  # cross-entropy loss
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)  # momentum speeds up convergence and helps escape local optima

def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        #********************** GPU change below *******************************#
        inputs, target = inputs.to(device), target.to(device)
        #********************** GPU change above *******************************#
        optimizer.zero_grad()
        # forward + backward + update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed at test time
        for data in test_loader:
            images, labels = data
            #********************** GPU change below *******************************#
            images, labels = images.to(device), labels.to(device)
            #********************** GPU change above *******************************#
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim=1)  # _ holds the max values, which we ignore; dim=1 takes the max across each row (over the 10 classes)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set: %d %% [%d/%d]' % (100 * correct / total, correct, total))
    return correct / total

epoch_list = []
accu_list = []

if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        accu = test()
        epoch_list.append(epoch)
        accu_list.append(accu)

    plt.plot(epoch_list, accu_list, 'o-')
    plt.xticks(range(11))
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.grid(alpha=0.4)
    plt.show()
cuda:0
[1, 300] loss: 0.630
[1, 600] loss: 0.207
[1, 900] loss: 0.152
Accuracy on test set: 96 % [9673/10000]
[2, 300] loss: 0.115
[2, 600] loss: 0.108
[2, 900] loss: 0.103
Accuracy on test set: 97 % [9749/10000]
[3, 300] loss: 0.088
[3, 600] loss: 0.082
[3, 900] loss: 0.075
Accuracy on test set: 98 % [9811/10000]
[4, 300] loss: 0.067
[4, 600] loss: 0.065
[4, 900] loss: 0.067
Accuracy on test set: 98 % [9812/10000]
[5, 300] loss: 0.061
[5, 600] loss: 0.060
[5, 900] loss: 0.055
Accuracy on test set: 98 % [9814/10000]
[6, 300] loss: 0.053
[6, 600] loss: 0.053
[6, 900] loss: 0.049
Accuracy on test set: 98 % [9866/10000]
[7, 300] loss: 0.044
[7, 600] loss: 0.049
[7, 900] loss: 0.050
Accuracy on test set: 98 % [9868/10000]
[8, 300] loss: 0.040
[8, 600] loss: 0.048
[8, 900] loss: 0.044
Accuracy on test set: 98 % [9882/10000]
[9, 300] loss: 0.038
[9, 600] loss: 0.042
[9, 900] loss: 0.043
Accuracy on test set: 98 % [9840/10000]
[10, 300] loss: 0.040
[10, 600] loss: 0.040
[10, 900] loss: 0.034
Accuracy on test set: 98 % [9883/10000]
Homework
Try a more complex CNN:
- 3 × 2D convolution layers
- 3 × ReLU
- 3 × MaxPooling
- 2 × Linear layers
- Try different parameters and compare their performance
How the layer sizes in the homework code are determined
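Shape trace (each convolution uses no padding; each 2×2 pooling halves width and height):
- 1×28×28 → conv1 (kernel 5) → 10×24×24 → pool → 10×12×12
- → conv2 (kernel 5) → 20×8×8 → pool → 20×4×4
- → conv3 (kernel 3) → 30×2×2 → pool → 30×1×1
- flattened features: 30 × 1 × 1 = 30, hence Linear(30, 20) followed by Linear(20, 10)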
Code and results
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms  # ToTensor converts an H x W x C image to C x H x W, moving channels to the front
import matplotlib.pyplot as plt

batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))  # MNIST mean and std, precomputed; just reuse these two numbers
])

train_dataset = datasets.MNIST(root='./dataset/mnist/',
                               train=True,
                               download=True,
                               transform=transform)
train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)
test_dataset = datasets.MNIST(root='./dataset/mnist/',
                              train=False,
                              download=True,
                              transform=transform)
test_loader = DataLoader(test_dataset,
                         shuffle=False,
                         batch_size=batch_size)

#--------------------------------------------------- CNN below -------------------------------------------------------------------------#
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # 3 x 2D convolution layers
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)   # input 1 x 28 x 28 -> 10 x 24 x 24
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)  # input 10 x 12 x 12 -> 20 x 8 x 8
        self.conv3 = torch.nn.Conv2d(20, 30, kernel_size=3)  # input 20 x 4 x 4 -> 30 x 2 x 2
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc1 = torch.nn.Linear(30, 20)
        self.fc2 = torch.nn.Linear(20, 10)

    def forward(self, x):
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        x = F.relu(self.pooling(self.conv3(x)))
        x = x.view(batch_size, -1)
        x = self.fc1(x)
        x = self.fc2(x)
        return x

model = Net()
#********************** GPU change below *******************************#
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
#********************** GPU change above *******************************#
print(device)
#--------------------------------------------------- CNN above -------------------------------------------------------------------------#

criterion = torch.nn.CrossEntropyLoss()  # cross-entropy loss
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)  # momentum speeds up convergence and helps escape local optima

def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        #********************** GPU change below *******************************#
        inputs, target = inputs.to(device), target.to(device)
        #********************** GPU change above *******************************#
        optimizer.zero_grad()
        # forward + backward + update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed at test time
        for data in test_loader:
            images, labels = data
            #********************** GPU change below *******************************#
            images, labels = images.to(device), labels.to(device)
            #********************** GPU change above *******************************#
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim=1)  # _ holds the max values, which we ignore; dim=1 takes the max across each row (over the 10 classes)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set: %d %% [%d/%d]' % (100 * correct / total, correct, total))
    return correct / total

epoch_list = []
accu_list = []

if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        accu = test()
        epoch_list.append(epoch)
        accu_list.append(accu)

    plt.plot(epoch_list, accu_list, 'o-')
    plt.xticks(range(10))
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.grid(alpha=0.4)
    plt.show()
cuda:0
[1, 300] loss: 1.177
[1, 600] loss: 0.320
[1, 900] loss: 0.225
Accuracy on test set: 92 % [9242/10000]
[2, 300] loss: 0.160
[2, 600] loss: 0.141
[2, 900] loss: 0.124
Accuracy on test set: 96 % [9618/10000]
[3, 300] loss: 0.106
[3, 600] loss: 0.091
[3, 900] loss: 0.090
Accuracy on test set: 97 % [9760/10000]
[4, 300] loss: 0.079
[4, 600] loss: 0.077
[4, 900] loss: 0.072
Accuracy on test set: 97 % [9786/10000]
[5, 300] loss: 0.065
[5, 600] loss: 0.062
[5, 900] loss: 0.063
Accuracy on test set: 97 % [9777/10000]
[6, 300] loss: 0.056
[6, 600] loss: 0.055
[6, 900] loss: 0.054
Accuracy on test set: 98 % [9812/10000]
[7, 300] loss: 0.053
[7, 600] loss: 0.047
[7, 900] loss: 0.045
Accuracy on test set: 98 % [9849/10000]
[8, 300] loss: 0.040
[8, 600] loss: 0.043
[8, 900] loss: 0.044
Accuracy on test set: 98 % [9857/10000]
[9, 300] loss: 0.039
[9, 600] loss: 0.042
[9, 900] loss: 0.037
Accuracy on test set: 98 % [9863/10000]
[10, 300] loss: 0.034
[10, 600] loss: 0.037
[10, 900] loss: 0.035
Accuracy on test set: 98 % [9869/10000]