First, let's look at what a convolutional neural network looks like.
A mind map can be drawn from the workflow above.
1. Convolution
1) Convolution computation
A convolution kernel slides over the input; at each position, the elementwise products of the kernel and the input patch it covers are summed to give one output value. With multiple input channels, each channel is convolved with its own slice of the kernel and the results are added; each output channel has its own kernel.
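As a rough sketch of this computation (my own illustration with plain loops, not part of the original notes), the following computes a single-channel 2D convolution the way PyTorch's Conv2d does, i.e. as a cross-correlation without flipping the kernel:
import torch

def conv2d_single_channel(x, k):
    # x: (H, W) input, k: (kh, kw) kernel; no padding, stride 1
    H, W = x.shape
    kh, kw = k.shape
    out = torch.zeros(H - kh + 1, W - kw + 1)
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            # weighted sum of the patch covered by the kernel
            out[i, j] = (x[i:i+kh, j:j+kw] * k).sum()
    return out

x = torch.arange(25.).view(5, 5)
k = torch.ones(3, 3)
print(conv2d_single_channel(x, k))   # (3, 3) output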
2) Code: a convolution layer with different input and output channels
import torch

in_channels, out_channels = 5, 10
width, height = 100, 100   # spatial size of the input image
kernel_size = 3
batch_size = 1

input = torch.randn(batch_size, in_channels, width, height)
conv_layer = torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size)
output = conv_layer(input)

print(input.shape)
print(output.shape)
print(conv_layer.weight.shape)
Output:
torch.Size([1, 5, 100, 100])
torch.Size([1, 10, 98, 98])
torch.Size([10, 5, 3, 3])
With a 3×3 kernel and no padding, the spatial size shrinks from 100×100 to 100 - 3 + 1 = 98×98, and the weight tensor has shape (out_channels, in_channels, kernel_height, kernel_width) = (10, 5, 3, 3).
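More generally, for input size H, kernel size k, padding p and stride s, the output size is floor((H + 2p - k) / s) + 1. Below is a small check of this formula against Conv2d (a sketch added for illustration; the helper name is my own):
import torch

def conv_out_size(h, k, p=0, s=1):
    # output spatial size of Conv2d along one dimension
    return (h + 2 * p - k) // s + 1

x = torch.randn(1, 5, 100, 100)
conv = torch.nn.Conv2d(5, 10, kernel_size=3)                # no padding
print(conv(x).shape[-1], conv_out_size(100, 3))             # 98 98
conv_p = torch.nn.Conv2d(5, 10, kernel_size=3, padding=1)   # padding=1 keeps the size
print(conv_p(x).shape[-1], conv_out_size(100, 3, p=1))      # 100 100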
3) Code: keeping the output size the same as the input
If you want the output width and height to equal the input's, pad the input with padding = kernel_size // 2 (for a 3×3 kernel, padding = 3 // 2 = 1).
padding=1 means one ring of zeros is added around the border of the input:
conv_layer=torch.nn.Conv2d(1,1,kernel_size=3,padding=1,bias=False)
import torch

input = [3, 4, 6, 5, 7,
         2, 4, 6, 8, 2,
         2, 6, 7, 8, 4,
         9, 7, 4, 6, 2,
         3, 7, 5, 4, 1]
input = torch.Tensor(input).view(1, 1, 5, 5)   # (batch, channels, H, W)

conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False)
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)
conv_layer.weight.data = kernel.data           # set the kernel weights by hand

output = conv_layer(input)
print(output)
Output:
tensor([[[[ 91., 168., 224., 215., 127.],
[122., 218., 295., 262., 149.],
[197., 263., 282., 214., 122.],
[196., 252., 253., 169., 86.],
[ 96., 112., 110., 68., 31.]]]], grad_fn=<ConvolutionBackward0>)
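The top-left value can be checked by hand: with one ring of zero padding, only the lower-right 2×2 block of the kernel overlaps real input at that corner (a small verification I added; note that Conv2d computes a cross-correlation, so the kernel is not flipped):
# kernel values 5, 6, 8, 9 line up with input values 3, 4, 2, 4 at the corner
corner = 5 * 3 + 6 * 4 + 8 * 2 + 9 * 4
print(corner)   # 91, matching output[0, 0, 0, 0]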
2. Downsampling
Downsampling reduces the spatial resolution of the feature maps. Here it is done with max pooling: a 2×2 max pooling layer keeps the maximum of each 2×2 block, halving the width and height while leaving the number of channels unchanged.
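A minimal sketch of 2×2 max pooling (my own illustration, not from the original notes):
import torch

x = torch.Tensor([ 1,  2,  3,  4,
                   5,  6,  7,  8,
                   9, 10, 11, 12,
                  13, 14, 15, 16]).view(1, 1, 4, 4)
pool = torch.nn.MaxPool2d(2)   # kernel_size=2, stride defaults to kernel_size
print(pool(x))                 # the max of each 2x2 block: [[6, 8], [14, 16]]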
1) Example: a convolutional neural network for handwritten digits (MNIST)
You should be able to work out the tensor shape at each stage (the pink boxes in the figure above). The network consists of convolution layer 1, a pooling layer, convolution layer 2, another pooling layer, and a fully connected layer:
convolution layer 1: self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
convolution layer 2: self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
pooling layer: self.pooling = torch.nn.MaxPool2d(2)
For a 1×28×28 MNIST image: conv1 (5×5, no padding) gives 10×24×24, pooling gives 10×12×12, conv2 gives 20×8×8, pooling gives 20×4×4; flattening gives 20 · 4 · 4 = 320 features, which the fully connected layer maps to 10 classes.
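These shapes can be verified by pushing a dummy tensor through the layers (a sketch added here, not part of the listing below):
import torch

conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
pooling = torch.nn.MaxPool2d(2)

x = torch.randn(1, 1, 28, 28)          # one dummy MNIST-sized image
x = pooling(conv1(x)); print(x.shape)  # torch.Size([1, 10, 12, 12])
x = pooling(conv2(x)); print(x.shape)  # torch.Size([1, 20, 4, 4])
print(x.view(1, -1).shape)             # torch.Size([1, 320])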
2) Code
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
# prepare dataset
batch_size = 64
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)
# design model using class
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        x = x.view(batch_size, -1)  # flatten (n, 20, 4, 4) to (n, 320); -1 is worked out automatically as 320
        x = self.fc(x)
        return x
model = Net()
# construct loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
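# CrossEntropyLoss applies log-softmax internally, so forward() returns raw logits without a softmax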
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
# training cycle forward, backward, update
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        optimizer.zero_grad()

        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0
def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('accuracy on test set: %d %% ' % (100 * correct / total))
if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test()
......
accuracy on test set: 98 %
[7, 300] loss: 0.041
[7, 600] loss: 0.044
[7, 900] loss: 0.048
accuracy on test set: 98 %
[8, 300] loss: 0.042
[8, 600] loss: 0.041
[8, 900] loss: 0.039
accuracy on test set: 98 %
[9, 300] loss: 0.033
[9, 600] loss: 0.041
[9, 900] loss: 0.039
accuracy on test set: 98 %
[10, 300] loss: 0.033
[10, 600] loss: 0.035
[10, 900] loss: 0.039
accuracy on test set: 98 %
Process finished with exit code 0