The complete code is at the end.
GoogLeNet:
Inception block:
1x1 convolution: fuses information across channels (analogy: the sum of several per-subject scores determines the overall ranking); it also greatly reduces the amount of computation.
code:
self.branch1x1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)
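A rough sketch of where the savings come from (hypothetical sizes, the classic GoogLeNet illustration; counting multiply-accumulates only):
code:
# MACs for one conv layer: H_out * W_out * C_out * k * k * C_in
def conv_macs(h, w, c_in, c_out, k):
    return h * w * c_out * k * k * c_in

# assumed sizes: 28x28 feature map, 192 input channels, 32 output channels
direct = conv_macs(28, 28, 192, 32, 5)            # a single 5x5 conv
bottleneck = (conv_macs(28, 28, 192, 16, 1)       # 1x1 reduce 192 -> 16 channels
              + conv_macs(28, 28, 16, 32, 5))     # then the 5x5 conv on 16 channels
print(direct, bottleneck)                         # 120422400 vs 12443648 (~10x cheaper)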
InceptionA:
code: (not runnable on its own)
class InceptionA(nn.Module):
    def __init__(self, in_channels):
        super(InceptionA, self).__init__()
        self.branch1x1 = nn.Conv2d(in_channels, 16, kernel_size=1)

        self.branch5x5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)

        self.branch3x3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)

        self.branch_pool = nn.Conv2d(in_channels, 24, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch3x3 = self.branch3x3_3(branch3x3)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3, branch_pool]
        return torch.cat(outputs, dim=1)  # concatenate along the channel dimension
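A quick sanity check (assuming import torch, from torch import nn, and import torch.nn.functional as F): the four branches produce 16 + 24 + 24 + 24 = 88 output channels, and each branch preserves the spatial size, which is what makes torch.cat along dim=1 legal:
code:
incep = InceptionA(in_channels=10)
x = torch.randn(1, 10, 12, 12)   # hypothetical input: 10 channels, 12x12
print(incep(x).shape)            # torch.Size([1, 88, 12, 12])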
Full code:
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))  # (0.1307, ) is the mean, (0.3081, ) is the std of MNIST
])
train_dataset = datasets.MNIST(root='../dataset/mnist',
                               train=True,
                               download=True,
                               transform=transform)
train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist',
                              train=False,
                              download=True,
                              transform=transform)
test_loader = DataLoader(test_dataset,
                         shuffle=False,
                         batch_size=batch_size)
class InceptionA(torch.nn.Module):
    def __init__(self, in_channels):
        super(InceptionA, self).__init__()
        self.branch1x1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)

        self.branch5x5_1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_2 = torch.nn.Conv2d(16, 24, kernel_size=5, padding=2)

        self.branch3x3_1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = torch.nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = torch.nn.Conv2d(24, 24, kernel_size=3, padding=1)

        self.branch_pool = torch.nn.Conv2d(in_channels, 24, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch3x3 = self.branch3x3_3(branch3x3)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3, branch_pool]
        return torch.cat(outputs, dim=1)  # 16 + 24 + 24 + 24 = 88 output channels
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(88, 20, kernel_size=5)  # 88 = channels out of InceptionA
        self.incep1 = InceptionA(in_channels=10)
        self.incep2 = InceptionA(in_channels=20)
        self.mp = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(1408, 10)  # 1408 = 88 channels * 4 * 4 (see forward below)

    def forward(self, x):
        in_size = x.size(0)                 # batch size
        x = F.relu(self.mp(self.conv1(x)))  # (n, 1, 28, 28) -> (n, 10, 12, 12)
        x = self.incep1(x)                  # -> (n, 88, 12, 12)
        x = F.relu(self.mp(self.conv2(x)))  # -> (n, 20, 4, 4)
        x = self.incep2(x)                  # -> (n, 88, 4, 4)
        x = x.view(in_size, -1)             # flatten to (n, 1408)
        x = self.fc(x)
        return x
model = Net()
if torch.cuda.is_available():
    device = "cuda:0"
else:
    device = "cpu"
device = torch.device(device)
model.to(device)
criterion = torch.nn.CrossEntropyLoss()
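# Note: forward() returns raw logits with no softmax, because CrossEntropyLoss
# applies log-softmax internally. For example (hypothetical values), these two match:
#   logits = torch.tensor([[2.0, 0.5, 0.1]]); target = torch.tensor([0])
#   torch.nn.CrossEntropyLoss()(logits, target) == -F.log_softmax(logits, dim=1)[0, 0]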
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        # X: inputs, Y: target
        inputs, target = data
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()
        # forward + backward + update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0
def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            # max over dim=1 returns (max value, index of max) per row; the index is the predicted class
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            # predicted == labels gives 1 when correct, 0 when wrong; sum, then .item() extracts the scalar
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set: %d %%' % (100 * correct / total))

if __name__ == '__main__':
    epoch_list = []
    accuracy_list = []
    for epoch in range(10):
        train(epoch)
        test()
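A quick way to see where the 1408 in self.fc comes from: trace a dummy batch through the layers (a sketch, run after the definitions above):
code:
m = Net()
x = torch.zeros(1, 1, 28, 28)           # one dummy MNIST-sized image
x = F.relu(m.mp(m.conv1(x)))            # -> (1, 10, 12, 12)
x = m.incep1(x)                         # -> (1, 88, 12, 12): 16 + 24 + 24 + 24 channels
x = F.relu(m.mp(m.conv2(x)))            # -> (1, 20, 4, 4)
x = m.incep2(x)                         # -> (1, 88, 4, 4)
print(x.view(1, -1).shape)              # torch.Size([1, 1408]) = 88 * 4 * 4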
Result plot: (figure omitted)
Residual Network: tackles the vanishing-gradient problem by adding a skip connection, H(x) = F(x) + x. Differentiating with respect to x gives dH/dx = dF/dx + 1; that constant 1 keeps the gradient from shrinking toward 0.
Vanishing gradient means:
with the update w -= a * g, the chain rule multiplies the gradient g by one factor per layer; when those factors are less than 1, g shrinks toward 0 across many layers, and w effectively stops updating.
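A toy demonstration of that extra 1 (hypothetical one-weight "layer" F(x) = w * x with a small w):
code:
import torch

w = torch.tensor(0.1)        # small weight: the shrinking factor in a plain layer

x = torch.tensor(2.0, requires_grad=True)
(w * x).backward()           # plain layer: F(x) = w * x
print(x.grad)                # tensor(0.1000) -- the gradient is small

x = torch.tensor(2.0, requires_grad=True)
(w * x + x).backward()       # residual: H(x) = F(x) + x
print(x.grad)                # tensor(1.1000) -- the skip connection adds 1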
code: (not runnable on its own)
class ResidualBlock(torch.nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        # input and output channels must match so that x + y is well defined
        self.conv1 = torch.nn.Conv2d(channels, channels,
                                     kernel_size=3, padding=1)
        self.conv2 = torch.nn.Conv2d(channels, channels,
                                     kernel_size=3, padding=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        return F.relu(x + y)  # add the skip connection before the final ReLU
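A quick check (assuming import torch and import torch.nn.functional as F): the block preserves both the channel count and the spatial size, which is exactly what makes x + y legal:
code:
block = ResidualBlock(16)
x = torch.randn(1, 16, 12, 12)   # hypothetical input
print(block(x).shape)            # torch.Size([1, 16, 12, 12]) -- same shape as the input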
Full code:
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))  # (0.1307, ) is the mean, (0.3081, ) is the std of MNIST
])
train_dataset = datasets.MNIST(root='../dataset/mnist',
                               train=True,
                               download=True,
                               transform=transform)
train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist',
                              train=False,
                              download=True,
                              transform=transform)
test_loader = DataLoader(test_dataset,
                         shuffle=False,
                         batch_size=batch_size)
class ResidualBlock(torch.nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        # input and output channels must match so that x + y is well defined
        self.conv1 = torch.nn.Conv2d(channels, channels,
                                     kernel_size=3, padding=1)
        self.conv2 = torch.nn.Conv2d(channels, channels,
                                     kernel_size=3, padding=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        return F.relu(x + y)
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=5)
        self.mp = torch.nn.MaxPool2d(2)
        self.rblock1 = ResidualBlock(16)
        self.rblock2 = ResidualBlock(32)
        self.fc = torch.nn.Linear(512, 10)  # linear layer; 512 = 32 channels * 4 * 4

    def forward(self, x):
        # batch size is dimension 0 of the input tensor (n, 1, 28, 28)
        batch_size = x.size(0)
        x = self.mp(F.relu(self.conv1(x)))  # -> (n, 16, 12, 12)
        x = self.rblock1(x)
        x = self.mp(F.relu(self.conv2(x)))  # -> (n, 32, 4, 4)
        x = self.rblock2(x)
        x = x.view(batch_size, -1)          # flatten; -1 is computed automatically as 512
        # print("x.shape", x.shape)
        x = self.fc(x)
        return x
model = Net()
# use the GPU if one is available
if torch.cuda.is_available():
    device = "cuda:0"
else:
    device = "cpu"
device = torch.device(device)
model.to(device)
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        # X: inputs, Y: target
        inputs, target = data
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()
        # forward + backward + update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0
def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            # max over dim=1 returns (max value, index of max) per row; the index is the predicted class
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            # predicted == labels gives 1 when correct, 0 when wrong; sum, then .item() extracts the scalar
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set: %d %%' % (100 * correct / total))

if __name__ == '__main__':
    epoch_list = []
    accuracy_list = []
    for epoch in range(10):
        train(epoch)
        test()
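epoch_list and accuracy_list are declared but never filled in the script above. One possible way to use them (a sketch, assuming matplotlib is installed and test() is modified to return the accuracy):
code:
import matplotlib.pyplot as plt

# hypothetical change: inside test(), end with
#     acc = 100 * correct / total
#     print('Accuracy on test set: %d %%' % acc)
#     return acc

for epoch in range(10):
    train(epoch)
    epoch_list.append(epoch)
    accuracy_list.append(test())   # assumes test() now returns the accuracy

plt.plot(epoch_list, accuracy_list)
plt.xlabel('epoch')
plt.ylabel('accuracy (%)')
plt.show()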
Result plot: (figure omitted)