import torch
from torch.nn import functional as F  # functional API (F.conv2d, F.relu, ...)
from torch import nn  # neural-network building blocks
layer = nn.Conv2d(1, 3, kernel_size=3, stride=1, padding=0)
print(layer)  # Conv2d(1, 3, kernel_size=(3, 3), stride=(1, 1))
x = torch.rand(1, 1, 28, 28)
print(x.shape)  # a random grayscale "image" batch: [batch=1, channels=1, 28, 28]
out = layer(x)  # call the module itself (not .forward()) so hooks are applied
print("stride=1,padding=0:out.shape = ",out.shape)
layer = nn.Conv2d(1, 3, kernel_size=3, stride=1, padding=1)
out = layer(x)
print("stride=1,padding=1:out.shape = ",out.shape)
layer = nn.Conv2d(1, 3, kernel_size=3, stride=2, padding=1)
out = layer(x)
print("stride=2,padding=1:out.shape = ",out.shape)
print("-----Inner weight $ bias -----")
print("layer.weight = \n", layer.weight)
print("layer.weight.shape=",layer.weight.shape)
print("layer.bias.shape=",layer.bias.shape)
print("-----F.conv2d-----")
w = torch.rand(16, 3, 5, 5)    # weight: [out_ch=16, in_ch=3, kH=5, kW=5]
b = torch.rand(16)             # one bias per output channel
x = torch.randn(1, 3, 28, 28)
out = F.conv2d(x, w, b, stride=1, padding=1)
print("F.conv2d, stride=1, padding=1: out.shape =",out.shape)
out = F.conv2d(x, w, b, stride=2, padding=1)
print("F.conv2d, stride=2, padding=1: out.shape =",out.shape)
Summary
This demo is mainly meant to show how stride and padding are used and what effect each choice has. Since CNNs are not very interpretable by themselves, printing the output shapes is the most direct way to see what actually happened.
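As a cross-check (a sketch added here, not part of the original notebook), every shape printed above follows directly from the convolution output-size formula out = floor((in + 2*padding - kernel) / stride) + 1:

def conv_out(in_size, kernel, stride, padding):
    # floor((in + 2*padding - kernel) / stride) + 1
    return (in_size + 2 * padding - kernel) // stride + 1

print(conv_out(28, 3, 1, 0))  # 26 -> [1, 3, 26, 26]
print(conv_out(28, 3, 1, 1))  # 28 -> [1, 3, 28, 28]
print(conv_out(28, 3, 2, 1))  # 14 -> [1, 3, 14, 14]
print(conv_out(28, 5, 1, 1))  # 26 -> the F.conv2d case, [1, 16, 26, 26]
print(conv_out(28, 5, 2, 1))  # 13 -> the F.conv2d case, [1, 16, 13, 13]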
BatchNorm
In Batch Normalization (BN for short), the "batch" is the mini-batch of samples used in each optimization step. A BN layer is usually placed after a convolutional layer to re-normalize the distribution of its activations. Suppose the input of some layer for one batch is X = [x1, x2, ..., xn], where each xi is one sample and n is the batch size.
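Concretely, per feature BN computes the batch mean and (biased) batch variance and normalizes with them. A minimal sketch, assuming PyTorch's default eps=1e-5, a freshly initialized layer (gamma=1, beta=0), and reusing the torch/nn imports from above:

x = torch.rand(100, 16)              # n = 100 samples, 16 features
mu = x.mean(dim=0)                   # per-feature batch mean
var = x.var(dim=0, unbiased=False)   # per-feature biased batch variance
x_hat = (x - mu) / torch.sqrt(var + 1e-5)
bn = nn.BatchNorm1d(16).train()      # gamma=1, beta=0 right after init
print(torch.allclose(bn(x), x_hat, atol=1e-5))  # True: same normalization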
We can inspect the basic attributes of BatchNorm1d. At initialization the layer's running statistics sit at fixed values: running_mean is all zeros and running_var is all ones. After 100 training iterations, however, the running mean and variance have shifted toward the statistics of the data.
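The demo those observations refer to is not reproduced here, so below is a hedged reconstruction: in training mode, each forward pass nudges the running statistics toward the batch statistics (with PyTorch's default momentum of 0.1).

layer = nn.BatchNorm1d(16)
print(layer.running_mean, layer.running_var)  # all zeros, all ones initially
layer.train()
for _ in range(100):
    layer(torch.rand(100, 16))   # uniform data: mean ~0.5, variance ~1/12
print(layer.running_mean)  # ~0.5 after 100 updates
print(layer.running_var)   # ~0.083 after 100 updates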
Residual networks (ResNet)
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
from torch import nn, optim
#from torchvision.models import vgg13_bn
class ResBlk(nn.Module):
"""
resnet block
"""
def __init__(self, ch_in, ch_out):
super(ResBlk, self).__init__()
self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(ch_out)
self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
self.bn2 = nn.BatchNorm2d(ch_out)
self.extra = nn.Sequential()
if ch_out != ch_in:
# [b, ch_in, h, w] => [b, ch_out, h, w]
self.extra = nn.Sequential(
nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1),
nn.BatchNorm2d(ch_out)
)
def forward(self, x):
"""
:param x: [b, ch, h, w]
:return:
"""
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
        # shortcut: self.extra maps [b, ch_in, h, w] => [b, ch_out, h, w]
        # so that the element-wise add below is well-defined
        out = self.extra(x) + out
        out = F.relu(out)  # ReLU after the residual add, as in the ResNet paper
        return out
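# Quick shape check for the block (an illustrative sketch, not part of the
# original script): when ch_in != ch_out, the 1x1 shortcut conv is what makes
# the element-wise add dimensionally valid.
blk = ResBlk(16, 32)
print(blk(torch.randn(2, 16, 32, 32)).shape)  # torch.Size([2, 32, 32, 32])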
class ResNet18(nn.Module):
def __init__(self):
super(ResNet18, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(16)
)
        # followed by two residual blocks (blk3/blk4 below are kept commented out)
        # [b, 16, h, w] => [b, 16, h, w]
        self.blk1 = ResBlk(16, 16)
        # [b, 16, h, w] => [b, 32, h, w]
        self.blk2 = ResBlk(16, 32)
# # [b, 256, h, w] => [b, 512, h, w]
# self.blk3 = ResBlk(128, 256)
# # [b, 512, h, w] => [b, 1024, h, w]
# self.blk4 = ResBlk(256, 512)
        self.outlayer = nn.Linear(32 * 32 * 32, 10)  # 32 channels * 32*32 spatial = 32768 features
def forward(self, x):
x = F.relu(self.conv1(x))
        # [b, 16, h, w] => [b, 32, h, w] across the two residual blocks
x = self.blk1(x)
x = self.blk2(x)
# x = self.blk3(x)
# x = self.blk4(x)
# print(x.shape)
x = x.view(x.size(0), -1)
x = self.outlayer(x)
return x
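# Likewise, a sanity check on the full network with a CIFAR-10-sized input
# (a sketch added here for clarity, not in the original script):
print(ResNet18()(torch.randn(2, 3, 32, 32)).shape)  # torch.Size([2, 10])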
def main():
batchsz = 32
cifar_test, cifar_train = load_data(batchsz)
    x, label = next(iter(cifar_train))  # iterator.next() was removed in newer Python
print('x:', x.shape, 'label:', label.shape)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# model = Lenet5().to(device)
model = ResNet18().to(device)
criteon = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
print(model)
for epoch in range(1000):
model.train()
for batchidx, (x, label) in enumerate(cifar_train):
# [b, 3, 32, 32]
# [b]
x, label = x.to(device), label.to(device)
logits = model(x)
# logits: [b, 10]
# label: [b]
# loss: tensor scalar
loss = criteon(logits, label)
# backprop
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(epoch, 'loss:', loss.item())
model.eval()
with torch.no_grad():
# test
total_correct = 0
total_num = 0
for x, label in cifar_test:
# [b, 3, 32, 32]
# [b]
x, label = x.to(device), label.to(device)
# [b, 10]
logits = model(x)
# [b]
pred = logits.argmax(dim=1)
# [b] vs [b] => scalar tensor
correct = torch.eq(pred, label).float().sum().item()
total_correct += correct
total_num += x.size(0)
# print(correct)
acc = total_correct / total_num
print(epoch, 'acc:', acc)
def load_data(batchsz):
cifar_train = datasets.CIFAR10('../data/cifar', True, transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor()
]), download=True)
cifar_train = DataLoader(cifar_train, batch_size=batchsz, shuffle=True)
cifar_test = datasets.CIFAR10('../data/cifar', False, transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor()
]), download=True)
    cifar_test = DataLoader(cifar_test, batch_size=batchsz, shuffle=False)  # no need to shuffle the test set
return cifar_test, cifar_train
if __name__ == '__main__':
main()