Pytorch LeNet 3:网络输出可视化

修改网络实现方式

为了使得LeNet可视化,我们需要修改一下Pytorch/LeNet/LeNet.py的实现,将前面的卷积池化层和后面的全连接层分开,便于独立获取卷积和池化层的所有特征,实现后的代码如下:

LeNet2.py


import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    """LeNet-5 with the conv/pool stages and the fully connected stages
    kept in separate ``nn.Sequential`` containers, so the intermediate
    feature maps of each stage can be inspected independently."""

    def __init__(self):
        super(Net, self).__init__()

        # Convolution / pooling stages: C1 -> S2 -> C3 -> S4.
        self.features = nn.Sequential(
            nn.Conv2d(1, 6, 5),                     # C1
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # S2
            nn.Conv2d(6, 16, 5),                    # C3
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # S4
        )
        # Fully connected classifier: fc1 -> fc2 -> fc3.
        self.classifier = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),  # fc1
            nn.ReLU(inplace=True),
            nn.Linear(120, 84),          # fc2
            nn.ReLU(inplace=True),
            nn.Linear(84, 10),           # fc3
        )

    def forward(self, x):
        """Extract features, flatten per sample, then classify."""
        feats = self.features(x)
        flat = feats.view(-1, self.num_flat_features(feats))  # e.g. [1, 400]
        return self.classifier(flat)

    def num_flat_features(self, x):
        """Return the number of elements per sample, ignoring the batch
        dimension (e.g. [1, 16, 5, 5] -> 16 * 5 * 5 = 400)."""
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count


#net = Net()
#print(net)

#input = torch.randn(1, 1, 32, 32)
#net.forward(input)

重新训练网络

手写字体识别训练中我们已经搭建好了Lenet Mnist的训练环境。Lenet的网络实现方式改写后,需要使用LeNet2重新训练我们的网络,这样才能保证加载的网络和训练的网络参数是一致的

# python LeNetTrain.py
start train with epoch: 3
###iteration[:100],[Epoch:1],[Lr:0.00100000] train loss: 0.965
avg accuracy:90%
save net: ../models/lenet_1.pth
###iteration[:288],[Epoch:2],[Lr:0.00100000] train loss: 0.184
avg accuracy:95%
save net: ../models/lenet_2.pth
###iteration[:476],[Epoch:3],[Lr:0.00100000] train loss: 0.128
avg accuracy:97%


加载训练好的网络

path = '../models/'+'lenet_3.pth'
def load_model(path):
    """Load trained LeNet weights from *path* and return the network
    placed on the best available device.

    ``map_location=device`` makes a checkpoint that was saved on a GPU
    machine loadable on a CPU-only machine as well (without it,
    ``torch.load`` tries to restore CUDA tensors and raises).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = LeNet2.Net().to(device)
    net.load_state_dict(torch.load(path, map_location=device))
    # Inference mode; no dropout/batch-norm in LeNet, so outputs are
    # unchanged, but it is the correct mode for visualization passes.
    net.eval()
    return net

# Load the trained network and print its conv/pool part.
model = load_model(path)

print(model.features)

加载后,打印结果如下

Sequential(
  (0): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
  (1): ReLU(inplace=True)
  (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (3): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
  (4): ReLU(inplace=True)
  (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)

如上日志所示,定义LeNet的Sequential共有六层操作,我们接下来会看下每一层输出后的图片的效果

显示处理前的图片

#定义数据加载器
# Data loader: resize every MNIST test image to 32x32 (LeNet's input
# size) and convert it to a tensor in [0, 1].
resize = 32
transform = transforms.Compose([
    transforms.Resize(size=(resize, resize)),
    transforms.ToTensor(),
])
test_data = torchvision.datasets.MNIST(
    root="../datas",
    train=False,
    transform=transform,
    download=False,
)
# batch_size=1, shuffle=False -> one deterministic image per batch.
test_loader = torch.utils.data.DataLoader(dataset=test_data, batch_size=1, shuffle=False)

def imshow(img):
    """Display a (C, H, W) tensor image with matplotlib.

    NOTE(review): the original did ``img = img / 2 + 0.5`` to undo a
    ``Normalize((0.5,), (0.5,))`` transform, but the pipeline above only
    applies ``ToTensor()``, so pixels are already in [0, 1] — that step
    just washed the image out and is removed here.
    """
    npimg = img.numpy()
    # matplotlib expects (H, W, C); tensors are (C, H, W).
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()

# Fetch the first test image (deterministic: batch_size=1, shuffle=False).
dataiter = iter(test_loader)
# Use the builtin next(); DataLoader iterators have no .next() method on
# Python 3 / recent PyTorch (the .next() spelling raises AttributeError).
images, labels = next(dataiter)

# Show the batch as a single image grid.
imshow(torchvision.utils.make_grid(images))

图片显示个数,与dataloader设置的batch_size一致(这里batch_size为1,显示一张图片)

显示的这张图片是数字 7

PyTorch 提供了一个名为register_forward_hook的方法,它允许传入一个可以提取特定层输出的函数

显示各层处理后的图片

在实际显示处理后的图片之前,我们先来看下,LeNet的每层的输出

        self.features = nn.Sequential(
            nn.Conv2d(1, 6, 5),#self.C1 
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),# self.S2

            nn.Conv2d(6, 16, 5),#self.C3 
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),#self.S4
        )
        self.classifier = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),#self.fc1
            nn.ReLU(inplace=True),
            nn.Linear(120, 84),#self.fc2
            nn.ReLU(inplace=True),
            nn.Linear(84, 10),#self.fc3
        )

根据Lenet 网络结构介绍可知,各层输出如下
features各层输出

features[0] = [1, 6, 28, 28]
features[1] = [1, 6, 28, 28]
features[2] = [1, 6, 14, 14]
features[3] = [1, 16, 10, 10]
features[4] = [1, 16, 10, 10]
features[5] = [1, 16, 5, 5]

classifier各层输出

classifier[0] 的输入 [1, 400]
classifier[0]=[1,120]
classifier[1]=[1,120]
classifier[2]=[1,84]
classifier[3]=[1,84]
classifier[4]=[1,10]

处理后的各层的图片效果

在这里插入图片描述

代码实现,请参考LeNet/LeNetVision

def save_img(tensor, name):
    """Tile the channels of a 4-D activation tensor into one grid image
    and save it as ``<name>.jpg``."""
    # Swap the batch and channel axes so every channel becomes one grid
    # cell, e.g. [1, 6, 28, 28] -> [6, 1, 28, 28].
    grid_in = tensor.permute((1, 0, 2, 3))
    print('output permute:', grid_in.shape)
    grid = make_grid(grid_in, normalize=True, scale_each=True, nrow=8, padding=2)
    # (C, H, W) -> (H, W, C), then scale [0, 1] floats up to 0-255 bytes.
    pixels = (grid.permute((1, 2, 0)).cpu().data.numpy() * 255.).astype(np.uint8)
    Image.fromarray(pixels).save(name + '.jpg')
    
def save_img_linear(tensor, name):
    """Save a 2-D (fully connected layer) activation tensor as
    ``<name>.jpg``, mirroring save_img for 4-D tensors."""
    # Swap the batch and feature axes, as save_img does for channels.
    grid_in = tensor.permute((1, 0))
    print('output permute:', grid_in.shape)
    grid = make_grid(grid_in, normalize=True, scale_each=True, nrow=8, padding=2)
    # (C, H, W) -> (H, W, C), then scale [0, 1] floats up to 0-255 bytes.
    pixels = (grid.permute((1, 2, 0)).cpu().data.numpy() * 255.).astype(np.uint8)
    Image.fromarray(pixels).save(name + '.jpg')

# The model was trained on GPU when available; the forward input must
# live on the same device as the weights.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Dump the output of every layer in `features`, feeding each layer's
# output into the next layer. enumerate() over the Sequential avoids the
# hard-coded range(6), so this keeps working if layers are added.
for i, layer in enumerate(model.features):
    print('------features%d------' % i)
    print('input:', images.shape)

    print('layer:', layer)
    layer_out = layer(images.to(device))
    print('output', layer_out.shape)
    save_img(layer_out, 'features' + str(i))
    images = layer_out  # next layer consumes this layer's output

# Flatten the last feature map [1, 16, 5, 5] into [1, 400] for the
# fully connected classifier.
images = images.view(-1, model.num_flat_features(images))
# Dump the output of every classifier layer; enumerate() instead of the
# hard-coded range(5), same rationale as the features loop.
for i, layer in enumerate(model.classifier):
    print('------classifier%d------' % i)
    print('input:', images.shape)

    print('layer:', layer)
    layer_out = layer(images.to(device))
    print('output', layer_out.shape)
    save_img_linear(layer_out, 'classifier' + str(i))
    images = layer_out  # next layer consumes this layer's output
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值