Building the Wheel by Hand: GoogLeNet


import torch
from torch import nn

class Inception(nn.Module):
    def __init__(self, input_dim, c1, c2, c3, c4):
        super(Inception, self).__init__()
        # branch 1: single 1 x 1 convolution
        self.conv1_1 = nn.Conv2d(input_dim, c1, kernel_size=1, stride=1, padding=0)
        # branch 2: 1 x 1 convolution followed by 3 x 3 convolution
        self.conv1_1_c2 = nn.Conv2d(input_dim, c2[0], kernel_size=1, stride=1, padding=0)
        self.conv3_3_c2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, stride=1, padding=1)
        # branch 3: 1 x 1 convolution followed by 5 x 5 convolution
        self.conv1_1_c3 = nn.Conv2d(input_dim, c3[0], kernel_size=1, stride=1, padding=0)
        self.conv5_5_c3 = nn.Conv2d(c3[0], c3[1], kernel_size=5, stride=1, padding=2)
        # branch 4: 3 x 3 max pooling followed by 1 x 1 convolution
        self.maxpool_c4 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.conv1_1_c4 = nn.Conv2d(input_dim, c4, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        x_1 = self.conv1_1(x)
        x_2 = self.conv3_3_c2(self.conv1_1_c2(x))  # branch 2 takes x as input, not x_1
        x_3 = self.conv5_5_c3(self.conv1_1_c3(x))
        x_4 = self.conv1_1_c4(self.maxpool_c4(x))
        # concatenate the four branches along the channel dimension
        return torch.cat((x_1, x_2, x_3, x_4), 1)
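
A quick smoke test of the block (my own addition, with sizes borrowed from the first Inception stage of GoogLeNet): the output channel count should be c1 + c2[1] + c3[1] + c4.

x = torch.rand(1, 192, 28, 28)
blk = Inception(192, 64, (96, 128), (16, 32), 32)
# 64 + 128 + 32 + 32 = 256 output channels, spatial size unchanged
print(blk(x).shape)  # torch.Size([1, 256, 28, 28])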
        
class googleNet(nn.Module):
    def __init__(self, input_dim):
        super(googleNet, self).__init__()
        self.net = nn.Sequential(
            nn.Conv2d(input_dim, ...),  # out_channels / kernel_size left blank: this is where I got stuck
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(...),             # same here: I did not know the dimensions yet
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            Inception(...),
            # …… (remaining stages omitted)
        )

Main problems with my own attempt:
1. How to use Sequential to build a complex model.
2. I did not know how to handle the dimension computation.

What I learned:
1. Sequential can be used repeatedly to build the model, and a Sequential block can itself be passed in as part of another Sequential (see the sketch below).
2. The dimension changes show up mainly in the input/output channels of the convolution layers; pooling layers do not affect the channel dimension.
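
A minimal illustration of lesson 1 (made-up layer sizes, not from the textbook): an nn.Sequential is itself an nn.Module, so finished blocks can be passed straight into an outer nn.Sequential.

stage1 = nn.Sequential(nn.Conv2d(1, 8, kernel_size=3, padding=1), nn.ReLU())
stage2 = nn.Sequential(nn.Conv2d(8, 16, kernel_size=3, padding=1), nn.ReLU())
model = nn.Sequential(stage1, stage2)  # a Sequential built from Sequentials
print(model(torch.rand(1, 1, 32, 32)).shape)  # torch.Size([1, 16, 32, 32])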

Textbook code:

import time
import torch
from torch import nn, optim
import torch.nn.functional as F

import sys
sys.path.append("..") 
import d2lzh_pytorch as d2l
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class Inception(nn.Module):
    # c1 - c4 are the numbers of output channels of the layers in each path
    def __init__(self, in_c, c1, c2, c3, c4):
        super(Inception, self).__init__()
        # path 1: single 1 x 1 convolution layer
        self.p1_1 = nn.Conv2d(in_c, c1, kernel_size=1)
        # path 2: 1 x 1 convolution layer followed by a 3 x 3 convolution layer
        self.p2_1 = nn.Conv2d(in_c, c2[0], kernel_size=1)
        self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
        # path 3: 1 x 1 convolution layer followed by a 5 x 5 convolution layer
        self.p3_1 = nn.Conv2d(in_c, c3[0], kernel_size=1)
        self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
        # path 4: 3 x 3 max pooling layer followed by a 1 x 1 convolution layer
        self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.p4_2 = nn.Conv2d(in_c, c4, kernel_size=1)

    def forward(self, x):
        p1 = F.relu(self.p1_1(x))
        p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))
        p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
        p4 = F.relu(self.p4_2(self.p4_1(x)))
        return torch.cat((p1, p2, p3, p4), dim=1)  # concatenate the outputs along the channel dimension

b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
                   nn.ReLU(),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1),
                   nn.Conv2d(64, 192, kernel_size=3, padding=1),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
                   Inception(256, 128, (128, 192), (32, 96), 64),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
                   Inception(512, 160, (112, 224), (24, 64), 64),
                   Inception(512, 128, (128, 256), (24, 64), 64),
                   Inception(512, 112, (144, 288), (32, 64), 64),
                   Inception(528, 256, (160, 320), (32, 128), 128),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
                   Inception(832, 384, (192, 384), (48, 128), 128),
                   d2l.GlobalAvgPool2d())

net = nn.Sequential(b1, b2, b3, b4, b5, 
                    d2l.FlattenLayer(), nn.Linear(1024, 10))
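As a sanity check on the channel bookkeeping (my own addition, following the shape-printing pattern the textbook uses, and assuming a 96 x 96 single-channel input): each Inception block outputs c1 + c2[1] + c3[1] + c4 channels, so b3 ends at 128 + 192 + 96 + 64 = 480 channels, b4 at 256 + 320 + 128 + 128 = 832, and b5 at 384 + 384 + 128 + 128 = 1024, which is exactly what the final nn.Linear(1024, 10) expects.

X = torch.rand(1, 1, 96, 96)
for blk in net.children():
    X = blk(X)
    print('output shape:', X.shape)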

