Implementing InceptionV1 and InceptionV2 in PyTorch, with a comparison on CIFAR-10

1. GoogLeNet

      To date the family has five versions in total: Inception V1 through V4 plus Inception-ResNet. It opened up a novel line of exploration, multi-path feature extraction within a single block (Conv, MaxPool, and AvgPool side by side), together with the stacked 1*1 and 3*3 convolution pattern, which not only enlarges the receptive field but also costs fewer parameters and less computation than VGG's plain 3*3 stacks.
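To make the savings concrete, here is a quick back-of-the-envelope count (a sketch with illustrative channel widths, not figures from either paper), comparing a direct 3*3 convolution against the 1*1-reduce-then-3*3 pattern:

# Parameter count for bias-free convolutions; channel widths are illustrative
in_ch, out_ch, reduce_ch = 192, 128, 96

direct  = 3 * 3 * in_ch * out_ch                                  # 221,184 params
reduced = 1 * 1 * in_ch * reduce_ch + 3 * 3 * reduce_ch * out_ch  # 129,024 params

print(direct, reduced)  # the 1*1 reduction cuts parameters by roughly 40%

With that in mind, we can first compare the multi-branch blocks of V1 and V2: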

# For this comparison I swap ReLU for ReLU6, clipping unusually large activations
import torch
import torch.nn as nn
import torch.nn.functional as F

class InceptionV1_base(nn.Module):
    def __init__(self, in_channel, layers=[64,96,128,16,32,32]):
        super(InceptionV1_base, self).__init__()
        # branch 1: 1*1 conv
        self.branch_1 = nn.Sequential(
            nn.Conv2d(in_channel, layers[0], kernel_size=1, bias=False),
            nn.ReLU6(inplace=True),
        )
        # branch 2: 1*1 reduce -> 3*3 conv
        self.branch_2 = nn.Sequential(
            nn.Conv2d(in_channel, layers[1], kernel_size=1, bias=False),
            nn.ReLU6(inplace=True),
            nn.Conv2d(layers[1], layers[2], kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU6(inplace=True),
        )
        # branch 3: 1*1 reduce -> 3*3 conv (the original paper uses 5*5 here)
        self.branch_3 = nn.Sequential(
            nn.Conv2d(in_channel, layers[3], kernel_size=1, stride=1, padding=0, bias=False),
            nn.ReLU6(inplace=True),
            nn.Conv2d(layers[3], layers[4], kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU6(inplace=True),
        )
        # branch 4: 3*3 max pool -> 1*1 conv
        self.branch_4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(in_channel, layers[5], kernel_size=1, stride=1, padding=0, bias=False),
            nn.ReLU6(inplace=True),
        )

    def forward(self, x):
        b_1 = self.branch_1(x)
        b_2 = self.branch_2(x)
        b_3 = self.branch_3(x)
        b_4 = self.branch_4(x)
        # concatenate the four branches along the channel dimension
        y = torch.cat([b_1, b_2, b_3, b_4], dim=1)
        return y
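
A quick shape check (a sanity-check sketch, not part of the original post): the block preserves spatial size, and the output channel count is the sum of the four branch widths.

block = InceptionV1_base(192)      # default layers=[64,96,128,16,32,32]
x = torch.randn(1, 192, 28, 28)
print(block(x).shape)              # torch.Size([1, 256, 28, 28]) -> 64+128+32+32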
# Compared with InceptionV1, branch 3 is deepened into two stacked 3*3 convs
# for more persistent feature extraction.
# Branch 4 switches to average pooling; see Network in Network for the idea.
class InceptionV2_base(nn.Module):
    def __init__(self, in_channel, layers=[64,64,64,64,96,96,32]):
        super(InceptionV2_base, self).__init__()
        # branch 1: 1*1 conv
        self.branch_1 = nn.Sequential(
            nn.Conv2d(in_channel, layers[0], kernel_size=1, stride=1, padding=0, bias=False),
            nn.ReLU6(inplace=True),
        )
        # branch 2: 1*1 reduce -> 3*3 conv
        self.branch_2 = nn.Sequential(
            nn.Conv2d(in_channel, layers[1], kernel_size=1, stride=1, padding=0, bias=False),
            nn.ReLU6(inplace=True),
            nn.Conv2d(layers[1], layers[2], kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU6(inplace=True),
        )
        # branch 3: 1*1 reduce -> two stacked 3*3 convs (a factorized 5*5)
        self.branch_3 = nn.Sequential(
            nn.Conv2d(in_channel, layers[3], kernel_size=1, stride=1, padding=0, bias=False),
            nn.ReLU6(inplace=True),
            nn.Conv2d(layers[3], layers[4], kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU6(inplace=True),
            nn.Conv2d(layers[4], layers[5], kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU6(inplace=True),
        )
        # branch 4: 3*3 average pool -> 1*1 conv
        self.branch_4 = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(in_channel, layers[6], 1, stride=1, padding=0, bias=False),
            nn.ReLU6(inplace=True),
        )

    def forward(self, x):
        b_1 = self.branch_1(x)
        b_2 = self.branch_2(x)
        b_3 = self.branch_3(x)
        b_4 = self.branch_4(x)
        y = torch.cat([b_1, b_2, b_3, b_4], dim=1)
        return y
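
The factorization is easy to verify numerically (again my own sketch): both base blocks map 192 input channels to 256 output channels at unchanged resolution, while V2's branch 3 covers a 5*5 receptive field with two cheaper 3*3 convolutions.

v1 = InceptionV1_base(192)
v2 = InceptionV2_base(192)
x = torch.randn(1, 192, 28, 28)
print(v1(x).shape)   # torch.Size([1, 256, 28, 28])
print(v2(x).shape)   # torch.Size([1, 256, 28, 28])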

   For downsampling, InceptionV2 uses a dedicated grid-reduction block with stride-2 branches, rather than a bare MaxPool2d:

class Inceptionv2_dicount(nn.Module):
    def __init__(self, in_channel, layers=[128,160,64,96,96]):
        super(Inceptionv2_dicount, self).__init__()
        # branch 1: 1*1 reduce -> stride-2 3*3 conv
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_channel, layers[0], 1, stride=1, padding=0, bias=False),
            nn.ReLU6(inplace=True),
            nn.Conv2d(layers[0], layers[1], 3, stride=2, padding=1, bias=False),
            nn.ReLU6(inplace=True),
        )
        # branch 2: 1*1 reduce -> 3*3 conv -> stride-2 3*3 conv
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_channel, layers[2], 1, stride=1, padding=0, bias=False),
            nn.ReLU6(inplace=True),
            nn.Conv2d(layers[2], layers[3], 3, stride=1, padding=1, bias=False),
            nn.ReLU6(inplace=True),
            nn.Conv2d(layers[3], layers[4], 3, stride=2, padding=1, bias=False),
            nn.ReLU6(inplace=True),
        )
        # branch 3: stride-2 max pool passes the input channels through
        self.branch3 = nn.Sequential(
            nn.MaxPool2d(3, stride=2, padding=1),
        )

    def forward(self, x):
        b1 = self.branch1(x)
        b2 = self.branch2(x)
        b3 = self.branch3(x)
        return torch.cat([b1, b2, b3], dim=1)
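
A quick check of the reduction block (my own sketch): it halves the spatial resolution, and the pooled branch keeps the input channels unchanged in the concatenation.

red = Inceptionv2_dicount(320)     # default layers=[128,160,64,96,96]
x = torch.randn(1, 320, 28, 28)
print(red(x).shape)                # torch.Size([1, 576, 14, 14]) -> 160+96+320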

2. Full Network Implementation

class InceptionV1(nn.Module):
    def __init__(self, num_class, block=InceptionV1_base, grayscale=False):
        super(InceptionV1, self).__init__()
        dim = 1 if grayscale else 3
        self.block = block
        # stem: 7*7/2 conv -> max pool -> 1*1 -> 3*3 -> max pool (224 -> 28)
        self.bottle = nn.Sequential(
            nn.Conv2d(dim, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.ReLU6(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            nn.Conv2d(64, 64, kernel_size=1, stride=1, padding=0, bias=False),
            nn.ReLU6(inplace=True),
            nn.Conv2d(64, 192, kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU6(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )
        # inception stages with the channel configuration from the GoogLeNet paper
        self.layer1 = self._make_layer(192, [64,96,128,16,32,32])
        self.layer2 = self._make_layer(256, [128,128,192,32,96,64])
        self.max = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer3 = self._make_layer(480, [192,96,208,16,48,64])
        self.layer4 = self._make_layer(512, [160,112,224,24,64,64])
        self.layer5 = self._make_layer(512, [128,128,256,24,64,64])
        self.layer6 = self._make_layer(512, [112,144,288,32,64,64])
        self.layer7 = self._make_layer(528, [256,160,320,32,128,128])
        self.layer8 = self._make_layer(832, [256,160,320,32,128,128])
        self.layer9 = self._make_layer(832, [384,192,384,48,128,128])
        self.avg = nn.AvgPool2d(7, stride=1)
        # note: nn.Dropout takes the drop probability, so p=0.8 drops 80% of units
        self.bottom = nn.Sequential(
            nn.Dropout(p=0.8),
            nn.Linear(1024, num_class),
        )
        # He-style initialization for convs, small Gaussian for the classifier
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, (2. / n) ** .5)
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def _make_layer(self, in_channel, layers):
        blocks = []
        blocks.append(self.block(in_channel, layers))
        return nn.Sequential(*blocks)

    def forward(self, x):
        x = self.bottle(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.max(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.layer6(x)
        x = self.max(x)
        x = self.layer7(x)
        x = self.layer8(x)
        x = self.layer9(x)
        x = self.avg(x)
        x = x.view(x.size(0), -1)
        x = self.bottom(x)
        pro = F.softmax(x, dim=1)
        return x, pro
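
A forward-pass smoke test (my own sketch): the model returns the raw logits plus softmax probabilities, and each probability row sums to 1.

model = InceptionV1(num_class=10)
x = torch.randn(2, 3, 224, 224)
logits, probs = model(x)
print(logits.shape)        # torch.Size([2, 10])
print(probs.sum(dim=1))    # each row sums to 1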


class InceptionV2(nn.Module):
    def __init__(self, num_class, block=InceptionV2_base, strideblock=Inceptionv2_dicount, grayscale=False):
        super(InceptionV2, self).__init__()
        self.block = block
        self.discount = strideblock
        dim = 1 if grayscale else 3
        # stem identical to InceptionV1 (224 -> 28)
        self.bottle = nn.Sequential(
            nn.Conv2d(dim, 64, 7, stride=2, padding=3, bias=False),
            nn.ReLU6(inplace=True),
            nn.MaxPool2d(3, stride=2, padding=1),
            nn.Conv2d(64, 64, 1, stride=1, padding=0, bias=False),
            nn.ReLU6(inplace=True),
            nn.Conv2d(64, 192, 3, stride=1, padding=1, bias=False),
            nn.ReLU6(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )
        self.layer1 = self._make_layer(192, [64,64,64,64,96,96,32])
        self.layer2 = self._make_layer(256, [64,64,96,64,96,96,64])
        # stride-2 reduction blocks replace the bare max-pool downsampling
        self.layer3 = nn.Sequential(strideblock(320))
        self.layer4 = self._make_layer(576, [224,64,96,96,128,128,128])
        self.layer5 = self._make_layer(576, [192,96,128,96,128,128,128])
        self.layer6 = self._make_layer(576, [160,128,160,128,160,160,96])
        self.layer7 = self._make_layer(576, [96,128,192,160,192,192,96])
        self.layer8 = nn.Sequential(strideblock(576, [128,192,192,256,256]))
        self.layer9 = self._make_layer(1024, [352,192,320,160,224,224,128])
        self.layer10 = self._make_layer(1024, [352,192,320,192,224,224,128])
        self.avgpool = nn.AvgPool2d(7)
        # note: nn.Dropout takes the drop probability, so p=0.8 drops 80% of units
        self.fc = nn.Sequential(
            nn.Dropout(p=0.8),
            nn.Linear(1024, num_class),
        )
        # He-style initialization for convs, small Gaussian for the classifier
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, (2. / n) ** .5)
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def forward(self, x):
        x = self.bottle(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.layer6(x)
        x = self.layer7(x)
        x = self.layer8(x)
        x = self.layer9(x)
        x = self.layer10(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        pro = F.softmax(x, dim=1)
        return x, pro

    def _make_layer(self, in_channel, layers):
        blocks = []
        blocks.append(self.block(in_channel, layers))
        return nn.Sequential(*blocks)
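
As a final structural check (my own sketch), both networks take 224*224 inputs down to CIFAR-10 logits, and their parameter counts can be compared directly:

v1_net = InceptionV1(num_class=10)
v2_net = InceptionV2(num_class=10)
x = torch.randn(1, 3, 224, 224)
print(v1_net(x)[0].shape, v2_net(x)[0].shape)   # both torch.Size([1, 10])
print(sum(p.numel() for p in v1_net.parameters()),
      sum(p.numel() for p in v2_net.parameters()))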

3. Experimental Comparison

      All the numbers below were obtained by resizing CIFAR-10 to the 224 scale and training both networks under the same 50-epoch budget (batch_size=256), which is admittedly a flawed protocol. A later improvement would be to train with staged SGD, stepping the learning rate down as training progresses; a minimal sketch of such a setup follows.
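The loop below is my own illustration of that staged-SGD setup; the learning rate, step size, and data settings are assumptions rather than the author's exact script.

import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

transform = transforms.Compose([
    transforms.Resize(224),   # upscale CIFAR-10 from 32*32 to the 224 input size
    transforms.ToTensor(),
])
train_set = torchvision.datasets.CIFAR10('./data', train=True, transform=transform, download=True)
train_loader = DataLoader(train_set, batch_size=256, shuffle=True, num_workers=4)

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = InceptionV1(num_class=10).to(device)
criterion = nn.CrossEntropyLoss()
# hyperparameters below are illustrative, not the post's exact values
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)

for epoch in range(50):
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        logits, _ = model(images)          # train on the raw logits
        loss = criterion(logits, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    scheduler.step()                       # staged learning-rate drop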

References:

[1] Sergey Ioffe, Christian Szegedy. Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. http://arxiv.org/abs/1502.03167
[2] Christian Szegedy et al. Going Deeper with Convolutions. http://arxiv.org/pdf/1409.4842v1.pdf
