Inception-ResNet-v1

In the previous post, the Inception-v4 model was already implemented; see that post and the paper "Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning" for reference. This post implements the Inception-ResNet-v1 model. Inception-ResNet-v2 differs from Inception-ResNet-v1 only in a handful of parameters (filter counts and grid sizes), so v2 is not implemented separately.
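The snippets below assume the standard Keras layers have already been imported. A minimal import sketch, assuming tf.keras (adjust if the previous post used standalone Keras):

from tensorflow.keras.layers import (Input, Conv2D, MaxPool2D, Activation, Dense,
                                     Dropout, GlobalAveragePooling2D, concatenate, add)
from tensorflow.keras.models import Model, Sequential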

Step 1: Implement InceptionResNetA

'''
Figure 10. 
The schema for 35 × 35 grid (Inception-ResNet-A)
module of Inception-ResNet-v1 network.
'''
def InceptionResNetA(activation_shape=(35,35,256)):
    input_ = Input(shape=activation_shape)
    
    conv1 = Conv2D(32, kernel_size=(1,1), strides=1, padding='SAME', activation='relu')(input_)
    
    conv2 = Conv2D(32, kernel_size=(1,1), strides=1, padding='SAME', activation='relu')(input_)
    conv3 = Conv2D(32, kernel_size=(3,3), strides=1, padding='SAME', activation='relu')(conv2)
    
    conv4 = Conv2D(32, kernel_size=(1,1), strides=1, padding='SAME', activation='relu')(input_)
    conv5 = Conv2D(32, kernel_size=(3,3), strides=1, padding='SAME', activation='relu')(conv4)
    conv6 = Conv2D(32, kernel_size=(3,3), strides=1, padding='SAME', activation='relu')(conv5)
    
    conv7 = concatenate([conv1, conv3, conv6])   # concatenate the three branches: 32 + 32 + 32 = 96 channels
    
    conv8 = Conv2D(256, kernel_size=(1,1), strides=1, padding='SAME')(conv7)
    
    
    add1 = add([input_, conv8])
    
    output_ = Activation('relu')(add1)
    
    model = Model(inputs=input_, outputs=output_)
#     model.summary()
    return model
# InceptionResNetA()
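One detail from the paper that this block (and the B and C blocks below) leaves out: the authors report that scaling the residual branch down by a factor of roughly 0.1 to 0.3 before the addition helps stabilize training. A minimal helper sketch follows; the name scaled_residual_add and the 0.17 factor are illustrative assumptions, not part of the original post. Inside InceptionResNetA it would replace add([input_, conv8]) with scaled_residual_add(input_, conv8).

from tensorflow.keras.layers import Lambda, add

def scaled_residual_add(shortcut, residual, scale=0.17):
    # Scale the residual branch before the element-wise addition,
    # as the paper suggests for stabilizing training of wide residual variants.
    scaled = Lambda(lambda t: t * scale)(residual)
    return add([shortcut, scaled])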

Step 2: Implement InceptionResNetB

'''
Figure 11. 
The schema for 17 × 17 grid (Inception-ResNet-B)
module of Inception-ResNet-v1 network.
'''
def InceptionResNetB(activation_shape=(17,17,896)):
    input_ = Input(shape=activation_shape)
    
    conv1 = Conv2D(128, kernel_size=(1,1), strides=1, padding='SAME', activation='relu')(input_)
    
    conv2 = Conv2D(128, kernel_size=(1,1), strides=1, padding='SAME', activation='relu')(input_)
    conv3 = Conv2D(128, kernel_size=(1,7), strides=1, padding='SAME', activation='relu')(conv2)
    conv4 = Conv2D(128, kernel_size=(7,1), strides=1, padding='SAME', activation='relu')(conv3)
    
    conv5 = concatenate([conv1, conv4])
    
    conv6 = Conv2D(896, kernel_size=(1,1), strides=1, padding='SAME')(conv5)
    
    add1 = add([input_, conv6])
    
    output_ = Activation('relu')(add1)
    
    model = Model(inputs=input_, outputs=output_)
#     model.summary()
    return model
# InceptionResNetB()

Step 3: Redefine ReductionB (the Inception-ResNet-v1 version differs from the Inception-v4 one)

'''
Figure 12. 
“Reduction-B” 17×17 to 8×8 grid-reduction module.
This module used by the smaller Inception-ResNet-v1 network in Figure 15.
'''
def ReductionB(concat_shape=(17,17,896)):
    input_ = Input(shape=concat_shape)
    
    maxpool = MaxPool2D(pool_size=(3,3), strides=2, padding='VALID')(input_)
    
    conv1 = Conv2D(256, kernel_size=(1,1), strides=1, padding='SAME', activation='relu')(input_)
    conv2 = Conv2D(384, kernel_size=(3,3), strides=2, padding='VALID', activation='relu')(conv1)
    
    conv3 = Conv2D(256, kernel_size=(1,1), strides=1, padding='SAME', activation='relu')(input_)
    conv4 = Conv2D(256, kernel_size=(3,3), strides=2, padding='VALID', activation='relu')(conv3)
    
    conv5 = Conv2D(256, kernel_size=(1,1), strides=1, padding='SAME', activation='relu')(input_)
    conv6 = Conv2D(256, kernel_size=(3,3), strides=1, padding='SAME', activation='relu')(conv5)
    conv7 = Conv2D(256, kernel_size=(3,3), strides=2, padding='VALID', activation='relu')(conv6)
    
    concat1 = concatenate([maxpool, conv2, conv4, conv7])
    
    model = Model(inputs=input_, outputs=concat1)
#     model.summary()
    
    return model
# ReductionB()
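As a quick sanity check, the four branches contribute 896 + 384 + 256 + 256 = 1792 channels and the stride-2 VALID operations reduce the 17 × 17 grid to 8 × 8, which is exactly the input shape InceptionResNetC expects:

m = ReductionB()
print(m.output_shape)   # expected: (None, 8, 8, 1792)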

Step 4: Implement InceptionResNetC

'''
Figure 13. 
The schema for 8×8 grid (Inception-ResNet-C) module
of Inception-ResNet-v1 network.
'''
def InceptionResNetC(activation_shape=(8,8,1792)):
    input_ = Input(shape=activation_shape)
    
    conv1 = Conv2D(192, kernel_size=(1,1), strides=1, padding='SAME', activation='relu')(input_)
    
    conv2 = Conv2D(192, kernel_size=(1,1), strides=1, padding='SAME', activation='relu')(input_)
    conv3 = Conv2D(192, kernel_size=(1,3), strides=1, padding='SAME', activation='relu')(conv2)
    conv4 = Conv2D(192, kernel_size=(3,1), strides=1, padding='SAME', activation='relu')(conv3)
    
    conv5 = concatenate([conv1, conv4])
    
    conv6 = Conv2D(1792, kernel_size=(1,1), strides=1, padding='SAME')(conv5)
    
    add1 = add([input_, conv6])
    
    output_ = Activation('relu')(add1)
    
    model = Model(inputs=input_, outputs=output_)
#     model.summary()
    return model
# InceptionResNetC()

Step 5: Implement the Stem

'''
Figure 14. 
The stem of the Inception-ResNet-v1 network
'''
def Stem(input_shape=(299, 299, 3)):
    input_ = Input(shape=input_shape)
    conv1 = Conv2D(32, kernel_size=(3,3), strides=2, padding='VALID', activation='relu')(input_)
    conv2 = Conv2D(32, kernel_size=(3,3), strides=1, padding='VALID', activation='relu')(conv1)
    conv3 = Conv2D(64, kernel_size=(3,3), strides=1, padding='SAME', activation='relu')(conv2)
    
    maxpool1 = MaxPool2D(pool_size=(3,3), strides=2, padding='VALID')(conv3)
    conv4 = Conv2D(80, kernel_size=(1,1), strides=1, padding='SAME', activation='relu')(maxpool1)
    
    conv5 = Conv2D(192, kernel_size=(3,3), strides=1, padding='VALID', activation='relu')(conv4)
    conv6 = Conv2D(256, kernel_size=(3,3), strides=2, padding='VALID', activation='relu')(conv5)
    
    model = Model(inputs=input_, outputs=conv6)
#     model.summary()
    return model
# Stem()
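The full model in the next step also reuses ReductionA from the previous Inception-v4 post. For completeness, here is a minimal sketch that follows Figure 7 of the paper and the call signature used below; treat it as an assumption about the earlier post's implementation, not a copy of it. With the Inception-ResNet-v1 filter bank (k, l, m, n) = (192, 192, 256, 384), its output is 17 × 17 × (256 + 384 + 256) = 17 × 17 × 896, matching the InceptionResNetB input.

def ReductionA(filter_bank_size=(192, 192, 256, 384), concat_shape=(35, 35, 256)):
    k, l, m, n = filter_bank_size
    input_ = Input(shape=concat_shape)
    
    maxpool = MaxPool2D(pool_size=(3,3), strides=2, padding='VALID')(input_)
    
    conv1 = Conv2D(n, kernel_size=(3,3), strides=2, padding='VALID', activation='relu')(input_)
    
    conv2 = Conv2D(k, kernel_size=(1,1), strides=1, padding='SAME', activation='relu')(input_)
    conv3 = Conv2D(l, kernel_size=(3,3), strides=1, padding='SAME', activation='relu')(conv2)
    conv4 = Conv2D(m, kernel_size=(3,3), strides=2, padding='VALID', activation='relu')(conv3)
    
    concat1 = concatenate([maxpool, conv1, conv4])
    
    model = Model(inputs=input_, outputs=concat1)
    return model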

Step 6: Implement the full InceptionResNetV1 model

'''
Figure 15. 
Schema for the Inception-ResNet-v1 network:
Stem, 5 x Inception-ResNet-A, Reduction-A, 10 x Inception-ResNet-B,
Reduction-B, 5 x Inception-ResNet-C, Average Pooling, Dropout, Softmax.
'''
def InceptionResNetV1(input_shape=(299,299,3), nclass=1000):
    model = Sequential()
    model.add(Input(shape=input_shape))
    model.add(Stem())
    for i in range(5):
        model.add(InceptionResNetA())
    model.add(ReductionA(filter_bank_size=[192, 192, 256, 384], concat_shape=(35, 35, 256)))
    for i in range(10):
        model.add(InceptionResNetB())
    model.add(ReductionB())
    for i in range(5):
        model.add(InceptionResNetC())
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.2))
    model.add(Dense(nclass, activation='softmax'))
    
    return model
model = InceptionResNetV1()
model.summary()
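As a quick sanity check (assuming the imports above), a random 299 × 299 RGB input should produce a 1000-way softmax:

import numpy as np

x = np.random.rand(1, 299, 299, 3).astype('float32')
print(model.predict(x).shape)   # expected: (1, 1000)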

If you spot any mistakes, corrections are welcome!
