tensorflow.keras实现Inception V3模型

参考简书链接: 深度神经网络Google Inception Net-V3结构图 - 简书

模型结构如下:

 代码如下:

from tensorflow.keras import Sequential,layers,Model


# Basic Conv2D -> BatchNorm -> ReLU building block used by every module below.
def my_cnn2d(input_value,my_filters,my_kernel_size,my_strides=1,my_padding='valid'):
    """Apply a 2-D convolution followed by batch normalization and ReLU.

    Parameters mirror keras.layers.Conv2D: `my_filters` is the output channel
    count, `my_kernel_size`/`my_strides`/`my_padding` are passed through.
    Returns the activated tensor.
    """
    conv_out = layers.Conv2D(
        filters=my_filters,
        kernel_size=my_kernel_size,
        strides=my_strides,
        padding=my_padding,
    )(input_value)
    normed = layers.BatchNormalization()(conv_out)
    return layers.Activation('relu')(normed)
def block_1_1(input_value):
    """Inception-A module #1: four parallel branches concatenated on channels.

    Branches (in concat order): 1x1(64) | 1x1(48)->5x5(64) |
    1x1(64)->3x3(96)->3x3(96) | 3x3 avg-pool -> 1x1(32).
    """
    branch_1x1 = my_cnn2d(input_value=input_value, my_filters=64, my_kernel_size=1, my_padding='same')

    branch_5x5 = my_cnn2d(input_value=input_value, my_filters=48, my_kernel_size=1, my_padding='same')
    branch_5x5 = my_cnn2d(input_value=branch_5x5, my_filters=64, my_kernel_size=5, my_padding='same')

    branch_3x3 = my_cnn2d(input_value=input_value, my_filters=64, my_kernel_size=1, my_padding='same')
    branch_3x3 = my_cnn2d(input_value=branch_3x3, my_filters=96, my_kernel_size=3, my_padding='same')
    branch_3x3 = my_cnn2d(input_value=branch_3x3, my_filters=96, my_kernel_size=3, my_padding='same')

    branch_pool = layers.AveragePooling2D(pool_size=(3, 3), strides=1, padding='same')(input_value)
    branch_pool = my_cnn2d(input_value=branch_pool, my_filters=32, my_kernel_size=1, my_padding='same')

    return layers.Concatenate()([branch_1x1, branch_5x5, branch_3x3, branch_pool])
    
    
    

def block_1_2(input_value):
    """Inception-A module #2 — same layout as block_1_1 but the pooling
    branch projects to 64 filters instead of 32.
    """
    branch_1x1 = my_cnn2d(input_value=input_value, my_filters=64, my_kernel_size=1, my_padding='same')

    branch_5x5 = my_cnn2d(input_value=input_value, my_filters=48, my_kernel_size=1, my_padding='same')
    branch_5x5 = my_cnn2d(input_value=branch_5x5, my_filters=64, my_kernel_size=5, my_padding='same')

    branch_3x3 = my_cnn2d(input_value=input_value, my_filters=64, my_kernel_size=1, my_padding='same')
    branch_3x3 = my_cnn2d(input_value=branch_3x3, my_filters=96, my_kernel_size=3, my_padding='same')
    branch_3x3 = my_cnn2d(input_value=branch_3x3, my_filters=96, my_kernel_size=3, my_padding='same')

    branch_pool = layers.AveragePooling2D(pool_size=(3, 3), strides=1, padding='same')(input_value)
    branch_pool = my_cnn2d(input_value=branch_pool, my_filters=64, my_kernel_size=1, my_padding='same')

    return layers.Concatenate()([branch_1x1, branch_5x5, branch_3x3, branch_pool])


def block_1_3(input_value):
    """Inception-A module #3 — identical layout to block_1_2 (separate
    function so each module gets its own weights).
    """
    branch_1x1 = my_cnn2d(input_value=input_value, my_filters=64, my_kernel_size=1, my_padding='same')

    branch_5x5 = my_cnn2d(input_value=input_value, my_filters=48, my_kernel_size=1, my_padding='same')
    branch_5x5 = my_cnn2d(input_value=branch_5x5, my_filters=64, my_kernel_size=5, my_padding='same')

    branch_3x3 = my_cnn2d(input_value=input_value, my_filters=64, my_kernel_size=1, my_padding='same')
    branch_3x3 = my_cnn2d(input_value=branch_3x3, my_filters=96, my_kernel_size=3, my_padding='same')
    branch_3x3 = my_cnn2d(input_value=branch_3x3, my_filters=96, my_kernel_size=3, my_padding='same')

    branch_pool = layers.AveragePooling2D(pool_size=(3, 3), strides=1, padding='same')(input_value)
    branch_pool = my_cnn2d(input_value=branch_pool, my_filters=64, my_kernel_size=1, my_padding='same')

    return layers.Concatenate()([branch_1x1, branch_5x5, branch_3x3, branch_pool])


def block_2_1(input_value):
    """Grid-size reduction module: halves spatial resolution (stride-2
    branches use 'valid' padding, so the max-pool branch matches them).

    Branches: 3x3(384)/s2 | 1x1(64)->3x3(96)->3x3(96)/s2 | 3x3 max-pool/s2.
    """
    branch_3x3 = my_cnn2d(input_value=input_value, my_filters=384, my_kernel_size=3, my_strides=2)

    branch_dbl = my_cnn2d(input_value=input_value, my_filters=64, my_kernel_size=1, my_padding='same')
    branch_dbl = my_cnn2d(input_value=branch_dbl, my_filters=96, my_kernel_size=3, my_padding='same')
    branch_dbl = my_cnn2d(input_value=branch_dbl, my_filters=96, my_kernel_size=3, my_strides=2)

    branch_pool = layers.MaxPool2D(pool_size=3, strides=2)(input_value)

    return layers.Concatenate()([branch_3x3, branch_dbl, branch_pool])
    
def block_2_2(input_value):
    """Inception-B module (factorized 7x7 convolutions), 128-filter variant.

    Branches (concat order): 1x1(192) | 1x1(128)->1x7(128)->7x1(192) |
    1x1->7x1->1x7->7x1 (all 128) -> 1x7(192) | 3x3 max-pool/s1 -> 1x1(192).
    NOTE(review): canonical Inception V3 uses average pooling in the pool
    branch here — confirm MaxPool2D is intentional.
    """
    branch_a = my_cnn2d(input_value=input_value, my_filters=192, my_kernel_size=1, my_padding='same')

    branch_b = my_cnn2d(input_value=input_value, my_filters=128, my_kernel_size=1, my_padding='same')
    branch_b = my_cnn2d(input_value=branch_b, my_filters=128, my_kernel_size=(1, 7), my_padding='same')
    branch_b = my_cnn2d(input_value=branch_b, my_filters=192, my_kernel_size=(7, 1), my_padding='same')

    branch_c = my_cnn2d(input_value=input_value, my_filters=128, my_kernel_size=(1, 1), my_padding='same')
    for ks in ((7, 1), (1, 7), (7, 1)):
        branch_c = my_cnn2d(input_value=branch_c, my_filters=128, my_kernel_size=ks, my_padding='same')
    branch_c = my_cnn2d(input_value=branch_c, my_filters=192, my_kernel_size=(1, 7), my_padding='same')

    branch_d = layers.MaxPool2D(pool_size=3, strides=1, padding='same')(input_value)
    branch_d = my_cnn2d(input_value=branch_d, my_filters=192, my_kernel_size=(1, 1), my_padding='same')

    return layers.Concatenate()([branch_a, branch_b, branch_c, branch_d])
    
def block_2_3(input_value):
    """Inception-B module, 160-filter variant (same layout as block_2_2,
    intermediate convolutions widened from 128 to 160 filters).
    """
    branch_a = my_cnn2d(input_value=input_value, my_filters=192, my_kernel_size=1, my_padding='same')

    branch_b = my_cnn2d(input_value=input_value, my_filters=160, my_kernel_size=1, my_padding='same')
    branch_b = my_cnn2d(input_value=branch_b, my_filters=160, my_kernel_size=(1, 7), my_padding='same')
    branch_b = my_cnn2d(input_value=branch_b, my_filters=192, my_kernel_size=(7, 1), my_padding='same')

    branch_c = my_cnn2d(input_value=input_value, my_filters=160, my_kernel_size=(1, 1), my_padding='same')
    for ks in ((7, 1), (1, 7), (7, 1)):
        branch_c = my_cnn2d(input_value=branch_c, my_filters=160, my_kernel_size=ks, my_padding='same')
    branch_c = my_cnn2d(input_value=branch_c, my_filters=192, my_kernel_size=(1, 7), my_padding='same')

    branch_d = layers.MaxPool2D(pool_size=3, strides=1, padding='same')(input_value)
    branch_d = my_cnn2d(input_value=branch_d, my_filters=192, my_kernel_size=(1, 1), my_padding='same')

    return layers.Concatenate()([branch_a, branch_b, branch_c, branch_d])

def block_2_4(input_value):
    """Inception-B module — identical layout to block_2_3 (160-filter
    variant); a separate function so it receives its own weights.
    """
    branch_a = my_cnn2d(input_value=input_value, my_filters=192, my_kernel_size=1, my_padding='same')

    branch_b = my_cnn2d(input_value=input_value, my_filters=160, my_kernel_size=1, my_padding='same')
    branch_b = my_cnn2d(input_value=branch_b, my_filters=160, my_kernel_size=(1, 7), my_padding='same')
    branch_b = my_cnn2d(input_value=branch_b, my_filters=192, my_kernel_size=(7, 1), my_padding='same')

    branch_c = my_cnn2d(input_value=input_value, my_filters=160, my_kernel_size=(1, 1), my_padding='same')
    for ks in ((7, 1), (1, 7), (7, 1)):
        branch_c = my_cnn2d(input_value=branch_c, my_filters=160, my_kernel_size=ks, my_padding='same')
    branch_c = my_cnn2d(input_value=branch_c, my_filters=192, my_kernel_size=(1, 7), my_padding='same')

    branch_d = layers.MaxPool2D(pool_size=3, strides=1, padding='same')(input_value)
    branch_d = my_cnn2d(input_value=branch_d, my_filters=192, my_kernel_size=(1, 1), my_padding='same')

    return layers.Concatenate()([branch_a, branch_b, branch_c, branch_d])

def block_2_5(input_value):
    """Inception-B module, 192-filter variant — every convolution in all
    branches outputs 192 filters.
    """
    branch_a = my_cnn2d(input_value=input_value, my_filters=192, my_kernel_size=1, my_padding='same')

    branch_b = my_cnn2d(input_value=input_value, my_filters=192, my_kernel_size=1, my_padding='same')
    branch_b = my_cnn2d(input_value=branch_b, my_filters=192, my_kernel_size=(1, 7), my_padding='same')
    branch_b = my_cnn2d(input_value=branch_b, my_filters=192, my_kernel_size=(7, 1), my_padding='same')

    branch_c = my_cnn2d(input_value=input_value, my_filters=192, my_kernel_size=(1, 1), my_padding='same')
    for ks in ((7, 1), (1, 7), (7, 1), (1, 7)):
        branch_c = my_cnn2d(input_value=branch_c, my_filters=192, my_kernel_size=ks, my_padding='same')

    branch_d = layers.MaxPool2D(pool_size=3, strides=1, padding='same')(input_value)
    branch_d = my_cnn2d(input_value=branch_d, my_filters=192, my_kernel_size=(1, 1), my_padding='same')

    return layers.Concatenate()([branch_a, branch_b, branch_c, branch_d])

def block_3_1(input_value):
    """Second grid-size reduction: halves spatial resolution.

    Branches: 1x1(192)->3x3(320)/s2 | 1x1(192)->1x7(192)->7x1(192)->3x3(192)/s2 |
    3x3 max-pool/s2. Stride-2 layers use 'valid' padding so all three agree.
    """
    branch_a = my_cnn2d(input_value=input_value, my_filters=192, my_kernel_size=1, my_padding='same')
    branch_a = my_cnn2d(input_value=branch_a, my_filters=320, my_kernel_size=3, my_strides=2)

    branch_b = my_cnn2d(input_value=input_value, my_filters=192, my_kernel_size=(1, 1), my_padding='same')
    branch_b = my_cnn2d(input_value=branch_b, my_filters=192, my_kernel_size=(1, 7), my_padding='same')
    branch_b = my_cnn2d(input_value=branch_b, my_filters=192, my_kernel_size=(7, 1), my_padding='same')
    branch_b = my_cnn2d(input_value=branch_b, my_filters=192, my_kernel_size=(3, 3), my_strides=2)

    branch_pool = layers.MaxPool2D(pool_size=3, strides=2)(input_value)

    return layers.Concatenate()([branch_a, branch_b, branch_pool])

def block_3_2(input_value):
    """Inception-C module (expanded filter bank with split 1x3/3x1 outputs).

    Branches (concat order): 1x1(320) |
    1x1(384) -> {1x3(384), 3x1(384)} concatenated |
    1x1(448) -> 3x3(384) -> {1x3(384), 3x1(384)} concatenated |
    3x3 avg-pool/s1 -> 1x1(192).

    Fix: layer_3_2 now consumes layer_3_1 (the 448-filter 1x1 reduction).
    Previously it read input_value directly, leaving layer_3_1 computed but
    never used — a dead branch head that also skipped the dimensionality
    reduction the module relies on.
    """
    layer_1_1=my_cnn2d(input_value=input_value,my_kernel_size=1,my_filters=320,my_padding='same')

    layer_2_1=my_cnn2d(input_value=input_value,my_kernel_size=1,my_filters=384,my_padding='same')
    layer_2_2_1=my_cnn2d(input_value=layer_2_1,my_kernel_size=(1,3),my_filters=384,my_padding='same')
    layer_2_2_2=my_cnn2d(input_value=layer_2_1,my_kernel_size=(3,1),my_filters=384,my_padding='same')
    layer_2_3=layers.Concatenate()([layer_2_2_1,layer_2_2_2])

    layer_3_1=my_cnn2d(input_value=input_value,my_kernel_size=1,my_filters=448,my_padding='same')
    # BUGFIX: feed the 1x1 reduction, not the raw module input.
    layer_3_2=my_cnn2d(input_value=layer_3_1,my_kernel_size=3,my_filters=384,my_padding='same')
    layer_3_3_1=my_cnn2d(input_value=layer_3_2,my_kernel_size=(1,3),my_filters=384,my_padding='same')
    layer_3_3_2=my_cnn2d(input_value=layer_3_2,my_kernel_size=(3,1),my_filters=384,my_padding='same')
    layer_3_4=layers.Concatenate()([layer_3_3_1,layer_3_3_2])

    layer_4_1=layers.AveragePooling2D(pool_size=3,strides=1,padding='same')(input_value)
    layer_4_2=my_cnn2d(input_value=layer_4_1,my_kernel_size=(1,1),my_filters=192,my_padding='same')

    return layers.Concatenate()([layer_1_1,layer_2_3,layer_3_4,layer_4_2])
    
    
    
    

def block_3_3(input_value):
    """Inception-C module — same layout as block_3_2 with its own weights.

    Fix: layer_3_2 now consumes layer_3_1 (the 448-filter 1x1 reduction).
    Previously it read input_value directly, leaving layer_3_1 computed but
    never used (same dead-branch bug as block_3_2 had).
    """
    layer_1_1=my_cnn2d(input_value=input_value,my_kernel_size=1,my_filters=320,my_padding='same')

    layer_2_1=my_cnn2d(input_value=input_value,my_kernel_size=1,my_filters=384,my_padding='same')
    layer_2_2_1=my_cnn2d(input_value=layer_2_1,my_kernel_size=(1,3),my_filters=384,my_padding='same')
    layer_2_2_2=my_cnn2d(input_value=layer_2_1,my_kernel_size=(3,1),my_filters=384,my_padding='same')
    layer_2_3=layers.Concatenate()([layer_2_2_1,layer_2_2_2])

    layer_3_1=my_cnn2d(input_value=input_value,my_kernel_size=1,my_filters=448,my_padding='same')
    # BUGFIX: feed the 1x1 reduction, not the raw module input.
    layer_3_2=my_cnn2d(input_value=layer_3_1,my_kernel_size=3,my_filters=384,my_padding='same')
    layer_3_3_1=my_cnn2d(input_value=layer_3_2,my_kernel_size=(1,3),my_filters=384,my_padding='same')
    layer_3_3_2=my_cnn2d(input_value=layer_3_2,my_kernel_size=(3,1),my_filters=384,my_padding='same')
    layer_3_4=layers.Concatenate()([layer_3_3_1,layer_3_3_2])

    layer_4_1=layers.AveragePooling2D(pool_size=3,strides=1,padding='same')(input_value)
    layer_4_2=my_cnn2d(input_value=layer_4_1,my_kernel_size=(1,1),my_filters=192,my_padding='same')

    return layers.Concatenate()([layer_1_1,layer_2_3,layer_3_4,layer_4_2])




# --- Network assembly: stem convolutions, then the Inception modules. ---
# Input is a 299x299 RGB image, the standard Inception V3 input size.
input_value=layers.Input(shape=(299,299,3))
# Stem: 3x3/s2 conv halves resolution (valid padding).
conv_1=my_cnn2d(input_value=input_value,my_filters=32,my_kernel_size=(3,3),my_strides=(2,2))

conv_2=my_cnn2d(input_value=conv_1,my_filters=32,my_kernel_size=(3,3),my_strides=(1,1))


conv_3=my_cnn2d(input_value=conv_2,my_filters=64,my_kernel_size=(3,3),my_strides=(1,1))


# NOTE(review): canonical Inception V3 uses 'valid' padding for this pool —
# confirm padding='same' is intended; it shifts downstream spatial sizes.
conv_3=layers.MaxPool2D(pool_size=(3,3),strides=2,padding='same')(conv_3)

conv_4=my_cnn2d(input_value=conv_3,my_kernel_size=(1,1),my_filters=80)
conv_5=my_cnn2d(input_value=conv_4,my_kernel_size=(3,3),my_filters=192)
conv_5=layers.MaxPool2D(pool_size=(3,3),strides=2)(conv_5)

# Three Inception-A modules.
print(conv_5.shape)
blk_1_1=block_1_1(conv_5)
blk_1_2=block_1_2(blk_1_1)
blk_1_3=block_1_3(blk_1_2)

# Grid reduction followed by four Inception-B modules.
print(blk_1_3.shape)
blk_2_1=block_2_1(blk_1_3)
blk_2_2=block_2_2(blk_2_1)
blk_2_3=block_2_3(blk_2_2)
blk_2_4=block_2_4(blk_2_3)
blk_2_5=block_2_5(blk_2_4)

# Second grid reduction followed by two Inception-C modules.
print(blk_2_5.shape)
blk_3_1=block_3_1(blk_2_5)
blk_3_2=block_3_2(blk_3_1)
blk_3_3=block_3_3(blk_3_2)
print(blk_3_3.shape)


# Assemble the Model from the functional graph (no classification head:
# output is the final feature map, not class probabilities).
inception_v3_model=Model(inputs=input_value,outputs=blk_3_3)
inception_v3_model.summary()

模型概要:截取部分

 

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值