InceptionNet V3(keras实现)

写在前面:第一次自己看着网络图用代码搭建(记录我的进步哈哈,大佬勿嘲)

----------------------------------------分割线---------------------------------------

一、Google InceptionNet V3结构图

（此处原为 InceptionNet V3 的结构图，图片在转载/导出时未能保留，请参考下文提到的原作者文章中的结构图。）

转载一波大佬的结构图（原文链接在转载时丢失，可自行搜索 "Inception V3 结构详解"），详细网络模块请移步原文。

二、利用TensorFlow2.x中集成的Keras搭建InceptionNet V3的源代码

import tensorflow as tf
import numpy as np
import cv2 as cv
from PIL import Image
import matplotlib.pyplot as plt
import os
import random as rd
from tensorflow.keras.layers import Input,Conv2D,BatchNormalization,\
    MaxPooling2D,AveragePooling2D,Concatenate,Dense,Flatten,Activation

def build_inception_v3(input_shape=(299, 299, 3)):
    """Build an InceptionNet V3-style convolutional backbone with Keras.

    The network follows the Inception V3 layout: a convolutional stem,
    3x Inception-A blocks (35x35 grid), a Reduction-A block, 4x
    Inception-B blocks (17x17 grid), a Reduction-B block and 2x
    Inception-C blocks (8x8 grid).  No pooling / classification head is
    attached; the model output is the final inception feature map.

    Args:
        input_shape: shape of the input image tensor (H, W, C);
            defaults to (299, 299, 3) as in the Inception V3 paper.

    Returns:
        A ``tf.keras.Model`` mapping the input image to the last
        Inception-C concatenation.  ``summary()`` is printed as a side
        effect, as in the original implementation.
    """

    def conv_bn(x, filters, kernel_size=(3, 3), strides=1, padding='valid'):
        """Conv2D -> BatchNormalization -> ReLU building unit."""
        x = Conv2D(filters=filters, kernel_size=kernel_size,
                   strides=strides, padding=padding)(x)
        # scale=False: the gamma of BN is redundant right before a ReLU.
        x = BatchNormalization(scale=False)(x)
        return Activation('relu')(x)

    def inception_a(x, pool_filters):
        """Inception-A block: 1x1 / 5x5 / double-3x3 / avg-pool branches.

        pool_filters: width of the 1x1 conv on the pooling branch
        (32 for the first A block, 64 for the following ones).
        """
        branch1x1 = conv_bn(x, 64, (1, 1), padding='same')

        branch5x5 = conv_bn(x, 48, (1, 1), padding='same')
        branch5x5 = conv_bn(branch5x5, 64, (5, 5), padding='same')

        branch3x3dbl = conv_bn(x, 64, (1, 1), padding='same')
        branch3x3dbl = conv_bn(branch3x3dbl, 96, (3, 3), padding='same')
        branch3x3dbl = conv_bn(branch3x3dbl, 96, (3, 3), padding='same')

        branch_pool = AveragePooling2D((3, 3), strides=1, padding='same')(x)
        branch_pool = conv_bn(branch_pool, pool_filters, (1, 1), padding='same')

        return Concatenate()([branch1x1, branch5x5, branch3x3dbl, branch_pool])

    def reduction_a(x):
        """Grid reduction 35x35 -> 17x17: strided 3x3 / strided double-3x3 / max-pool."""
        branch3x3 = conv_bn(x, 384, (3, 3), strides=2)

        # NOTE(review): 48 filters here; the reference implementation
        # uses 64 — kept as in the original source to preserve behavior.
        branch3x3dbl = conv_bn(x, 48, (1, 1), padding='same')
        branch3x3dbl = conv_bn(branch3x3dbl, 64, (3, 3), padding='same')
        branch3x3dbl = conv_bn(branch3x3dbl, 96, (3, 3), strides=2)

        branch_pool = MaxPooling2D(pool_size=(3, 3), strides=2)(x)

        return Concatenate()([branch3x3, branch3x3dbl, branch_pool])

    def inception_b(x, channels):
        """Inception-B block with factorized 7x7 convolutions (17x17 grid).

        channels: width of the intermediate 1x7 / 7x1 convolutions
        (128, 160 or 192 depending on the block's position).
        """
        branch1x1 = conv_bn(x, 192, (1, 1), padding='same')

        branch7x7 = conv_bn(x, channels, (1, 1), padding='same')
        branch7x7 = conv_bn(branch7x7, channels, (1, 7), padding='same')
        branch7x7 = conv_bn(branch7x7, 192, (7, 1), padding='same')

        branch7x7dbl = conv_bn(x, channels, (1, 1), padding='same')
        branch7x7dbl = conv_bn(branch7x7dbl, channels, (7, 1), padding='same')
        branch7x7dbl = conv_bn(branch7x7dbl, channels, (1, 7), padding='same')
        branch7x7dbl = conv_bn(branch7x7dbl, channels, (7, 1), padding='same')
        branch7x7dbl = conv_bn(branch7x7dbl, 192, (1, 7), padding='same')

        branch_pool = AveragePooling2D((3, 3), strides=1, padding='same')(x)
        branch_pool = conv_bn(branch_pool, 192, (1, 1), padding='same')

        return Concatenate()([branch1x1, branch7x7, branch7x7dbl, branch_pool])

    def reduction_b(x):
        """Grid reduction 17x17 -> 8x8: strided 3x3 / 7x7-then-strided-3x3 / max-pool."""
        branch3x3 = conv_bn(x, 192, (1, 1), padding='same')
        branch3x3 = conv_bn(branch3x3, 320, (3, 3), strides=2)

        branch7x7x3 = conv_bn(x, 192, (1, 1), padding='same')
        branch7x7x3 = conv_bn(branch7x7x3, 192, (1, 7), padding='same')
        branch7x7x3 = conv_bn(branch7x7x3, 192, (7, 1), padding='same')
        branch7x7x3 = conv_bn(branch7x7x3, 192, (3, 3), strides=2)

        branch_pool = MaxPooling2D(pool_size=(3, 3), strides=2)(x)

        return Concatenate()([branch3x3, branch7x7x3, branch_pool])

    def inception_c(x, split_in, dbl_in):
        """Inception-C block with expanded 1x3 / 3x1 filter banks (8x8 grid).

        split_in: width of the 1x1 conv feeding the split 1x3/3x1 branch.
        dbl_in:   width of the 1x1 conv feeding the 3x3-then-split branch.
        """
        branch1x1 = conv_bn(x, 320, (1, 1), padding='same')

        branch3x3 = conv_bn(x, split_in, (1, 1), padding='same')
        branch3x3 = Concatenate()([
            conv_bn(branch3x3, 384, (1, 3), padding='same'),
            conv_bn(branch3x3, 384, (3, 1), padding='same'),
        ])

        branch3x3dbl = conv_bn(x, dbl_in, (1, 1), padding='same')
        branch3x3dbl = conv_bn(branch3x3dbl, 384, (3, 3), padding='same')
        branch3x3dbl = Concatenate()([
            conv_bn(branch3x3dbl, 384, (1, 3), padding='same'),
            conv_bn(branch3x3dbl, 384, (3, 1), padding='same'),
        ])

        branch_pool = AveragePooling2D((3, 3), strides=1, padding='same')(x)
        branch_pool = conv_bn(branch_pool, 192, (1, 1), padding='same')

        return Concatenate()([branch1x1, branch3x3, branch3x3dbl, branch_pool])

    inputs = Input(shape=input_shape)

    # Stem: 299x299x3 -> 35x35x192 feature map.
    x = conv_bn(inputs, 32, (3, 3), strides=2)
    x = conv_bn(x, 32, (3, 3))
    x = conv_bn(x, 64, (3, 3))
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    # NOTE(review): the reference stem uses a 1x1 conv for the 80-filter
    # layer; kept as a 3x3 here to match the original source exactly.
    x = conv_bn(x, 80, (3, 3))
    x = conv_bn(x, 192, (3, 3))
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)

    # 3x Inception-A; only the first block uses 32 pool-projection filters.
    x = inception_a(x, 32)
    x = inception_a(x, 64)
    x = inception_a(x, 64)

    # Reduction-A, then 4x Inception-B with widths 128 / 160 / 160 / 192.
    x = reduction_a(x)
    x = inception_b(x, 128)
    x = inception_b(x, 160)
    x = inception_b(x, 160)
    x = inception_b(x, 192)

    # Reduction-B, then 2x Inception-C.
    x = reduction_b(x)
    x = inception_c(x, 320, 320)
    x = inception_c(x, 384, 448)

    inception_v3_model = tf.keras.Model(inputs=inputs, outputs=x)
    inception_v3_model.summary()
    return inception_v3_model

if __name__ == '__main__':
    # Build the model (this also prints the layer summary).
    # Fixed: the body was indented with a tab while the rest of the file
    # uses 4-space indentation.
    inception_v3_model = build_inception_v3()

三、待改进之处

  • 有些代码重复度高,可增加函数参数将一类结构网络用一个函数实现
  • 希望以后能熟练用类来搭建神经网络,这样显得专业<_<

四、学习心得

之前一直觉得自己搭建网络太难啦,想直接跑其他人的网络又看不懂代码,总觉得是自己不适合这个方向。之前看原理的时候感觉懂了,就是实现不来,便开始怀疑自己,后来发现还是框架不会用。于是花了点时间学习一些Keras函数详解,在慢慢跟着结构图上手感觉feel 倍儿棒!!加油,追梦人

  • 1
    点赞
  • 6
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
以下是使用Inception V3进行迁移学习实现的Python代码示例。假设你已经安装了TensorFlow和Keras,并且已经准备好了用于训练的数据集。 ```python import tensorflow as tf from tensorflow.keras.applications.inception_v3 import InceptionV3 from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.layers import Dense, GlobalAveragePooling2D from tensorflow.keras.models import Model # 设置数据集路径 train_data_dir = 'path/to/training/dataset' validation_data_dir = 'path/to/validation/dataset' # 设置训练参数 batch_size = 32 epochs = 10 num_classes = 2 # 创建Inception V3模型 base_model = InceptionV3(weights='imagenet', include_top=False) # 添加全局平均池化层 x = base_model.output x = GlobalAveragePooling2D()(x) # 添加全连接层和softmax激活层 x = Dense(1024, activation='relu')(x) predictions = Dense(num_classes, activation='softmax')(x) # 构建完整的模型 model = Model(inputs=base_model.input, outputs=predictions) # 冻结Inception V3模型的所有层 for layer in base_model.layers: layer.trainable = False # 编译模型 model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) # 数据增强生成器 train_datagen = ImageDataGenerator( rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True) validation_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow_from_directory( train_data_dir, target_size=(299, 299), batch_size=batch_size, class_mode='categorical') validation_generator = validation_datagen.flow_from_directory( validation_data_dir, target_size=(299, 299), batch_size=batch_size, class_mode='categorical') # 训练模型 model.fit_generator( train_generator, steps_per_epoch=train_generator.samples // batch_size, epochs=epochs, validation_data=validation_generator, validation_steps=validation_generator.samples // batch_size) # 保存模型 model.save('inceptionv3_model.h5') ``` 以上代码中,我们使用了Keras中的ImageDataGenerator类来进行数据增强,以增加数据集的多样性。我们还通过调用fit_generator()方法来训练模型,并使用了save()方法将训练好的模型保存到本地。
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值