Building VGG16, GoogLeNet-v1, and ResNet-18 with TensorFlow 2.1

1. VGG16

from tensorflow.keras.layers import Conv2D, Dense, BatchNormalization, ReLU, MaxPool2D, Flatten, Dropout, InputLayer
from tensorflow.keras.models import Sequential


def make_block(conv_num, filters):
    """

    :param conv_num: 卷积层数量
    :param filters: 卷积核数量
    :return:
    """
    conv_block = Sequential()
    for i in range(conv_num):
        conv_block.add(Conv2D(filters=filters, kernel_size=3, strides=1, padding='same',
                              name='block_' + str(conv_num) + '_' + str(i)))
        conv_block.add(BatchNormalization())
        conv_block.add(ReLU())
    conv_block.add(MaxPool2D())
    return conv_block
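As a quick shape check (a minimal sketch; the input size below is arbitrary): each block keeps the channel count at filters, and the final MaxPool2D (default 2x2, stride 2) halves the spatial dimensions.

import tensorflow as tf

block = make_block(2, 64)
# Two 3x3 'same' convolutions preserve 32x32; the 2x2 max pool halves it.
out = block(tf.random.normal((1, 32, 32, 3)))
print(out.shape)  # (1, 16, 16, 64)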


def create_vgg(block_param, class_num, input_shape):
    """Build a VGG-style network.

    :param block_param: block spec; for VGG16: ((2, 64), (2, 128), (3, 256), (3, 512), (3, 512))
    :param class_num: number of output classes
    :param input_shape: input size as (h, w, c)
    :return: the assembled Sequential model
    """
    vgg = Sequential()
    vgg.add(InputLayer(input_shape=input_shape, name="input_layer"))
    for (conv_num, filters) in block_param:
        vgg.add(make_block(conv_num, filters))
    vgg.add(Flatten(name='flatten'))
    vgg.add(Dense(units=2048, activation='relu', name='dense_1'))  # smaller than the original VGG16's 4096-unit FC layers
    vgg.add(Dropout(0.2))
    vgg.add(Dense(units=1024, activation='relu', name='dense_2'))
    vgg.add(Dropout(0.2))
    vgg.add(Dense(units=class_num, activation='softmax', name='out_layer'))

    return vgg


vgg16_block_parameters = ((2, 64), (2, 128), (3, 256), (3, 512), (3, 512))
vgg16 = create_vgg(vgg16_block_parameters, 10, (512, 512, 3))
vgg16.summary()
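A minimal training sketch (the optimizer, loss, and dummy data below are illustrative assumptions, not tuned settings):

import numpy as np

vgg16.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# A tiny random batch just to verify the model runs end to end.
x_dummy = np.random.rand(4, 512, 512, 3).astype('float32')
y_dummy = np.random.randint(0, 10, size=(4,))
vgg16.fit(x_dummy, y_dummy, epochs=1, batch_size=2)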

2. GoogLeNet-v1

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, AvgPool2D, Flatten


class Inception(tf.keras.layers.Layer):
    def __init__(self, c1, c2, c3, c4):
        super(Inception, self).__init__()
        self.block_1 = Conv2D(c1, kernel_size=1, strides=1, padding='same', activation='relu')
        self.block_2_1 = Conv2D(c2[0], kernel_size=1, strides=1, padding='same', activation='relu')
        self.block_2_2 = Conv2D(c2[1], kernel_size=3, strides=1, padding='same', activation='relu')
        self.block_3_1 = Conv2D(c3[0], kernel_size=1, strides=1, padding='same', activation='relu')
        self.block_3_2 = Conv2D(c3[1], kernel_size=5, strides=1, padding='same', activation='relu')
        self.block_4_1 = MaxPool2D(pool_size=3, strides=1, padding='same')
        self.block_4_2 = Conv2D(c4, kernel_size=1, strides=1, activation='relu', padding='same')

    def call(self, inputs, **kwargs):
        b1_out = self.block_1(inputs)
        b2_out = self.block_2_2(self.block_2_1(inputs))
        b3_out = self.block_3_2(self.block_3_1(inputs))
        b4_out = self.block_4_2(self.block_4_1(inputs))
        out = tf.concat((b1_out, b2_out, b3_out, b4_out), axis=-1)
        return out
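All four branches preserve the spatial size (stride 1, 'same' padding), so the concatenated output has c1 + c2[1] + c3[1] + c4 channels. A quick check with the first B3 Inception block (the input shape is illustrative):

inception = Inception(64, (96, 128), (16, 32), 32)
y = inception(tf.random.normal((1, 28, 28, 192)))
print(y.shape)  # (1, 28, 28, 256): 64 + 128 + 32 + 32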


# Auxiliary classifier branch (used only to assist training)
def aux_classifier(x_in, filters):
    x = AvgPool2D(pool_size=5, strides=3, padding='same')(x_in)
    x = Conv2D(filters[0], kernel_size=1, strides=1, padding='valid', activation='relu')(x)
    x = Flatten()(x)
    x = Dense(units=filters[1], activation='relu')(x)
    x = Dense(units=10, activation='softmax')(x)
    return x


# Block B1
model_inputs = tf.keras.Input(shape=(224, 224, 3))
x = Conv2D(filters=64, kernel_size=7, strides=2, activation='relu', padding='same')(model_inputs)
x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)

# Block B2
x = Conv2D(filters=192, kernel_size=3, strides=1, activation='relu', padding='same')(x)
x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)

# Block B3
x = Inception(64, (96, 128), (16, 32), 32)(x)
x = Inception(128, (128, 192), (32, 96), 64)(x)
x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)

# Block B4
x = Inception(192, (96, 208), (16, 48), 64)(x)
# Auxiliary output 1
aux_output_1 = aux_classifier(x, [128, 1024])
x = Inception(160, (112, 224), (24, 64), 64)(x)
x = Inception(128, (128, 256), (24, 64), 64)(x)
x = Inception(112, (144, 288), (32, 64), 64)(x)
# Auxiliary output 2
aux_output_2 = aux_classifier(x, [128, 1024])
x = Inception(256, (160, 320), (32, 128), 128)(x)
x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)

# Block B5
x = Inception(256, (160, 320), (32, 128), 128)(x)
x = Inception(384, (192, 384), (48, 128), 128)(x)
# Global average pooling
x = tf.keras.layers.GlobalAvgPool2D()(x)
# Output layer
main_outputs = tf.keras.layers.Dense(10, activation='softmax')(x)
# Create the model with tf.keras.Model, specifying the inputs and outputs
model = tf.keras.Model(inputs=model_inputs, outputs=[main_outputs, aux_output_1, aux_output_2])
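Since the model has three outputs, compile needs a loss per head; the original GoogLeNet paper weights the auxiliary losses by 0.3 during training (the optimizer and loss choice below are assumptions):

model.compile(optimizer='adam',
              loss=['sparse_categorical_crossentropy'] * 3,
              loss_weights=[1.0, 0.3, 0.3],
              metrics=['accuracy'])
model.summary()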

3. ResNet-18

# Import the required packages
import tensorflow as tf
from tensorflow.keras import layers, activations


# Residual block for ResNet
class Residual(tf.keras.Model):
    # Specify the block's channel count, whether to use a 1x1 conv, and the stride
    def __init__(self, num_channels, use_1x1conv=False, strides=1):
        super(Residual, self).__init__()
        # First conv layer: filter count, padding, kernel size, stride
        self.conv1 = layers.Conv2D(num_channels,
                                   padding='same',
                                   kernel_size=3,
                                   strides=strides)
        # Second conv layer: same filter count, 3x3 kernel, stride 1
        self.conv2 = layers.Conv2D(num_channels, kernel_size=3, padding='same')
        if use_1x1conv:
            self.conv3 = layers.Conv2D(num_channels,
                                       kernel_size=1,
                                       strides=strides)
        else:
            self.conv3 = None
        # Batch normalization layers
        self.bn1 = layers.BatchNormalization()
        self.bn2 = layers.BatchNormalization()

    # Forward pass
    def call(self, X):
        # Conv, BN, activation
        Y = activations.relu(self.bn1(self.conv1(X)))
        # Conv, BN
        Y = self.bn2(self.conv2(Y))
        # 1x1-convolve the input so its channel count (and spatial size) matches Y
        if self.conv3:
            X = self.conv3(X)
        # Add the shortcut and apply the activation
        return activations.relu(Y + X)
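A quick check of the two configurations (the shapes are illustrative): without the 1x1 conv the block preserves the input shape; with it and stride 2, the spatial size halves while the channel count changes.

blk = Residual(64)
print(blk(tf.random.normal((1, 56, 56, 64))).shape)   # (1, 56, 56, 64)

blk2 = Residual(128, use_1x1conv=True, strides=2)
print(blk2(tf.random.normal((1, 56, 56, 64))).shape)  # (1, 28, 28, 128)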


# A stage of the ResNet: a sequence of residual blocks
class ResnetBlock(tf.keras.layers.Layer):
    # Output channels (filter count), number of residual blocks in the stage, whether this is the first stage
    def __init__(self, num_channels, num_residuals, first_block=False):
        super(ResnetBlock, self).__init__()
        # Layers in this stage
        self.listLayers = []
        # Build all residual blocks in the stage
        for i in range(num_residuals):
            # For the first block of every stage except the first, use a 1x1 conv with
            # stride 2 (halves the feature map while increasing the channel count)
            if i == 0 and not first_block:
                self.listLayers.append(Residual(num_channels, use_1x1conv=True, strides=2))
            # Otherwise no 1x1 conv, stride 1
            else:
                self.listLayers.append(Residual(num_channels))

    # Forward pass
    def call(self, X):
        # Pass the input through each residual block in turn
        for layer in self.listLayers:
            X = layer(X)
        return X


# Assemble the full ResNet
class ResNet(tf.keras.Model):
    # Specify the number of residual blocks in each stage
    def __init__(self, num_blocks):
        super(ResNet, self).__init__()
        # Stem: 7x7 conv, stride 2
        self.conv = layers.Conv2D(64, kernel_size=7, strides=2, padding='same')
        # BN layer
        self.bn = layers.BatchNormalization()
        # Activation
        self.relu = layers.Activation('relu')
        # Max pooling
        self.mp = layers.MaxPool2D(pool_size=3, strides=2, padding='same')
        # Stage 1: 64 channels
        self.resnet_block1 = ResnetBlock(64, num_blocks[0], first_block=True)
        # Stage 2: 128 channels
        self.resnet_block2 = ResnetBlock(128, num_blocks[1])
        # Stage 3: 256 channels
        self.resnet_block3 = ResnetBlock(256, num_blocks[2])
        # Stage 4: 512 channels
        self.resnet_block4 = ResnetBlock(512, num_blocks[3])
        # Global average pooling
        self.gap = layers.GlobalAvgPool2D()
        # Fully connected classification layer
        self.fc = layers.Dense(units=10, activation=tf.keras.activations.softmax)

    # Forward pass
    def call(self, x):
        # Conv
        x = self.conv(x)
        # BN
        x = self.bn(x)
        # Activation
        x = self.relu(x)
        # Max pooling
        x = self.mp(x)
        # Residual stages
        x = self.resnet_block1(x)
        x = self.resnet_block2(x)
        x = self.resnet_block3(x)
        x = self.resnet_block4(x)
        # Global average pooling
        x = self.gap(x)
        # Fully connected layer
        x = self.fc(x)
        return x


# Instantiate the model: specify the residual-block count per stage
resnet18 = ResNet([2, 2, 2, 2])
resnet18.build(input_shape=(None, 256, 256, 3))
resnet18.summary()
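The same class builds deeper plain-block variants by changing the per-stage block counts; for example, ResNet-34 uses [3, 4, 6, 3] (ResNet-50 and deeper use bottleneck blocks, which this Residual class does not implement):

resnet34 = ResNet([3, 4, 6, 3])
resnet34.build(input_shape=(None, 256, 256, 3))
resnet34.summary()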
