yolov3从头实现(四)-- darknet53网络tf.keras搭建

darknet53网络tf.keras搭建

一、定义darknet块类

1 、darknet块网络结构

（图：darknet 残差块的网络结构示意图，原文图片缺失）

2、darknet块实现

# Residual ("darknet") block: 1x1 conv -> 3x3 conv, plus a skip connection.
class _ResidualBlock(tf.keras.Model):
    def __init__(self, filters, layer_idx, name=""):
        """
        :param filters: pair of filter counts, one per convolution
        :param layer_idx: pair of layer indices used to name the two convolutions
        :param name: name of this residual block
        """
        super(_ResidualBlock, self).__init__(name=name)
        n_filters_a, n_filters_b = filters
        idx_a, idx_b = layer_idx

        name_a = "layer_{}".format(str(idx_a))
        name_b = "layer_{}".format(str(idx_b))

        # 1x1 bottleneck convolution followed by batch normalization
        self.conv2a = layers.Conv2D(n_filters_a, (1, 1), padding='same',
                                    use_bias=False, name=name_a)
        self.bn2a = layers.BatchNormalization(epsilon=0.001, name=name_a)

        # 3x3 convolution followed by batch normalization
        self.conv2b = layers.Conv2D(n_filters_b, (3, 3), padding='same',
                                    use_bias=False, name=name_b)
        self.bn2b = layers.BatchNormalization(epsilon=0.001, name=name_b)

    def call(self, input_tensor, training=False):
        """
        Run the residual block.

        :param input_tensor: input tensor
        :param training: True while training (affects batch normalization)
        :return: block output, same shape as the input
        """
        out = self.bn2a(self.conv2a(input_tensor), training=training)
        out = tf.nn.leaky_relu(out, alpha=0.1)

        out = self.bn2b(self.conv2b(out), training=training)
        out = tf.nn.leaky_relu(out, alpha=0.1)

        # shortcut connection
        return out + input_tensor

二、定义卷积池化块类——即步长为2的卷积

1、卷积池化块结构

（图：卷积池化块（步长为 2 的卷积）结构示意图，原文图片缺失）

2、卷积池化块类实现

# Downsampling block: zero-pad, then a stride-2 3x3 convolution (replaces pooling).
class _ConvPoolBlock(tf.keras.Model):
    def __init__(self, filters, layer_idx, name=""):
        """
        :param filters: number of convolution filters
        :param layer_idx: layer index used to name the convolution
        :param name: block name
        """
        super(_ConvPoolBlock, self).__init__(name=name)

        conv_name = "layer_{}".format(str(layer_idx))

        # pad one row on top and one column on the left before the valid conv
        self.pad = layers.ZeroPadding2D(((1, 0), (1, 0)))
        self.conv = layers.Conv2D(filters, (3, 3), strides=(2, 2),
                                  padding='valid', use_bias=False, name=conv_name)
        self.bn = layers.BatchNormalization(epsilon=0.001, name=conv_name)

    def call(self, input_tensor, training=False):
        """
        :param input_tensor: input feature map
        :param training: True while training (affects batch normalization)
        :return: feature map downsampled by a factor of two
        """
        out = self.conv(self.pad(input_tensor))
        out = self.bn(out, training=training)
        return tf.nn.leaky_relu(out, alpha=0.1)

三、定义卷积块类

1、卷积块结构

（图：卷积块结构示意图，原文图片缺失）

四、由以上模块来组合成darknet53类

1、darknet53网络结构

（图：darknet53 整体网络结构示意图，原文图片缺失）

2、darknet53类的实现

# Darknet-53 backbone: produces feature maps at three scales for YOLOv3.
class Darknet53(tf.keras.Model):
    def __init__(self):
        super(Darknet53, self).__init__(name='')

        # stage 0: input (256, 256, 3)
        self.l0a = _ConvBlock(32, layer_idx=0, name="stage0")
        self.l0_pool = _ConvPoolBlock(64, layer_idx=1, name="stage0")

        # stage 1: (128, 128, 64), one residual block (layers 2-3)
        self.l1a = _ResidualBlock([32, 64], layer_idx=[2, 3], name="stage1")
        self.l1_pool = _ConvPoolBlock(128, layer_idx=4, name="stage1")

        # stage 2: (64, 64, 128), two residual blocks (layers 5-8)
        for i, suffix in enumerate("ab"):
            setattr(self, "l2" + suffix,
                    _ResidualBlock([64, 128], layer_idx=[5 + 2 * i, 6 + 2 * i], name="stage2"))
        self.l2_pool = _ConvPoolBlock(256, layer_idx=9, name="stage2")

        # stage 3: (32, 32, 256), eight residual blocks (layers 10-25)
        for i, suffix in enumerate("abcdefgh"):
            setattr(self, "l3" + suffix,
                    _ResidualBlock([128, 256], layer_idx=[10 + 2 * i, 11 + 2 * i], name="stage3"))
        self.l3_pool = _ConvPoolBlock(512, layer_idx=26, name="stage3")

        # stage 4: (16, 16, 512), eight residual blocks (layers 27-42)
        for i, suffix in enumerate("abcdefgh"):
            setattr(self, "l4" + suffix,
                    _ResidualBlock([256, 512], layer_idx=[27 + 2 * i, 28 + 2 * i], name="stage4"))
        self.l4_pool = _ConvPoolBlock(1024, layer_idx=43, name="stage4")

        # stage 5: (8, 8, 1024), four residual blocks (layers 44-51)
        for i, suffix in enumerate("abcd"):
            setattr(self, "l5" + suffix,
                    _ResidualBlock([512, 1024], layer_idx=[44 + 2 * i, 45 + 2 * i], name="stage5"))

        self.num_layers = 52
        self._init_vars()  # one dummy forward pass so every layer creates its variables

    def call(self, input_tensor, training=False):
        """
        :param input_tensor: image batch, e.g. shape (N, 256, 256, 3)
        :param training: True while training (affects batch normalization)
        :return: the feature maps produced after stages 3, 4 and 5
        """
        x = self.l0_pool(self.l0a(input_tensor, training), training)

        x = self.l1_pool(self.l1a(x, training), training)

        x = self.l2b(self.l2a(x, training), training)
        x = self.l2_pool(x, training)

        for suffix in "abcdefgh":
            x = getattr(self, "l3" + suffix)(x, training)
        output_stage3 = x
        x = self.l3_pool(x, training)

        for suffix in "abcdefgh":
            x = getattr(self, "l4" + suffix)(x, training)
        output_stage4 = x
        x = self.l4_pool(x, training)

        for suffix in "abcd":
            x = getattr(self, "l5" + suffix)(x, training)
        output_stage5 = x
        return output_stage3, output_stage4, output_stage5

    def get_variables(self, layer_idx, suffix=None):
        """
        Collect the variables that belong to one named layer.

        :param layer_idx: layer index the layer was named with
        :param suffix: optional variable-name suffix to narrow the match
        :return: list of matching variables
        """
        if suffix:
            find_name = "layer_{}/{}".format(layer_idx, suffix)
        else:
            find_name = "layer_{}/".format(layer_idx)
        return [v for v in self.variables if find_name in v.name]

    def _init_vars(self):
        # Feed one random image through the network so all variables get
        # built; also doubles as a smoke test and returns the three outputs.
        import numpy as np
        dummy = tf.constant(np.random.randn(1, 256, 256, 3).astype(np.float32))
        return self.call(dummy)

五、测试

if __name__ == '__main__':
    import numpy as np
    # Turn on eager execution (TF 1.x API).
    tf.enable_eager_execution()
    # Build a batch of fake images.
    n_images = 5
    img_h, img_w = 256, 256
    fake_batch = tf.random_uniform((n_images, img_h, img_w, 3))
    # Instantiate the backbone.
    net = Darknet53()
    # Forward pass; _init_vars() runs the net on its own random input.
    # feat3, feat4, feat5 = net(fake_batch, training=False)
    feat3, feat4, feat5 = net._init_vars()
    # Print the shapes of the three output feature maps.
    print(feat3.shape, feat4.shape, feat5.shape)

结果:输出的是网络的三个尺度的特征图的形状
（图：三个尺度特征图形状的运行输出截图，原文图片缺失）

六、源码

import tensorflow as tf

layers = tf.keras.layers

# Darknet-53 backbone: produces feature maps at three scales for YOLOv3.
class Darknet53(tf.keras.Model):
    def __init__(self):
        super(Darknet53, self).__init__(name='')

        # stage 0: input (256, 256, 3)
        self.l0a = _ConvBlock(32, layer_idx=0, name="stage0")
        self.l0_pool = _ConvPoolBlock(64, layer_idx=1, name="stage0")

        # stage 1: (128, 128, 64), one residual block (layers 2-3)
        self.l1a = _ResidualBlock([32, 64], layer_idx=[2, 3], name="stage1")
        self.l1_pool = _ConvPoolBlock(128, layer_idx=4, name="stage1")

        # stage 2: (64, 64, 128), two residual blocks (layers 5-8)
        for i, suffix in enumerate("ab"):
            setattr(self, "l2" + suffix,
                    _ResidualBlock([64, 128], layer_idx=[5 + 2 * i, 6 + 2 * i], name="stage2"))
        self.l2_pool = _ConvPoolBlock(256, layer_idx=9, name="stage2")

        # stage 3: (32, 32, 256), eight residual blocks (layers 10-25)
        for i, suffix in enumerate("abcdefgh"):
            setattr(self, "l3" + suffix,
                    _ResidualBlock([128, 256], layer_idx=[10 + 2 * i, 11 + 2 * i], name="stage3"))
        self.l3_pool = _ConvPoolBlock(512, layer_idx=26, name="stage3")

        # stage 4: (16, 16, 512), eight residual blocks (layers 27-42)
        for i, suffix in enumerate("abcdefgh"):
            setattr(self, "l4" + suffix,
                    _ResidualBlock([256, 512], layer_idx=[27 + 2 * i, 28 + 2 * i], name="stage4"))
        self.l4_pool = _ConvPoolBlock(1024, layer_idx=43, name="stage4")

        # stage 5: (8, 8, 1024), four residual blocks (layers 44-51)
        for i, suffix in enumerate("abcd"):
            setattr(self, "l5" + suffix,
                    _ResidualBlock([512, 1024], layer_idx=[44 + 2 * i, 45 + 2 * i], name="stage5"))

        self.num_layers = 52
        self._init_vars()  # one dummy forward pass so every layer creates its variables

    def call(self, input_tensor, training=False):
        """
        :param input_tensor: image batch, e.g. shape (N, 256, 256, 3)
        :param training: True while training (affects batch normalization)
        :return: the feature maps produced after stages 3, 4 and 5
        """
        x = self.l0_pool(self.l0a(input_tensor, training), training)

        x = self.l1_pool(self.l1a(x, training), training)

        x = self.l2b(self.l2a(x, training), training)
        x = self.l2_pool(x, training)

        for suffix in "abcdefgh":
            x = getattr(self, "l3" + suffix)(x, training)
        output_stage3 = x
        x = self.l3_pool(x, training)

        for suffix in "abcdefgh":
            x = getattr(self, "l4" + suffix)(x, training)
        output_stage4 = x
        x = self.l4_pool(x, training)

        for suffix in "abcd":
            x = getattr(self, "l5" + suffix)(x, training)
        output_stage5 = x
        return output_stage3, output_stage4, output_stage5

    def get_variables(self, layer_idx, suffix=None):
        """
        Collect the variables that belong to one named layer.

        :param layer_idx: layer index the layer was named with
        :param suffix: optional variable-name suffix to narrow the match
        :return: list of matching variables
        """
        if suffix:
            find_name = "layer_{}/{}".format(layer_idx, suffix)
        else:
            find_name = "layer_{}/".format(layer_idx)
        return [v for v in self.variables if find_name in v.name]

    def _init_vars(self):
        # Feed one random image through the network so all variables get
        # built; also doubles as a smoke test and returns the three outputs.
        import numpy as np
        dummy = tf.constant(np.random.randn(1, 256, 256, 3).astype(np.float32))
        return self.call(dummy)

# Plain convolution block: 3x3 conv -> batch normalization -> leaky ReLU.
class _ConvBlock(tf.keras.Model):
    def __init__(self, filters, layer_idx, name=""):
        """
        :param filters: number of convolution filters
        :param layer_idx: layer index used to name the convolution
        :param name: block name
        """
        super(_ConvBlock, self).__init__(name=name)

        conv_name = "layer_{}".format(str(layer_idx))

        self.conv = layers.Conv2D(filters, (3, 3), strides=(1, 1), padding='same',
                                  use_bias=False, name=conv_name)
        self.bn = layers.BatchNormalization(epsilon=0.001, name=conv_name)

    def call(self, input_tensor, training=False):
        """
        :param input_tensor: input feature map
        :param training: True while training (affects batch normalization)
        :return: activated feature map, same spatial size as the input
        """
        out = self.bn(self.conv(input_tensor), training=training)
        return tf.nn.leaky_relu(out, alpha=0.1)

# Downsampling block: zero-pad, then a stride-2 3x3 convolution (replaces pooling).
class _ConvPoolBlock(tf.keras.Model):
    def __init__(self, filters, layer_idx, name=""):
        """
        :param filters: number of convolution filters
        :param layer_idx: layer index used to name the convolution
        :param name: block name
        """
        super(_ConvPoolBlock, self).__init__(name=name)

        conv_name = "layer_{}".format(str(layer_idx))

        # pad one row on top and one column on the left before the valid conv
        self.pad = layers.ZeroPadding2D(((1, 0), (1, 0)))
        self.conv = layers.Conv2D(filters, (3, 3), strides=(2, 2),
                                  padding='valid', use_bias=False, name=conv_name)
        self.bn = layers.BatchNormalization(epsilon=0.001, name=conv_name)

    def call(self, input_tensor, training=False):
        """
        :param input_tensor: input feature map
        :param training: True while training (affects batch normalization)
        :return: feature map downsampled by a factor of two
        """
        out = self.conv(self.pad(input_tensor))
        out = self.bn(out, training=training)
        return tf.nn.leaky_relu(out, alpha=0.1)

# Residual ("darknet") block: 1x1 conv -> 3x3 conv, plus a skip connection.
class _ResidualBlock(tf.keras.Model):
    def __init__(self, filters, layer_idx, name=""):
        """
        :param filters: pair of filter counts, one per convolution
        :param layer_idx: pair of layer indices used to name the two convolutions
        :param name: name of this residual block
        """
        super(_ResidualBlock, self).__init__(name=name)
        n_filters_a, n_filters_b = filters
        idx_a, idx_b = layer_idx

        name_a = "layer_{}".format(str(idx_a))
        name_b = "layer_{}".format(str(idx_b))

        # 1x1 bottleneck convolution followed by batch normalization
        self.conv2a = layers.Conv2D(n_filters_a, (1, 1), padding='same',
                                    use_bias=False, name=name_a)
        self.bn2a = layers.BatchNormalization(epsilon=0.001, name=name_a)

        # 3x3 convolution followed by batch normalization
        self.conv2b = layers.Conv2D(n_filters_b, (3, 3), padding='same',
                                    use_bias=False, name=name_b)
        self.bn2b = layers.BatchNormalization(epsilon=0.001, name=name_b)

    def call(self, input_tensor, training=False):
        """
        Run the residual block.

        :param input_tensor: input tensor
        :param training: True while training (affects batch normalization)
        :return: block output, same shape as the input
        """
        out = self.bn2a(self.conv2a(input_tensor), training=training)
        out = tf.nn.leaky_relu(out, alpha=0.1)

        out = self.bn2b(self.conv2b(out), training=training)
        out = tf.nn.leaky_relu(out, alpha=0.1)

        # shortcut connection
        return out + input_tensor

# =========================================测试以上程序是否能跑通===================================================
if __name__ == '__main__':
    import numpy as np
    # Turn on eager execution (TF 1.x API).
    tf.enable_eager_execution()
    # Build a batch of fake images.
    n_images = 5
    img_h, img_w = 256, 256
    fake_batch = tf.random_uniform((n_images, img_h, img_w, 3))
    # Instantiate the backbone.
    net = Darknet53()
    # Forward pass; _init_vars() runs the net on its own random input.
    # feat3, feat4, feat5 = net(fake_batch, training=False)
    feat3, feat4, feat5 = net._init_vars()
    # Print the shapes of the three output feature maps.
    print(feat3.shape, feat4.shape, feat5.shape)
  • 1
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值