Densenet(2018)

在这里插入图片描述

实验结果

在这里插入图片描述

不同Densenet对比

在这里插入图片描述

ResNet与DenseNet对比
Network

在这里插入图片描述

在这里插入图片描述

改进方法

resnet 网络结构如下:
在这里插入图片描述
densenet:
在这里插入图片描述

code
import tensorflow as tf
from tensorflow.keras import layers, Sequential, Model


class BottleNeck(layers.Layer):
    """One dense layer: BN-ReLU-Conv(1x1)-BN-ReLU-Conv(3x3), concat with input.

    Per the DenseNet paper, the 1x1 bottleneck convolution produces 4k
    feature maps, where k (growth_rate) is the growth hyper-parameter.
    """

    def __init__(self, growth_rate):
        super(BottleNeck, self).__init__()
        # 1x1 bottleneck width: 4k channels, as prescribed in the paper.
        bottleneck_channels = growth_rate * 4

        self.bottle_neck = Sequential()
        self.bottle_neck.add(layers.BatchNormalization())
        self.bottle_neck.add(layers.ReLU())
        self.bottle_neck.add(layers.Conv2D(bottleneck_channels, (1, 1), use_bias=False))
        self.bottle_neck.add(layers.BatchNormalization())
        self.bottle_neck.add(layers.ReLU())
        self.bottle_neck.add(layers.Conv2D(growth_rate, (3, 3), padding='same', use_bias=False))

    def call(self, x, training=False):
        # Dense connectivity: append k new feature maps to the input channels.
        new_features = self.bottle_neck(x, training=training)
        return tf.concat([x, new_features], axis=-1)


class Transition(layers.Layer):
    """Transition between dense blocks: BN -> 1x1 conv (channel compression) -> 2x2 average pool."""

    def __init__(self, out_channels):
        super(Transition, self).__init__()
        self.down_sample = Sequential()
        self.down_sample.add(layers.BatchNormalization())
        self.down_sample.add(layers.Conv2D(out_channels, (1, 1), use_bias=False))
        self.down_sample.add(layers.AveragePooling2D((2, 2), strides=2))

    def call(self, x, training=False):
        # Halve the spatial resolution and compress channels.
        return self.down_sample(x, training=training)


class DenseNet(Model):
    """DenseNet-BC classifier for small images (default 32x32x3, e.g. CIFAR).

    Architecture: 3x3 stem conv -> [dense block -> transition] * (n-1)
    -> final dense block -> BN -> ReLU -> global average pool -> softmax FC.

    Args:
        num_classes: number of output classes.
        block: dense-layer class (e.g. BottleNeck), called as block(growth_rate).
        nblocks: list giving the number of dense layers in each dense block,
            e.g. [6, 12, 24, 16] for DenseNet-121.
        growth_rate: k, the number of feature maps each dense layer adds.
        reduction: compression factor theta applied at each transition layer.
        input_shape: HWC shape of the input images.
    """

    def __init__(self,
                 num_classes,
                 block,
                 nblocks,
                 growth_rate=12,
                 reduction=0.5,  # compression factor theta
                 input_shape=(32, 32, 3)):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate
        # Stem produces 2k channels before the first dense block.
        inner_channels = 2 * growth_rate

        self.conv1 = Sequential([
            layers.Input(input_shape),
            layers.Conv2D(inner_channels, (3, 3),
                          padding='same', use_bias=False)
        ])

        self.features = Sequential()
        # All but the last dense block are each followed by a transition.
        for idx in range(len(nblocks) - 1):
            # BUG FIX: the original added block(nblocks[idx]) -- a SINGLE
            # dense layer with the layer count misused as its growth rate.
            # Build the full dense block of nblocks[idx] layers instead,
            # which is what the channel bookkeeping below assumes.
            self.features.add(self._make_dense_layers(block, nblocks[idx]))
            inner_channels += growth_rate * nblocks[idx]

            # Compression (paper Sec. 3): the transition emits
            # theta * inner_channels feature maps.
            out_channels = int(reduction * inner_channels)
            self.features.add(Transition(out_channels))
            inner_channels = out_channels

        # The last dense block is not followed by a transition.
        self.features.add(self._make_dense_layers(block, nblocks[-1]))
        inner_channels += growth_rate * nblocks[-1]

        self.features.add(layers.BatchNormalization())
        self.features.add(layers.ReLU())

        self.gap = layers.GlobalAveragePooling2D()
        self.fc = layers.Dense(num_classes, activation='softmax')

    def _make_dense_layers(self, block, nlayers):
        """Stack `nlayers` dense layers, each growing channels by growth_rate."""
        dense_block = Sequential()
        for _ in range(nlayers):
            dense_block.add(block(self.growth_rate))
        return dense_block

    def call(self, inputs, training=False):
        x = self.conv1(inputs)
        x = self.features(x, training=training)
        x = self.gap(x)
        x = self.fc(x)
        return x


def densenet121(num_classes):
    """Build a DenseNet-121 (blocks [6, 12, 24, 16], k=32)."""
    return DenseNet(num_classes, BottleNeck,
                    nblocks=[6, 12, 24, 16], growth_rate=32)


def densenet169(num_classes):
    """Build a DenseNet-169 (blocks [6, 12, 32, 32], k=32)."""
    return DenseNet(num_classes, BottleNeck,
                    nblocks=[6, 12, 32, 32], growth_rate=32)


def densenet201(num_classes):
    """Build a DenseNet-201 (blocks [6, 12, 48, 32], k=32)."""
    return DenseNet(num_classes, BottleNeck,
                    nblocks=[6, 12, 48, 32], growth_rate=32)


def densenet161(num_classes):
    """Build a DenseNet-161 (blocks [6, 12, 36, 24], k=48)."""
    return DenseNet(num_classes, BottleNeck,
                    nblocks=[6, 12, 36, 24], growth_rate=48)

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值