Implementing the dense_block module in DenseNet

Environment: TensorFlow 2.1 + Python 3.7

The code is as follows:

# Implementation of DenseNet-121
import tensorflow as tf
from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, AveragePooling2D
from tensorflow.keras.layers import MaxPooling2D, Activation, Concatenate, GlobalAveragePooling2D
from tensorflow.keras import Input, Model

def dense_block(x, blocks):
    # Stack `blocks` densely connected conv layers, each adding 32 channels
    for i in range(blocks):
        x = conv2d_block(x, 32)
    return x

def conv2d_block(x, growth_rate):
    # In TF, axes are 0-indexed; axis 3 is the channel axis (channels_last)
    bn_axis = 3

    # Bottleneck: BN-ReLU-Conv1x1 producing 4*k channels
    x1 = BatchNormalization(axis=bn_axis, epsilon=1.001e-5)(x)
    x1 = Activation('relu')(x1)
    x1 = Conv2D(growth_rate * 4, (1, 1), padding='same')(x1)

    # BN-ReLU-Conv3x3 producing k new feature maps
    x1 = BatchNormalization(axis=bn_axis, epsilon=1.001e-5)(x1)
    x1 = Activation('relu')(x1)
    x1 = Conv2D(growth_rate, (3, 3), padding='same')(x1)

    # Dense connectivity: concatenate the new features onto the input
    x = Concatenate()([x, x1])
    return x

def transition_block(x, reduction):
    bn_axis = 3
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5)(x)
    x = Activation('relu')(x)

    # 1x1 conv compresses the channel count by the factor `reduction`
    x = Conv2D(int(x.shape[bn_axis] * reduction), (1, 1), padding='same')(x)
    # 2x2 average pooling halves the spatial resolution
    x = AveragePooling2D(strides=(2, 2))(x)

    return x

def denseNet(inputs, blocks):
    # Stem: 7x7 conv with stride 2 (24 filters here; see the note below) + 3x3 max pool
    x = Conv2D(24, (7, 7), padding='same', strides=2)(inputs)
    x = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)
    # Alternate dense blocks and transition blocks; no transition after the last block
    for i, block in enumerate(blocks):
        x = dense_block(x, block)
        if i != len(blocks) - 1:
            x = transition_block(x, 0.5)
    # Classification head: global average pooling + 1000-way softmax
    # (GlobalAveragePooling2D already outputs a 2D tensor, so no Flatten is needed)
    x = GlobalAveragePooling2D()(x)
    x = Dense(1000, activation='softmax')(x)
    return x

inputs = Input([224, 224, 3])
blocks = [6, 12, 24, 16]  # DenseNet-121 block configuration

outputs = denseNet(inputs, blocks)
model = Model(inputs, outputs, name="densenet")
model.summary()

Output:

"""
Model: "densenet"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            [(None, 224, 224, 3) 0                                            
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 112, 112, 24) 3552        input_1[0][0]                    
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 56, 56, 24)   0           conv2d[0][0]                     
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 56, 56, 24)   96          max_pooling2d[0][0]              
__________________________________________________________________________________________________
activation (Activation)         (None, 56, 56, 24)   0           batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 56, 56, 128)  3200        activation[0][0]                 
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 56, 56, 128)  512         conv2d_1[0][0]                   
__________________________________________________________________________________________________
activation_1 (Activation)       (None, 56, 56, 128)  0           batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 56, 56, 32)   36896       activation_1[0][0]               
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 56, 56, 56)   0           max_pooling2d[0][0]              
                                                                 conv2d_2[0][0]                   
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 56, 56, 56)   224         concatenate[0][0]                
__________________________________________________________________________________________________
activation_2 (Activation)       (None, 56, 56, 56)   0           batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 56, 56, 128)  7296        activation_2[0][0]               
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 56, 56, 128)  512         conv2d_3[0][0]                   
__________________________________________________________________________________________________
activation_3 (Activation)       (None, 56, 56, 128)  0           batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 56, 56, 32)   36896       activation_3[0][0]               
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 56, 56, 88)   0           concatenate[0][0]                
                                                                 conv2d_4[0][0]                   
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 56, 56, 88)   352         concatenate_1[0][0]              
__________________________________________________________________________________________________
activation_4 (Activation)       (None, 56, 56, 88)   0           batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 56, 56, 128)  11392       activation_4[0][0]               
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 56, 56, 128)  512         conv2d_5[0][0]                   
__________________________________________________________________________________________________
activation_5 (Activation)       (None, 56, 56, 128)  0           batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 56, 56, 32)   36896       activation_5[0][0]               
__________________________________________________________________________________________________
concatenate_2 (Concatenate)     (None, 56, 56, 120)  0           concatenate_1[0][0]              
                                                                 conv2d_6[0][0]                   
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 56, 56, 120)  480         concatenate_2[0][0]              
__________________________________________________________________________________________________
activation_6 (Activation)       (None, 56, 56, 120)  0           batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 56, 56, 128)  15488       activation_6[0][0]               
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 56, 56, 128)  512         conv2d_7[0][0]                   
__________________________________________________________________________________________________
activation_7 (Activation)       (None, 56, 56, 128)  0           batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 56, 56, 32)   36896       activation_7[0][0]               
__________________________________________________________________________________________________
concatenate_3 (Concatenate)     (None, 56, 56, 152)  0           concatenate_2[0][0]              
                                                                 conv2d_8[0][0]                   
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 56, 56, 152)  608         concatenate_3[0][0]              
__________________________________________________________________________________________________
activation_8 (Activation)       (None, 56, 56, 152)  0           batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 56, 56, 128)  19584       activation_8[0][0]               
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 56, 56, 128)  512         conv2d_9[0][0]                   
__________________________________________________________________________________________________
activation_9 (Activation)       (None, 56, 56, 128)  0           batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 56, 56, 32)   36896       activation_9[0][0]               
__________________________________________________________________________________________________
concatenate_4 (Concatenate)     (None, 56, 56, 184)  0           concatenate_3[0][0]              
                                                                 conv2d_10[0][0]                  
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 56, 56, 184)  736         concatenate_4[0][0]              
__________________________________________________________________________________________________
activation_10 (Activation)      (None, 56, 56, 184)  0           batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_11 (Conv2D)              (None, 56, 56, 128)  23680       activation_10[0][0]              
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 56, 56, 128)  512         conv2d_11[0][0]                  
__________________________________________________________________________________________________
activation_11 (Activation)      (None, 56, 56, 128)  0           batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_12 (Conv2D)              (None, 56, 56, 32)   36896       activation_11[0][0]              
__________________________________________________________________________________________________
concatenate_5 (Concatenate)     (None, 56, 56, 216)  0           concatenate_4[0][0]              
                                                                 conv2d_12[0][0]                  
"""

In the summary above, concatenate_5 takes concatenate_4 and conv2d_12 as its inputs: the 32 new feature maps produced by each layer are concatenated onto all of the features accumulated so far. This is the dense connectivity inside dense_block, and it is the core idea of DenseNet.
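As a quick sanity check (a minimal sketch reusing the dense_block defined above), the channel count after a block of n layers is the input channel count plus n * 32:

# Each conv2d_block appends growth_rate (32) channels, so 24 input
# channels + 6 layers gives 24 + 6*32 = 216 -- matching concatenate_5.
check_in = Input([56, 56, 24])
check_out = dense_block(check_in, 6)
print(check_out.shape)  # (None, 56, 56, 216)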

Note: the 7x7 convolution after the input layer uses 24 filters here; the paper sets it to 2*k, i.e. 64 for a growth rate of k = 32.
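A minimal sketch of the stem with the paper's setting (assuming growth rate k = 32; this would replace the first two lines of denseNet above):

def stem(inputs, growth_rate=32):
    # Initial convolution per the paper: 2*k filters, 7x7, stride 2
    x = Conv2D(2 * growth_rate, (7, 7), padding='same', strides=2)(inputs)
    x = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)
    return x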

References:

https://blog.csdn.net/weixin_44791964/article/details/105472196

An SE (Squeeze-and-Excitation) module can be added to DenseNet as follows:

1. Define the SE module function. The original snippet used the TF1-era `tf.layers` API, which is unavailable in TensorFlow 2.1; it is rewritten here with `tf.keras` layers:

```
from tensorflow.keras.layers import Reshape, Multiply

def se_block(input_tensor, compress_rate=16):
    # number of input channels
    num_channels = input_tensor.shape[-1]
    # compressed channel count for the excitation bottleneck
    reduced_channels = num_channels // compress_rate
    # Squeeze: global average pooling over the spatial dimensions
    squeeze = GlobalAveragePooling2D()(input_tensor)
    # Excitation: two fully connected layers
    excitation = Dense(reduced_channels, activation='relu')(squeeze)
    excitation = Dense(num_channels, activation='sigmoid')(excitation)
    # reshape to (1, 1, C) so it broadcasts over height and width
    excitation = Reshape((1, 1, num_channels))(excitation)
    # Scale: multiply the input channel-wise by the excitation weights
    return Multiply()([input_tensor, excitation])
```

2. Apply the SE module inside the dense block (the original version fed each layer only the previous layer's output; fixed here so every layer sees all accumulated features, as dense connectivity requires):

```
def dense_block_se(input_tensor, num_layers, growth_rate, bottleneck_width=4):
    x = input_tensor
    for i in range(num_layers):
        # output channels of the bottleneck layer
        bottleneck_channels = growth_rate * bottleneck_width
        # BN-ReLU-Conv1x1 -> BN-ReLU-Conv3x3
        y = BatchNormalization(epsilon=1.001e-5)(x)
        y = Activation('relu')(y)
        y = Conv2D(bottleneck_channels, (1, 1), padding='same')(y)
        y = BatchNormalization(epsilon=1.001e-5)(y)
        y = Activation('relu')(y)
        y = Conv2D(growth_rate, (3, 3), padding='same')(y)
        # apply the SE module
        y = se_block(y)
        # dense connectivity: concatenate the new features with all previous ones
        x = Concatenate()([x, y])
    return x
```

Note: the SE module should be applied after the last convolution of each dense layer.
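A short usage example (with a hypothetical input shape, just to exercise the functions above):

```
# Build a toy model containing a single SE dense block
se_inputs = Input([56, 56, 64])
se_outputs = dense_block_se(se_inputs, num_layers=6, growth_rate=32)
Model(se_inputs, se_outputs, name="se_dense_block").summary()
```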