DenseNet Reproduction Walkthrough

Reproducing DenseNet-121

1. A Brief Introduction to DenseNet

I won't go into much detail here. Having reproduced it, I personally think DenseNet's biggest advantage over ResNet is that it further reduces information loss. Its structural innovations include the dense_block and transition_block; explanations of the related terms are in the comments of my code (my English is poor, so please bear with me).
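
As a minimal sketch of that difference (my own illustration, not code from this reproduction): ResNet merges a block's input and output by element-wise addition, while DenseNet concatenates them along the channel axis, so later layers still see every earlier feature map unchanged.

from keras.layers import Input, Conv2D, Add, Concatenate

inputs = Input(shape=(56, 56, 64))
features = Conv2D(64, 3, padding='same')(inputs)

# ResNet-style merge: element-wise addition, channel count stays at 64.
res_out = Add()([inputs, features])                   # (None, 56, 56, 64)

# DenseNet-style merge: channel concatenation, channels accumulate.
dense_out = Concatenate(axis=3)([inputs, features])   # (None, 56, 56, 128)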

2. Reproduction Process (Based on 102flowers)


Data preparation

from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array
from keras.utils.np_utils import to_categorical
import numpy as np
from PIL import Image
import os


def readfile(img_path):
    # Load an image, force 3 RGB channels (some dataset images may be
    # grayscale), resize to the network input size, and scale to [0, 1].
    img = Image.open(img_path).convert('RGB')
    img = img.resize((224, 224))
    img_array = img_to_array(img) / 255.0
    return img_array


def get_data():
    # Directory-based generators as an alternative input pipeline.
    # Note: flow_from_directory defaults to target_size=(256, 256);
    # pass target_size=(224, 224) if these are used to feed the model below.
    main_path = 'D:/data/oxford 102flowers/102flowers_data'
    train_generator = ImageDataGenerator()
    valid_generator = ImageDataGenerator()
    test_generator = ImageDataGenerator()

    train_gain = train_generator.flow_from_directory(main_path + '/' + 'train')
    valid_gain = valid_generator.flow_from_directory(main_path + '/' + 'valid')
    test_gain = test_generator.flow_from_directory(main_path + '/' + 'test')

    return train_gain, valid_gain, test_gain


def get_train_data():
    x_train = []
    y_train = []
    main_path = 'D:/data/oxford 102flowers/102flowers_data/train'
    # Sort so the class-index mapping is deterministic across runs.
    classes = sorted(os.listdir(main_path))
    for label, class_name in enumerate(classes):
        class_path = main_path + '/' + class_name
        for img_name in os.listdir(class_path):
            img_path = class_path + '/' + img_name
            img_array = readfile(img_path)
            x_train.append(img_array)
            # to_categorical expects integer labels, so store the class
            # index rather than the raw directory name.
            y_train.append(label)
            print("Image {} is read successfully.".format(img_name[:-4]))
    x_train = np.array(x_train)
    y_train = np.array(y_train)
    y_train = to_categorical(y_train)

    return x_train, y_train
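
Note that get_data above is never called by the main script; if you train from the generators instead of get_train_data, the input size and scaling have to be matched by hand. A hedged sketch (the target_size, rescale, and batch_size values are my assumptions, not from the original code):

from keras.preprocessing.image import ImageDataGenerator

main_path = 'D:/data/oxford 102flowers/102flowers_data'

# rescale mirrors the /255.0 normalisation done in readfile().
train_generator = ImageDataGenerator(rescale=1/255.0)
train_gain = train_generator.flow_from_directory(
    main_path + '/train',
    target_size=(224, 224),    # must match the model's input_shape
    class_mode='categorical',  # one-hot labels for categorical_crossentropy
    batch_size=16,
)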

The building blocks I need



from keras.layers import *
from keras import backend

def conv_block(conv_block_inputs, k, block_name):
    '''
    The basic DenseNet unit (BN-ReLU-Conv twice). Its output is
    concatenated with its input so that earlier features are not lost.
    '''
    bn_axis = 3
    bn_epsilon = 1.001e-5
    x = BatchNormalization(axis=bn_axis, epsilon=bn_epsilon, name=block_name+'-bn1')(conv_block_inputs)
    x = Activation('relu', name=block_name+'-relu1')(x)
    # 1x1 "bottleneck" conv; bias is disabled because BN already has a shift term.
    x = Conv2D(4*k, 1, use_bias=False, name=block_name+'-conv1')(x)

    x = BatchNormalization(axis=bn_axis, epsilon=bn_epsilon, name=block_name+'-bn2')(x)
    x = Activation('relu', name=block_name+'-relu2')(x)
    x = Conv2D(k, 3, padding='same', use_bias=False, name=block_name+'-conv2')(x)
    # Dense connection: unlike ResNet's additive residual, the input is
    # concatenated with the new features along the channel axis.
    x = Concatenate(axis=bn_axis, name=block_name+'-concat')([conv_block_inputs, x])
    '''
    Concatenate takes a list of tensors whose shapes match (except along the
    given axis) and returns a single tensor joining them along that axis.
    '''

    return x


def dense_block(dense_block_inputs, blocks_num, name_of_block):
    '''
    "dense_block" chains blocks_num conv_blocks and concatenates
    their features.
    '''
    # Repeat conv_block "blocks_num" times.
    for i in range(blocks_num):
        dense_block_inputs = conv_block(conv_block_inputs=dense_block_inputs, k=32, block_name=name_of_block+'-block'+ str(i+1))
        # Each pass through conv_block ends in a Concatenate, so the
        # running tensor accumulates the output of every block before it.
    dense_block_outputs = dense_block_inputs

    return dense_block_outputs


def transition_block(transition_block_inputs, reduction, name_of_block):
    '''
    "transition_block" sits between dense blocks. It compresses the channel
    count by the factor "reduction" via a 1x1 conv, and halves the width and
    height via average pooling.
    '''
    bn_axis = 3
    bn_epsilon = 1.001e-5
    x = BatchNormalization(axis=bn_axis, epsilon=bn_epsilon, name=name_of_block+'-bn')(transition_block_inputs)
    x = Activation('relu', name=name_of_block+'-relu')(x)
    # int_shape(x)[bn_axis] is the current channel count; keep a fraction
    # "reduction" of it.
    x = Conv2D(int(backend.int_shape(x)[bn_axis]*reduction), 1, use_bias=False, name=name_of_block+'-conv')(x)
    x = AveragePooling2D(2, strides=2, name=name_of_block+'-pool')(x)

    return x
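
A quick way to sanity-check both blocks is to trace the channel arithmetic: each conv_block adds k=32 channels, and a transition block with reduction=0.5 halves both the channel count and the spatial size. A small sketch (the dummy input shape is my own choice; run it in this module or after importing the two functions):

from keras.layers import Input
from keras import backend

x = Input(shape=(56, 56, 64))
y = dense_block(x, blocks_num=6, name_of_block='check_db')
print(backend.int_shape(y))   # (None, 56, 56, 256): 64 + 6 * 32 channels

z = transition_block(y, 0.5, name_of_block='check_tb')
print(backend.int_shape(z))   # (None, 28, 28, 128): channels and H/W halved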

Model construction (the model-variant selection code below was adapted from another blogger's post)

from keras.models import Model
from keras.layers import Input, ZeroPadding2D, Conv2D, BatchNormalization, Activation, MaxPooling2D, GlobalAveragePooling2D, Dense
from keras.optimizers import Adam
from blocks import dense_block, transition_block


def Densenet(dense_blocks_num_list, input_shape, classes):
    bn_axis = 3
    bn_epsilon = 1.001e-5
    inputs = Input(shape=input_shape)
    x = ZeroPadding2D(padding=((3, 3), (3, 3)))(inputs)
    x = Conv2D(64, 7, strides=2, use_bias=False, name="conv_layer1-conv1")(x)
    x = BatchNormalization(axis=bn_axis, epsilon=bn_epsilon, name='conv_layer1-bn')(x)
    x = Activation('relu', name='conv_layer1-relu')(x)

    x = ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = MaxPooling2D(3, strides=2, name='mp1')(x)

    x = dense_block(x, dense_blocks_num_list[0], name_of_block='db1')
    x = transition_block(x, 0.5, name_of_block='tb1')

    x = dense_block(x, dense_blocks_num_list[1], name_of_block='db2')
    x = transition_block(x, 0.5, name_of_block='tb2')

    x = dense_block(x, dense_blocks_num_list[2], name_of_block='db3')
    x = transition_block(x, 0.5, name_of_block='tb3')

    x = dense_block(x, dense_blocks_num_list[3], name_of_block='db4')
    # Per the DenseNet paper, the final dense block is followed by BN + ReLU
    # and global pooling rather than another transition block.
    x = BatchNormalization(axis=bn_axis, epsilon=bn_epsilon, name='final-bn')(x)
    x = Activation('relu', name='final-relu')(x)

    x = GlobalAveragePooling2D(name='global_avg_pool')(x)
    x = Dense(units=classes, activation='softmax', name='fc')(x)

    if dense_blocks_num_list == [6, 12, 24, 16]:
        model = Model(inputs=inputs, outputs=x, name='densenet121')
    elif dense_blocks_num_list == [6, 12, 32, 32]:
        model = Model(inputs=inputs, outputs=x, name='densenet169')
    elif dense_blocks_num_list == [6, 12, 48, 32]:
        model = Model(inputs=inputs, outputs=x, name='densenet201')
    else:
        model = Model(inputs=inputs, outputs=x, name='densenet')

    model.compile(
        metrics=['accuracy'],
        loss='categorical_crossentropy',
        optimizer=Adam(1e-3)
    )
    model.summary()
    model.save('./Densenet.h5')
    return model
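
Since Densenet() saves the compiled (still untrained) model to ./Densenet.h5, it can be restored later with Keras's standard loader; a minimal sketch:

from keras.models import load_model

# Restores the architecture, weights, and compile configuration.
model = load_model('./Densenet.h5')
model.summary()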

The main script

from data_ready import get_train_data
from model import Densenet
from keras.callbacks import TensorBoard

model = Densenet(dense_blocks_num_list=[6, 12, 24, 16], input_shape=(224, 224, 3), classes=102)
x_train, y_train = get_train_data()
ts = TensorBoard(log_dir='./tensorboard')
model.fit(x_train, y_train, epochs=30, batch_size=16, callbacks=[ts])
model.save('./Densenet121.h5')
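
After training, a single image can be pushed through the saved model as a sanity check. A sketch reusing readfile from data_ready (the image path here is hypothetical):

import numpy as np
from keras.models import load_model
from data_ready import readfile

model = load_model('./Densenet121.h5')
# readfile resizes to 224x224 and scales to [0, 1]; add a batch dimension.
img = np.expand_dims(readfile('D:/data/some_flower.jpg'), axis=0)
probs = model.predict(img)
print('predicted class index:', np.argmax(probs[0]))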

Results:

[training results screenshot]

3. Summary of Pros and Cons

Pros:

  • Reduces the risk of vanishing gradients
  • Strengthens feature propagation and reduces information loss
  • Reduces the parameter count to some extent

Cons (there are surely others, but with limited time and ability I did not pin them down =-=):

  • It is quite hungry for GPU memory (I trained with a batch size of 16)