Week J3: DenseNet

🏡 My environment:
● Language: Python 3.8
● Editor: Google Colab
● Deep learning framework: TensorFlow

I. Preliminaries

1. Set up the GPU

Use the GPU if the device has one available; otherwise fall back to the CPU.

import matplotlib.pyplot as plt
import numpy as np

import tensorflow as tf

gpus = tf.config.list_physical_devices("GPU")

if gpus:
    gpu0 = gpus[0]                                        # If there are multiple GPUs, use only GPU 0
    tf.config.experimental.set_memory_growth(gpu0, True)  # Allocate GPU memory on demand
    tf.config.set_visible_devices([gpu0],"GPU")

gpus


2. Import the data

The dataset lives in the ./data/bird_photos/ directory (mounted at /content/bird_photos on Colab).

import os,PIL,random,pathlib

data_dir = r'/content/bird_photos'
data_dir = pathlib.Path(data_dir)
data_paths = list(data_dir.glob('*'))
classeNames = [str(path).split("/")[3] for path in data_paths]
classeNames


f = []
for root, dirs, files in os.walk(data_dir):
    for name in files:
        f.append(os.path.join(root, name))
print("图片总数:",len(f))


import torch
from torchvision import transforms, datasets

train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # Resize input images to a uniform size
    # transforms.RandomHorizontalFlip(), # Random horizontal flip
    transforms.ToTensor(),          # Convert a PIL Image or numpy.ndarray to a tensor and scale it to [0,1]
    transforms.Normalize(           # Standardize --> roughly zero-mean, unit-variance (Gaussian), which helps the model converge
        mean=[0.485, 0.456, 0.406], 
        std=[0.229, 0.224, 0.225])  # These mean/std values are the standard ImageNet statistics
])

total_data = datasets.ImageFolder("./data/bird_photos/", transform=train_transforms)
total_data.class_to_idx


3. Split the dataset

train_size = int(0.8 * len(total_data))
test_size  = len(total_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])
train_dataset, test_dataset
batch_size = 32

train_dl = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=1)
test_dl = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          num_workers=1)
for X, y in test_dl:
    print("Shape of X [N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
    break

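The PyTorch loaders above produce train_dl/test_dl, while the visualization and training code that follows expects tf.data pipelines named train_ds and val_ds (with a class_names attribute). Below is a minimal sketch of building them with tf.keras.utils.image_dataset_from_directory; the split ratio, seed, and image size are assumptions, not values from the original post:

import tensorflow as tf

batch_size = 32
img_height, img_width = 224, 224

train_ds = tf.keras.utils.image_dataset_from_directory(
    "/content/bird_photos",      # same directory as above
    validation_split=0.2,        # assumed 80/20 split
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

val_ds = tf.keras.utils.image_dataset_from_directory(
    "/content/bird_photos",
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

class_names = train_ds.class_names   # the four bird-species folder names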

4. Display image information


#%%
import matplotlib.pyplot as plt
import numpy as np
def imageshow(images, labels=None):
    plt.figure(dpi=100, figsize=(12, 4))
    for i in range(8):
        plt.subplot(2, 4, i + 1)
        img = images[i].numpy().astype("uint8")
        plt.imshow(img)

        if labels is not None:
            plt.title(train_ds.class_names[int(labels[i])])
        plt.axis('off')
        plt.tight_layout(pad=0.5)
    plt.show()

# Take one batch from the tf.data pipeline and display it
images, labels = next(iter(train_ds))
imageshow(images, labels)


II. Building the DenseNet model by hand

1. Model

(DenseNet architecture diagram)

2. Code

#%%
from tensorflow import keras
from tensorflow.keras import layers

''' Bottleneck layer (DenseNet-B): BN -> ReLU -> 1x1 Conv -> BN -> ReLU -> 3x3 Conv '''
class BottleNeck(keras.Model):
    def __init__(self, growth_rate, bn_size = 4, dropout = 0.3):
        super().__init__()
        self.bn1 = layers.BatchNormalization()
        self.relu = layers.Activation("relu")   # no trailing comma here, otherwise the attribute becomes a tuple
        self.conv1 = layers.Conv2D(filters=bn_size * growth_rate, kernel_size=(1, 1),
                                   strides=1, padding='same')
        self.bn2 = layers.BatchNormalization()
        self.conv2 = layers.Conv2D(filters=growth_rate, kernel_size=(3, 3),
                                   strides=1, padding='same')
        self.dropout = layers.Dropout(rate=dropout)

        self.listLayers = [
            self.bn1,
            self.relu,
            self.conv1,
            self.bn2,
            self.relu,
            self.conv2,
            self.dropout
            ]

    def call(self, x):
        tem = x
        for layer in self.listLayers:
            x = layer(x)
        return layers.concatenate([tem, x], axis=-1)   # dense connectivity: concatenate input with the new features



class Transition(tf.keras.Model):
    def __init__(self, growth_rate):
        super().__init__()
        self.bn1 = layers.BatchNormalization()
        self.relu = layers.Activation('relu')
        self.conv1 = layers.Conv2D(filters = growth_rate, kernel_size=(1, 1),
                                   strides = 1, activation = 'relu', padding='same')
        self.pooling = layers.AveragePooling2D(pool_size=(2,2), strides = 2,padding='same')

        self.listLayers = [
            self.bn1,
            self.relu,
            self.conv1,
            self.pooling
        ]
    def call(self,x):
        for layer in self.listLayers:
            x = layer(x)
        return x

class DenseBlock(tf.keras.Model):
    def __init__(self, num_layer, growth_rate, bn_size = 4, dropout = 0.3, efficient=False):
        super().__init__()
        self.efficient = efficient
        self.listLayers = []
        for _ in range(num_layer):
            self.listLayers.append(BottleNeck(growth_rate, bn_size = bn_size, dropout = dropout))

    def call(self, x):
        for layer in self.listLayers:
            if self.efficient:
                # Memory-efficient mode: recompute activations in the backward pass instead of storing them
                x = tf.recompute_grad(layer)(x)
            else:
                x = layer(x)
        return x


class DenseNet(tf.keras.Model):
    def __init__(self, num_init_feature, growth_rate, block_config, num_classes, 
                 bn_size=4, dropout=0.3, compression_rate=0.5, efficient=False):
        super().__init__()
        self.num_channels = num_init_feature
        self.conv = layers.Conv2D(filters = num_init_feature, kernel_size=7,
                                strides = 2, padding='same')
        self.bn = layers.BatchNormalization()
        self.relu = layers.Activation('relu')
        self.max_pool = layers.MaxPool2D(pool_size=3, strides=2, padding='same')

        self.dense_block_layers = []
        for i in block_config[:-1]:
            self.dense_block_layers.append( DenseBlock(num_layer =i, growth_rate = growth_rate,
                                                 bn_size = bn_size, dropout = dropout, efficient=efficient))
            self.num_channels = int(compression_rate * (self.num_channels + growth_rate * i))
            self.dense_block_layers.append( Transition(self.num_channels))
            
        self.dense_block_layers.append( DenseBlock(num_layer =block_config[-1], growth_rate = growth_rate,
                                             bn_size = bn_size, dropout = dropout, efficient=efficient))
        
        self.avgpool = layers.GlobalAveragePooling2D()
        self.fc = tf.keras.layers.Dense(units=num_classes, activation=tf.keras.activations.softmax)
            
    def call(self,x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.max_pool(x)
        
        for layer in self.dense_block_layers:
            x = layer(x)

        
        x = self.avgpool(x)
        return self.fc(x)

model = DenseNet(num_init_feature=64,
                 growth_rate=32,
                 block_config=[6,12,24,16],
                 compression_rate=0.5,
                 num_classes = 4,
                 dropout=0.0,
                 efficient=True)

''' Display the network structure: push a dummy input through each layer and print the output shapes '''
x = tf.random.normal((1, 224, 224, 3))
for layer in model.layers:
    x = layer(x)
    print(layer.name, 'output shape:\t', x.shape)
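If a parameter-count summary is also wanted, one option (a small optional sketch, not in the original post) is to build the subclassed model explicitly and call summary():

# Optional: build the model on a fixed input shape and print a layer/parameter summary.
model.build(input_shape=(None, 224, 224, 3))
model.summary()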


III. Train the model

1. Configure the data pipeline, compile, and train

AUTOTUNE = tf.data.AUTOTUNE

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)


opt = tf.keras.optimizers.legacy.Adam(learning_rate=0.002,decay=0.01)

model.compile(optimizer=opt,
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),  # the model already ends in a softmax
              metrics=['accuracy'])

epochs = 200
history = model.fit(
  train_ds,
  validation_data=val_ds,
  epochs=epochs
)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(epochs)

plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

2. Training results

(Training log screenshots)

3. Predict on specified images

import numpy as np
plt.figure(figsize=(18, 3))
plt.suptitle('Prediction results')

for images, label in val_ds.take(1):
    for i in range(8):
        ax = plt.subplot(1, 8, i + 1)

        plt.imshow(images[i].numpy().astype("uint8"))
        img_array = tf.expand_dims(images[i], 0)
        predictions = model.predict(img_array)

        # Note: this assumes classeNames is in the same order as the dataset's label indices
        plt.title(classeNames[np.argmax(predictions)])
        plt.axis('off')
   


Summary

Compared with ResNet, DenseNet uses a much denser connectivity pattern: each layer is connected to all subsequent layers, and features are combined by stacking rather than by element-wise addition. This enables feature reuse and makes the network more parameter-efficient.
Put simply, a ResNet shortcut sums feature maps element-wise at corresponding positions, so the number of channels does not change; DenseNet instead merges along the channel dimension, so the values themselves stay the same and are simply concatenated together.
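A tiny sketch (not from the original post) to make the difference concrete: element-wise addition keeps the channel count, while concatenation stacks the channels.

import tensorflow as tf

a = tf.random.normal((1, 56, 56, 64))   # feature map from an earlier layer
b = tf.random.normal((1, 56, 56, 64))   # feature map produced by the current layer

res_style   = a + b                        # ResNet-style shortcut: element-wise sum
dense_style = tf.concat([a, b], axis=-1)   # DenseNet-style connection: channel concatenation

print(res_style.shape)    # (1, 56, 56, 64)  -> channel count unchanged
print(dense_style.shape)  # (1, 56, 56, 128) -> channels stacked: 64 + 64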

DenseNet network structure: DenseBlock + Transition (a worked channel-count example is given below)
DenseBlock basic layer: BN + ReLU + 3x3 Conv
DenseNet-B bottleneck layer: BN + ReLU + 1x1 Conv + BN + ReLU + 3x3 Conv
Transition layer: BN + ReLU + 1x1 Conv + 2x2 AvgPooling
DenseNet configurations for ImageNet: (table figure omitted)
Algorithm comparison: (figure omitted)
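As a worked example of how the channel count evolves through DenseBlocks and Transitions, here is the calculation for the configuration built above (num_init_feature=64, growth_rate=32, block_config=[6, 12, 24, 16], compression_rate=0.5, i.e. DenseNet-121):

channels = 64                      # channels after the initial 7x7 convolution
growth_rate = 32
compression = 0.5
block_config = [6, 12, 24, 16]

for i, num_layer in enumerate(block_config):
    channels += num_layer * growth_rate            # each bottleneck adds growth_rate channels
    print(f"after dense block {i + 1}: {channels}")
    if i < len(block_config) - 1:                  # no transition after the last block
        channels = int(compression * channels)     # the 1x1 conv in the transition halves the channels
        print(f"after transition {i + 1}:  {channels}")
# Prints: 256 -> 128 -> 512 -> 256 -> 1024 -> 512 -> 1024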

References

DenseNet in practice
