Keras+VGG16特征图可视化
一、VGG16结构理解
1. 可视化结构图
2. VGGNet各级别网络结构图
3. VGG16网络结构图
总结:三种不同的形式,方便大家对VGG16架构有更为直观的认识。
二、Keras实现VGG16
代码实现
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 7 09:15:36 2020
@author: wuzhendong
"""
import keras
#cifar10数据集:60000张彩色图像,这些图像是32*32,分为10个类,每类6000张图
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.optimizers import SGD
from keras import regularizers
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
# CIFAR-10: 60,000 colour images of size 32*32 in 10 classes (6,000 per class).
# Downloaded automatically (~163 MB); if the download fails, fetch it manually:
# https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Cast to float32 AND scale pixel values into [0, 1].
# Fix: the original code only cast to float32 and fed raw 0-255 values into
# the network, which slows and destabilises training for a BN/conv stack.
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
# One-hot encode the integer class labels (10 classes).
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
# 70/30 train/validation split of the original training set.
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.3)
# L2 weight-decay coefficient applied via kernel_regularizer on every
# Conv2D layer below; penalising large weights helps avoid over-fitting.
weight_decay = 0.0005
nb_epoch = 50    # number of training epochs
batch_size = 32  # samples per gradient update
# layer1: input shape 32*32*3
model = Sequential()
# First conv layer: 64 filters of size 3*3 (NOTE: the original comment
# claimed 32 filters, but the code clearly passes 64). Keras Conv2D
# stride defaults to 1*1; with stride=1*1 and padding='same' the spatial
# shape is preserved, so this layer's output is still 32*32.
model.add(Conv2D(64, (3, 3), padding='same',strides=(1, 1),
input_shape=(32,32,3),kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
# Batch-normalise the activations once per conv block.
model.add(BatchNormalization())
model.add(Dropout(0.3))
# layer2: input 32*32*64
model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
# Max pooling with stride 2*2. NOTE: the Keras default padding for
# pooling is 'valid'; here padding='same' is passed explicitly. Either
# way, 32*32 input halves to a 16*16*64 output.
model.add(MaxPooling2D(pool_size=(2, 2),strides=(2,2),padding='same'))
# layer3: input 16*16*64
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
# layer4: input 16*16*128
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
# Default stride equals pool_size (2*2) and default padding is 'valid':
# 16*16 input -> 8*8 output.
model.add(MaxPooling2D(pool_size=(2, 2)))
# layer5: input 8*8*128
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer6 8*8*256
model.add(Conv2D(256, (