3.1.1. Using a pretrained convolutional neural network (fast feature extraction without data augmentation)
The complete code for this section is shown below.
""" 3.1.1.使用预训练的卷积神经网络(不使用数据增强的快速特征提取) """
# 1. Instantiate the VGG16 convolutional base
from keras.applications import VGG16            # conv_base is a VGG16 model
conv_base = VGG16(weights = 'imagenet',         # weight checkpoint used to initialise the model
                  include_top = False,          # whether to include the densely connected classifier on top (excluded here)
                  input_shape = (150, 150, 3))  # shape of the input image tensors
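# (Optional check, not part of the original listing) For 150x150 RGB inputs the conv base
# ends in a (4, 4, 512) feature map, which is why the feature arrays below are allocated
# with that shape; conv_base.summary() prints the full architecture to confirm it.
conv_base.summary()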
# 2. Extract features with the pretrained convolutional base (2-1: fast feature extraction without data augmentation)
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
base_dir = r'F:\datasets\dogs-vs-csts\猫狗大战'          # directory holding the smaller dataset (raw string avoids backslash escapes)
train_dir = os.path.join(base_dir, 'train')              # training split
validation_dir = os.path.join(base_dir, 'validation')    # validation split
test_dir = os.path.join(base_dir, 'test')                # test split
datagen = ImageDataGenerator(rescale=1./255)             # rescale all pixel values by 1/255
batch_size = 20                                          # mini-batch size
def extract_features(directory, sample_count):             # extract_features(directory, number of samples)
    features = np.zeros(shape=(sample_count, 4, 4, 512))   # 4D array: (samples, 4, 4, 512)
    labels = np.zeros(shape=(sample_count))                 # one label per sample, initialised to 0
    # The generator reads images from the directory, rescales them, and yields
    # batches of (images, labels) in an endless loop.
    generator = datagen.flow_from_directory(
        directory,                    # directory to read images from
        target_size = (150, 150),     # integer tuple; every image is resized to this size (default is (256, 256))
        batch_size = batch_size,      # batch size (default is 32)
        class_mode = 'binary')        # binary labels, matching the binary_crossentropy loss used later
    i = 0
    # Run each batch through the conv base with predict() and store the resulting
    # feature maps in the preallocated arrays for the dense classifier that follows.
    for inputs_batch, labels_batch in generator:
        features_batch = conv_base.predict(inputs_batch)    # feature maps for this batch
        features[i * batch_size : (i+1) * batch_size] = features_batch
        labels[i * batch_size : (i+1) * batch_size] = labels_batch
        i += 1
        if i * batch_size >= sample_count:
            break  # the generator yields batches forever, so stop once every image has been seen
    return features, labels
train_features, train_labels = extract_features(train_dir, 2000)  # extracted features are 4D
validation_features, validation_labels = extract_features(validation_dir, 1000)
test_features, test_labels = extract_features(test_dir, 1000)
train_features = np.reshape(train_features, (2000, 4*4*512))  # flatten the features to 2D for the dense layers
validation_features = np.reshape(validation_features, (1000, 4*4*512))
test_features = np.reshape(test_features, (1000, 4*4*512))
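# (Sanity check, not part of the original listing) After flattening, each split should be
# a 2D array of shape (samples, 8192), since 4 * 4 * 512 = 8192.
print(train_features.shape, validation_features.shape, test_features.shape)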
# 3. Define and train the densely connected classifier
from keras import models
from keras import layers
from keras import optimizers
model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_dim=4*4*512))
model.add(layers.Dropout(0.5)) # dropout regularisation
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
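# (Note, derived from the layer sizes above rather than from the original text)
# model.summary() should report roughly 2.1M trainable parameters:
# 8192 * 256 + 256 = 2,097,408 in the hidden layer plus 256 + 1 = 257 in the output layer.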
model.compile(optimizer = optimizers.RMSprop(lr=2e-5),  # configure the model
              loss = 'binary_crossentropy',
              metrics = ['acc'])
history = model.fit(train_features, train_labels,        # training data
                    epochs = 30,
                    batch_size = 20,
                    validation_data = (validation_features, validation_labels))  # validation data
loss, accuracy = model.evaluate(test_features, test_labels)  # evaluate on the test set
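# (Hypothetical print, not part of the original listing) Report the held-out test metrics.
print('Test loss: %.4f, test accuracy: %.4f' % (loss, accuracy))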
# 4. Plot the loss and accuracy curves recorded during training
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc)+1)
plt.plot(epochs, acc, 'r', label='Training acc') # accuracy curves over training
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and Validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss') # loss curves over training
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and Validation loss')
plt.legend()
plt.show()
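If you want to reuse the trained classifier later without re-running the 30 epochs, a minimal sketch (assuming a writable working directory and an HDF5-capable Keras install; the filename is arbitrary) is to save it with model.save:
model.save('cats_and_dogs_small_feature_extraction.h5')  # hypothetical filename; reload later with models.load_model()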