# First Experience with Convolutional Neural Networks
import os, shutil
from datetime import datetime
from keras import models
from keras import layers
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
def data_initial():
    # Directory where the original dataset was unpacked
    original_dataset_dir = r'D:\pycharm\DownloadData\cats_vs_dogs\train\train'
    # Directory for the small subset
    base_dir = r'D:\pycharm\DownloadData\cats_vs_dogs\mini'
    os.makedirs(base_dir, exist_ok=True)
    # Create the train / validation / test directories
    train_dir = os.path.join(base_dir, 'train')
    os.makedirs(train_dir, exist_ok=True)
    validation_dir = os.path.join(base_dir, 'validation')
    os.makedirs(validation_dir, exist_ok=True)
    test_dir = os.path.join(base_dir, 'test')
    os.makedirs(test_dir, exist_ok=True)
    # Training directory for cats
    train_cats_dir = os.path.join(train_dir, 'cats')
    os.makedirs(train_cats_dir, exist_ok=True)
    # Training directory for dogs
    train_dogs_dir = os.path.join(train_dir, 'dogs')
    os.makedirs(train_dogs_dir, exist_ok=True)
    # Validation directory for cats
    validation_cats_dir = os.path.join(validation_dir, 'cats')
    os.makedirs(validation_cats_dir, exist_ok=True)
    # Validation directory for dogs
    validation_dogs_dir = os.path.join(validation_dir, 'dogs')
    os.makedirs(validation_dogs_dir, exist_ok=True)
    # Test directory for cats
    test_cats_dir = os.path.join(test_dir, 'cats')
    os.makedirs(test_cats_dir, exist_ok=True)
    # Test directory for dogs
    test_dogs_dir = os.path.join(test_dir, 'dogs')
    os.makedirs(test_dogs_dir, exist_ok=True)
    # 1,000 cat images for training
    fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
    for fname in fnames:
        src = os.path.join(original_dataset_dir, fname)
        dst = os.path.join(train_cats_dir, fname)
        shutil.copyfile(src, dst)
    # 500 cat images for validation
    fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
    for fname in fnames:
        src = os.path.join(original_dataset_dir, fname)
        dst = os.path.join(validation_cats_dir, fname)
        shutil.copyfile(src, dst)
    # 500 cat images for testing
    fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
    for fname in fnames:
        src = os.path.join(original_dataset_dir, fname)
        dst = os.path.join(test_cats_dir, fname)
        shutil.copyfile(src, dst)
    # 1,000 dog images for training
    fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
    for fname in fnames:
        src = os.path.join(original_dataset_dir, fname)
        dst = os.path.join(train_dogs_dir, fname)
        shutil.copyfile(src, dst)
    # 500 dog images for validation
    fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
    for fname in fnames:
        src = os.path.join(original_dataset_dir, fname)
        dst = os.path.join(validation_dogs_dir, fname)
        shutil.copyfile(src, dst)
    # 500 dog images for testing
    fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
    for fname in fnames:
        src = os.path.join(original_dataset_dir, fname)
        dst = os.path.join(test_dogs_dir, fname)
        shutil.copyfile(src, dst)
    # Sanity check: count the files in each directory
    print('total train_cat: ', len(os.listdir(train_cats_dir)))
    print('total validation_cat: ', len(os.listdir(validation_cats_dir)))
    print('total test_cat: ', len(os.listdir(test_cats_dir)))
    print('total train_dog: ', len(os.listdir(train_dogs_dir)))
    print('total validation_dog: ', len(os.listdir(validation_dogs_dir)))
    print('total test_dog: ', len(os.listdir(test_dogs_dir)))
    # Validation images are only rescaled to [0, 1]; no augmentation
    validate_datagen = ImageDataGenerator(rescale=1. / 255)
    # Data augmentation: apply random transformations to the training images so the
    # network never sees the exact same picture twice. This improves generalization
    # and is one way to fight overfitting (a preview helper is sketched after this function).
    augmented_data_gen = ImageDataGenerator(
        rescale=1. / 255,
        # Rotate randomly by up to 40 degrees
        rotation_range=40,
        # Shift horizontally by up to 20% of the width
        width_shift_range=0.2,
        # Shift vertically by up to 20% of the height
        height_shift_range=0.2,
        # Shear transformation
        shear_range=0.2,
        # Zoom in or out by up to 20%
        zoom_range=0.2,
        # Flip horizontally at random
        horizontal_flip=True
    )
    train_generator = augmented_data_gen.flow_from_directory(
        directory=train_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='binary'
    )
    validation_generator = validate_datagen.flow_from_directory(
        directory=validation_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='binary'
    )
    return train_generator, validation_generator
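To see what the augmentation settings above actually produce, a helper like the one below draws four random variants of a single training image. This is a minimal sketch rather than part of the original script: `preview_augmentation` and its argument are hypothetical names, and it assumes the same Keras 2.x preprocessing API already imported at the top.

def preview_augmentation(img_path):
    # Hypothetical helper, for illustration only
    from keras.preprocessing import image
    datagen = ImageDataGenerator(
        rotation_range=40, width_shift_range=0.2, height_shift_range=0.2,
        shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
    img = image.load_img(img_path, target_size=(150, 150))
    x = image.img_to_array(img)      # array of shape (150, 150, 3)
    x = x.reshape((1,) + x.shape)    # flow() expects a batch dimension
    for i, batch in enumerate(datagen.flow(x, batch_size=1)):
        plt.subplot(1, 4, i + 1)
        plt.imshow(image.array_to_img(batch[0]))
        if i == 3:                   # the generator loops forever, so stop after 4 images
            break
    plt.show()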
# Build the model
def create_convnet_model():
    model = models.Sequential()
    # 2D convolution: 32 filters, 3x3 kernel, relu activation,
    # input shape (height 150, width 150, 3 channels)
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
    # 2D max pooling
    model.add(layers.MaxPooling2D((2, 2)))
    # Convolution with 64 filters
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    # Max pooling
    model.add(layers.MaxPooling2D((2, 2)))
    # Convolution with 128 filters
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    # Max pooling
    model.add(layers.MaxPooling2D((2, 2)))
    # Convolution with 128 filters
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    # Max pooling
    model.add(layers.MaxPooling2D((2, 2)))
    # Flatten to a 1D vector
    model.add(layers.Flatten())
    # Dropout: even with augmentation, the transformed images remain highly
    # correlated with the originals, so randomly dropping half of the previous
    # layer's outputs further reduces overfitting
    model.add(layers.Dropout(0.5))
    # Fully connected layer
    model.add(layers.Dense(512, activation='relu'))
    # Binary classification output
    model.add(layers.Dense(1, activation='sigmoid'))
    # Print the layer-by-layer summary (the shape arithmetic is sketched after this function)
    model.summary()
    return model
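The spatial sizes that model.summary() prints follow from two rules: a 3x3 convolution with the default 'valid' padding trims one pixel from each side, and a 2x2 max pooling halves the result, rounding down. A quick sanity check of that arithmetic, added here for illustration (`trace_feature_map_sizes` is a hypothetical helper):

def trace_feature_map_sizes(side=150, blocks=4):
    # Hypothetical helper: walk the input size through the conv+pool blocks
    for _ in range(blocks):
        side = (side - 2) // 2   # Conv2D(3x3, valid) then MaxPooling2D(2x2)
        print(side)              # prints 74, 36, 17, 7
    # The final 7x7x128 feature map flattens to 7 * 7 * 128 = 6272 values,
    # which is the vector that Dense(512) receives.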
if __name__ == '__main__':
    # Preprocess the data and get the batch generators
    train_generator, validation_generator = data_initial()
    model = create_convnet_model()
    # Compile the model
    model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc'])
    # Time the training run
    start_time = datetime.now()
    # Fit the model from the batch generators
    # (100 steps x batch size 20 = 2,000 training images per epoch;
    #  50 validation steps x 20 = all 1,000 validation images)
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=100,
        epochs=30,
        validation_data=validation_generator,
        validation_steps=50
    )
    finish_time = datetime.now()
    print('Training time (s): ', (finish_time - start_time).seconds)
    # Save the model
    model.save('cats_vs_dogs_small.h5')
    print(history.history)
    # Plot the loss and accuracy curves
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(1, len(acc) + 1)
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    plt.figure()
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()
Compared with the previous training run, this one adds data augmentation and Dropout regularization. Even though the dropout rate is quite high, the model still converged to roughly 75% accuracy, about five points better than last time.
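Since the trained model is saved to cats_vs_dogs_small.h5, it can be reloaded later for single-image predictions. A minimal inference sketch, assuming the same Keras 2.x API as the script above; `predict_single_image` and its argument are hypothetical:

def predict_single_image(img_path):
    # Hypothetical helper, for illustration only
    from keras.models import load_model
    from keras.preprocessing import image
    model = load_model('cats_vs_dogs_small.h5')
    img = image.load_img(img_path, target_size=(150, 150))
    x = image.img_to_array(img) / 255.0   # same 1/255 rescaling as in training
    x = x.reshape((1,) + x.shape)         # add the batch dimension
    p = model.predict(x)[0][0]            # sigmoid output in [0, 1]
    # flow_from_directory assigns labels alphabetically: cats -> 0, dogs -> 1
    return 'dog' if p > 0.5 else 'cat'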