>- **🍨 This article is a learning-record post from the [🔗365天深度学习训练营](https://mp.weixin.qq.com/s/Z9yL_wt7L8aPOr9Lqb1K3w)**
>- **🍖 Original author: [K同学啊](https://mtyjkh.blog.csdn.net/)**
Part 1: Preliminary Work
1. Import the data
```python
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt

(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
```
2. Normalization
```python
# Scale the pixel values into the [0, 1] range.
train_images, test_images = train_images / 255.0, test_images / 255.0

train_images.shape, test_images.shape, train_labels.shape, test_labels.shape
```
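For reference, CIFAR-10 contains 50,000 training and 10,000 test images of size 32×32×3, and the labels are stored as column vectors of shape (N, 1), which is why the visualization below indexes `train_labels[i][0]`. The shape check should print:

```
((50000, 32, 32, 3), (10000, 32, 32, 3), (50000, 1), (10000, 1))
```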
3. Visualization
```python
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

plt.figure(figsize=(20, 10))
for i in range(20):
    plt.subplot(5, 10, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    # Labels have shape (N, 1), hence the extra [0] index.
    plt.xlabel(class_names[train_labels[i][0]])
plt.show()
```
Part 2: Build the CNN
Pooling layers:
A pooling layer downsamples the extracted feature maps. On one hand it shrinks the feature maps, reducing the network's computational cost; on the other it compresses the features, retaining the dominant ones, adding a degree of translation invariance, and lowering the risk of overfitting. In practice, though, pooling is largely a trade-off made for computational efficiency: forcibly compressing the features also discards some information, which is why recent networks use pooling layers sparingly or replace them with refined variants such as SoftPool.
The two common kinds are max pooling (MaxPooling) and average pooling (AveragePooling): average pooling preserves background information best, while max pooling is best at extracting texture, as the small sketch below illustrates.
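A minimal sketch contrasting the two pooling layers on a toy 4×4 single-channel input (the values are arbitrary, chosen only for illustration):

```python
import numpy as np
import tensorflow as tf

# A 4x4 image with values 0..15, shaped (batch, height, width, channels).
x = np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1)

max_out = tf.keras.layers.MaxPooling2D((2, 2))(x)
avg_out = tf.keras.layers.AveragePooling2D((2, 2))(x)

print(max_out[0, :, :, 0].numpy())  # [[ 5.  7.] [13. 15.]] - strongest response per window
print(avg_out[0, :, :, 0].numpy())  # [[ 2.5  4.5] [10.5 12.5]] - mean of each window
```

Max pooling keeps only the largest activation in each 2×2 window, while average pooling smooths the window to its mean, which is the intuition behind "texture vs. background" above.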
```python
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),  # Conv layer 1, 3x3 kernels
    layers.MaxPooling2D((2, 2)),                                            # Pooling layer 1, 2x2 window
    layers.Conv2D(64, (3, 3), activation='relu'),                           # Conv layer 2, 3x3 kernels
    layers.MaxPooling2D((2, 2)),                                            # Pooling layer 2, 2x2 window
    layers.Conv2D(64, (3, 3), activation='relu'),                           # Conv layer 3, 3x3 kernels
    layers.Flatten(),                                                       # Flatten bridges the conv and dense layers
    layers.Dense(64, activation='relu'),                                    # Fully connected layer for further feature extraction
    layers.Dense(10)                                                        # Output layer: raw logits, one per class
])

model.summary()  # Print the network architecture
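```

With this architecture the summary should show the feature maps shrinking from 30×30×32 down to 4×4×64, for roughly 122,570 trainable parameters in total:

```
Layer            Output Shape     Param #
Conv2D (32)      (30, 30, 32)         896
MaxPooling2D     (15, 15, 32)           0
Conv2D (64)      (13, 13, 64)      18,496
MaxPooling2D     (6, 6, 64)             0
Conv2D (64)      (4, 4, 64)        36,928
Flatten          (1024,)                0
Dense (64)       (64,)             65,600
Dense (10)       (10,)                650
Total params: 122,570
```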
Part 3: Compile the model
```python
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
```
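Note that `from_logits=True` matches the model above: the final `Dense(10)` layer has no softmax, so it emits raw logits and the loss applies the softmax internally. A minimal sketch of the equivalence (the logits and label here are illustrative values, not model output):

```python
logits = tf.constant([[2.0, 1.0, 0.1]])   # hypothetical raw model output
labels = tf.constant([0])                 # integer class label

loss_from_logits = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
loss_from_probs = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)

print(loss_from_logits(labels, logits).numpy())                  # softmax applied internally
print(loss_from_probs(labels, tf.nn.softmax(logits)).numpy())    # same value, softmax applied by hand
```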
Part 4: Train the model
```python
history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))
```
Part 5: Prediction
```python
import numpy as np

pre = model.predict(test_images)
print(class_names[np.argmax(pre[1])])
```
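Since the model outputs logits, `np.argmax` already picks the predicted class; to also get a confidence score, apply `tf.nn.softmax` first (softmax is monotonic, so the argmax is unchanged). A minimal sketch, reusing test image 1 from above:

```python
probs = tf.nn.softmax(pre[1])             # convert logits to probabilities
print(class_names[np.argmax(probs)])      # predicted class
print(float(tf.reduce_max(probs)))        # confidence of that prediction
print(class_names[test_labels[1][0]])     # ground-truth label for comparison
```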
Part 6: Model evaluation
```python
import matplotlib.pyplot as plt

plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.show()
```
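The same `History` object also records the loss, and plotting it alongside the accuracy curves makes overfitting easier to spot. A minimal companion sketch:

```python
# Training vs. validation loss from the same training run.
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.show()
```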
```python
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
```
Part 7: Summary
To improve the model's accuracy, I first tried data augmentation, but the results were not ideal.
```python
# Apply data augmentation
from tensorflow.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
)
# fit() is only needed for featurewise statistics (e.g. featurewise_center);
# for the purely geometric transforms used here it has no effect.
datagen.fit(train_images)

history = model.fit(datagen.flow(train_images, train_labels), epochs=10,
                    validation_data=(test_images, test_labels))
```
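A quick sketch to eyeball what the generator actually produces: take one augmented batch and display its first 10 images (the batch size of 10 is arbitrary, chosen just for the preview):

```python
# Pull a single augmented batch from the generator and show it.
aug_images, aug_labels = next(datagen.flow(train_images, train_labels, batch_size=10))

plt.figure(figsize=(20, 4))
for i in range(10):
    plt.subplot(1, 10, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(aug_images[i])
    plt.xlabel(class_names[int(aug_labels[i][0])])
plt.show()
```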
I then swapped the Adam optimizer for RMSprop and added a learning-rate scheduler (ReduceLROnPlateau), which brought the model's accuracy up to 74%.
```python
from tensorflow.keras.callbacks import ReduceLROnPlateau

model.compile(optimizer='RMSprop',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# Halve the learning rate whenever val_loss has not improved for 2 epochs,
# never going below 1e-5.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                              patience=2, min_lr=1e-5, verbose=1)

history = model.fit(train_images, train_labels,
                    epochs=10, validation_data=(test_images, test_labels),
                    callbacks=[reduce_lr])
```
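To see when the scheduler actually kicked in, the learning rate can be plotted after training; this assumes a recent tf.keras, where ReduceLROnPlateau records the per-epoch learning rate under the `'lr'` key of the history. A minimal sketch:

```python
# Per-epoch learning rate as logged by ReduceLROnPlateau ('lr' key,
# assumption: recent TF2 / tf.keras behavior).
plt.plot(history.history['lr'], label='learning rate')
plt.xlabel('Epoch')
plt.ylabel('Learning rate')
plt.legend()
plt.show()
```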