from tensorflow import keras
from tensorflow.keras import layers,models
import os, PIL, pathlib
import matplotlib.pyplot as plt
import tensorflow as tf
# If a GPU is present, restrict TensorFlow to the first one and let it grow
# its memory allocation on demand instead of reserving all VRAM up front.
gpus = tf.config.list_physical_devices("GPU")
if gpus:
    primary_gpu = gpus[0]  # when several GPUs exist, only GPU 0 is used
    tf.config.experimental.set_memory_growth(primary_gpu, True)
    tf.config.set_visible_devices([primary_gpu], "GPU")
gpus
[]
导入数据
# Root folder that holds the train/ and test/ image sub-directories.
data_dir = pathlib.Path(r"C:\Users\11054\Desktop\kLearning\t5_learning\data")
data_dir
WindowsPath('C:/Users/11054/Desktop/kLearning/t5_learning/data')
查看数据
# Count every .jpg two directory levels below the root (split/class/file.jpg).
image_count = sum(1 for _ in data_dir.glob('*/*/*.jpg'))
print("图片总数为:", image_count)
图片总数为: 578
# Open the first Nike training image as a quick sanity check of the data.
roses = sorted(data_dir.glob('train/nike/*.jpg')) and list(data_dir.glob('train/nike/*.jpg'))
PIL.Image.open(str(roses[0]))
数据预处理
# Input-pipeline hyper-parameters: batch size and square target image size.
batch_size = 32
img_height = img_width = 224
# Build the training dataset from the train/ directory; labels are inferred
# from sub-directory names. For a detailed introduction to
# image_dataset_from_directory() see:
# https://mtyjkh.blog.csdn.net/article/details/117018789
train_dir = r"C:\Users\11054\Desktop\kLearning\t5_learning\data\train"
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    train_dir,
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size,
)
# Build the validation dataset from the test/ directory with the same seed
# and sizing as the training set. For details on
# image_dataset_from_directory() see:
# https://mtyjkh.blog.csdn.net/article/details/117018789
val_dir = r"C:\Users\11054\Desktop\kLearning\t5_learning\data\test"
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    val_dir,
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size,
)
Found 502 files belonging to 2 classes.
Found 76 files belonging to 2 classes.
# Class labels inferred from the sub-directory names (alphabetical order),
# e.g. ['adidas', 'nike']; the index order matches the integer labels.
class_names = train_ds.class_names
print(class_names)
['adidas', 'nike']
数据可视化
# Preview the first 20 training images with their class labels.
plt.figure(figsize=(20, 10))
sample_images, sample_labels = next(iter(train_ds.take(1)))
for idx in range(20):
    plt.subplot(5, 10, idx + 1)
    plt.imshow(sample_images[idx].numpy().astype("uint8"))
    plt.title(class_names[sample_labels[idx]])
    plt.axis("off")

# Report one batch's tensor shapes: images (batch, H, W, C), labels (batch,).
image_batch, labels_batch = next(iter(train_ds))
print(image_batch.shape)
print(labels_batch.shape)
(32, 224, 224, 3)
(32,)
# Configure the tf.data pipeline: cache decoded images, randomize training
# order, and prefetch batches so the accelerator never waits on input.
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache()
train_ds = train_ds.shuffle(1000)
train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
构建模型
"""
A small sequential CNN for binary shoe-brand classification.

For how the convolution output sizes are computed, see:
https://blog.csdn.net/qq_38251616/article/details/114278995
layers.Dropout(p) randomly zeroes a fraction p of activations during training
to reduce overfitting and improve generalization; see:
https://mtyjkh.blog.csdn.net/article/details/115826689
"""
model = models.Sequential([
    # Scale pixels from [0, 255] to [0, 1]; this first layer also fixes the
    # network's input shape, so later layers must not repeat input_shape.
    layers.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
    layers.Conv2D(16, (3, 3), activation='relu'),  # conv block 1, 3x3 kernels
    layers.AveragePooling2D((2, 2)),               # pooling 1, 2x2 down-sampling
    layers.Conv2D(32, (3, 3), activation='relu'),  # conv block 2, 3x3 kernels
    layers.AveragePooling2D((2, 2)),               # pooling 2, 2x2 down-sampling
    layers.Dropout(0.1),
    layers.Conv2D(64, (3, 3), activation='relu'),  # conv block 3, 3x3 kernels
    layers.Dropout(0.1),
    layers.Flatten(),                    # bridge conv features to the dense head
    layers.Dense(128, activation='relu'),  # fully-connected feature layer
    # One logit per class — no softmax here because the loss below is
    # configured with from_logits=True.
    layers.Dense(len(class_names))
])

model.summary()  # print the network architecture
Model: "sequential_3"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩ │ rescaling_3 (Rescaling) │ (None, 224, 224, 3) │ 0 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ conv2d_9 (Conv2D) │ (None, 222, 222, 16) │ 448 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ average_pooling2d_6 │ (None, 111, 111, 16) │ 0 │ │ (AveragePooling2D) │ │ │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ conv2d_10 (Conv2D) │ (None, 109, 109, 32) │ 4,640 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ average_pooling2d_7 │ (None, 54, 54, 32) │ 0 │ │ (AveragePooling2D) │ │ │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ dropout_6 (Dropout) │ (None, 54, 54, 32) │ 0 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ conv2d_11 (Conv2D) │ (None, 52, 52, 64) │ 18,496 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ dropout_7 (Dropout) │ (None, 52, 52, 64) │ 0 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ flatten_3 (Flatten) │ (None, 173056) │ 0 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ dense_6 (Dense) │ (None, 128) │ 22,151,296 │ ├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤ │ dense_7 (Dense) │ (None, 2) │ 258 │ └──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
Total params: 22,175,138 (84.59 MB)
Trainable params: 22,175,138 (84.59 MB)
Non-trainable params: 0 (0.00 B)
训练模型
# Exponential learning-rate decay. NOTE: decay_steps counts optimizer *steps*
# (batches), not epochs; with staircase=True the rate drops by a factor of
# decay_rate every 10 steps.
initial_learning_rate = 1e-4
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate,
    decay_steps=10,
    decay_rate=0.92,
    staircase=True,
)

# Compile with Adam driven by the decay schedule; labels are integer class
# ids, so sparse categorical cross-entropy over raw logits is used.
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'],
)
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping

epochs = 50

# Persist the weights of whichever epoch achieves the best val_accuracy.
checkpointer = ModelCheckpoint(
    'best_model.weights.h5',
    monitor='val_accuracy',
    verbose=1,
    save_best_only=True,
    save_weights_only=True,
)

# Halt training once val_accuracy has not improved by at least 0.001
# for 20 consecutive epochs.
earlystopper = EarlyStopping(
    monitor='val_accuracy',
    min_delta=0.001,
    patience=20,
    verbose=1,
)
训练模型
# Train for up to `epochs` epochs; the callbacks keep the best weights on
# disk and stop the run early when validation accuracy plateaus.
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs,
    callbacks=[checkpointer, earlystopper],
)
Epoch 1/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 228ms/step - accuracy: 0.5029 - loss: 0.8578
Epoch 1: val_accuracy improved from -inf to 0.50000, saving model to best_model.weights.h5
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m5s[0m 261ms/step - accuracy: 0.5032 - loss: 0.8554 - val_accuracy: 0.5000 - val_loss: 0.7766
Epoch 2/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 219ms/step - accuracy: 0.5695 - loss: 0.6942
Epoch 2: val_accuracy improved from 0.50000 to 0.53947, saving model to best_model.weights.h5
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 244ms/step - accuracy: 0.5693 - loss: 0.6936 - val_accuracy: 0.5395 - val_loss: 0.6848
Epoch 3/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 221ms/step - accuracy: 0.5580 - loss: 0.6858
Epoch 3: val_accuracy improved from 0.53947 to 0.55263, saving model to best_model.weights.h5
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 251ms/step - accuracy: 0.5578 - loss: 0.6855 - val_accuracy: 0.5526 - val_loss: 0.6821
Epoch 4/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 220ms/step - accuracy: 0.5847 - loss: 0.6701
Epoch 4: val_accuracy improved from 0.55263 to 0.67105, saving model to best_model.weights.h5
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 246ms/step - accuracy: 0.5837 - loss: 0.6699 - val_accuracy: 0.6711 - val_loss: 0.6243
Epoch 5/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 225ms/step - accuracy: 0.6549 - loss: 0.6197
Epoch 5: val_accuracy did not improve from 0.67105
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 233ms/step - accuracy: 0.6556 - loss: 0.6199 - val_accuracy: 0.6711 - val_loss: 0.6184
Epoch 6/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 230ms/step - accuracy: 0.6938 - loss: 0.5970
Epoch 6: val_accuracy did not improve from 0.67105
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 239ms/step - accuracy: 0.6935 - loss: 0.5974 - val_accuracy: 0.6711 - val_loss: 0.6285
Epoch 7/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 228ms/step - accuracy: 0.7146 - loss: 0.5909
Epoch 7: val_accuracy improved from 0.67105 to 0.71053, saving model to best_model.weights.h5
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 255ms/step - accuracy: 0.7156 - loss: 0.5906 - val_accuracy: 0.7105 - val_loss: 0.5960
Epoch 8/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 232ms/step - accuracy: 0.7821 - loss: 0.5543
Epoch 8: val_accuracy did not improve from 0.71053
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 239ms/step - accuracy: 0.7808 - loss: 0.5546 - val_accuracy: 0.6842 - val_loss: 0.5885
Epoch 9/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 227ms/step - accuracy: 0.7684 - loss: 0.5563
Epoch 9: val_accuracy did not improve from 0.71053
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 234ms/step - accuracy: 0.7685 - loss: 0.5554 - val_accuracy: 0.7105 - val_loss: 0.5659
Epoch 10/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 228ms/step - accuracy: 0.7922 - loss: 0.5188
Epoch 10: val_accuracy improved from 0.71053 to 0.73684, saving model to best_model.weights.h5
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 251ms/step - accuracy: 0.7916 - loss: 0.5193 - val_accuracy: 0.7368 - val_loss: 0.5595
Epoch 11/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 223ms/step - accuracy: 0.8114 - loss: 0.5166
Epoch 11: val_accuracy did not improve from 0.73684
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 232ms/step - accuracy: 0.8111 - loss: 0.5160 - val_accuracy: 0.7368 - val_loss: 0.5358
Epoch 12/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 222ms/step - accuracy: 0.7697 - loss: 0.5143
Epoch 12: val_accuracy improved from 0.73684 to 0.75000, saving model to best_model.weights.h5
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 247ms/step - accuracy: 0.7713 - loss: 0.5131 - val_accuracy: 0.7500 - val_loss: 0.5308
Epoch 13/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 226ms/step - accuracy: 0.8117 - loss: 0.4815
Epoch 13: val_accuracy did not improve from 0.75000
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 234ms/step - accuracy: 0.8115 - loss: 0.4816 - val_accuracy: 0.7500 - val_loss: 0.5233
Epoch 14/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 219ms/step - accuracy: 0.8355 - loss: 0.4610
Epoch 14: val_accuracy improved from 0.75000 to 0.77632, saving model to best_model.weights.h5
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 243ms/step - accuracy: 0.8344 - loss: 0.4612 - val_accuracy: 0.7763 - val_loss: 0.5115
Epoch 15/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 215ms/step - accuracy: 0.8289 - loss: 0.4536
Epoch 15: val_accuracy did not improve from 0.77632
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 223ms/step - accuracy: 0.8279 - loss: 0.4543 - val_accuracy: 0.7763 - val_loss: 0.5086
Epoch 16/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 222ms/step - accuracy: 0.8153 - loss: 0.4517
Epoch 16: val_accuracy did not improve from 0.77632
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 229ms/step - accuracy: 0.8163 - loss: 0.4515 - val_accuracy: 0.7763 - val_loss: 0.5135
Epoch 17/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 219ms/step - accuracy: 0.8438 - loss: 0.4385
Epoch 17: val_accuracy did not improve from 0.77632
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 226ms/step - accuracy: 0.8435 - loss: 0.4386 - val_accuracy: 0.7500 - val_loss: 0.4939
Epoch 18/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 219ms/step - accuracy: 0.8174 - loss: 0.4510
Epoch 18: val_accuracy did not improve from 0.77632
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 227ms/step - accuracy: 0.8187 - loss: 0.4501 - val_accuracy: 0.7632 - val_loss: 0.5280
Epoch 19/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 221ms/step - accuracy: 0.8238 - loss: 0.4512
Epoch 19: val_accuracy did not improve from 0.77632
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 229ms/step - accuracy: 0.8247 - loss: 0.4503 - val_accuracy: 0.7632 - val_loss: 0.4901
Epoch 20/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 216ms/step - accuracy: 0.8524 - loss: 0.4141
Epoch 20: val_accuracy improved from 0.77632 to 0.78947, saving model to best_model.weights.h5
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 240ms/step - accuracy: 0.8515 - loss: 0.4149 - val_accuracy: 0.7895 - val_loss: 0.4970
Epoch 21/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 220ms/step - accuracy: 0.8385 - loss: 0.4290
Epoch 21: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 228ms/step - accuracy: 0.8392 - loss: 0.4285 - val_accuracy: 0.7763 - val_loss: 0.4955
Epoch 22/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 221ms/step - accuracy: 0.8750 - loss: 0.3984
Epoch 22: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 228ms/step - accuracy: 0.8739 - loss: 0.3995 - val_accuracy: 0.7763 - val_loss: 0.4927
Epoch 23/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 218ms/step - accuracy: 0.8294 - loss: 0.4102
Epoch 23: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 226ms/step - accuracy: 0.8300 - loss: 0.4105 - val_accuracy: 0.7632 - val_loss: 0.4875
Epoch 24/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 223ms/step - accuracy: 0.8639 - loss: 0.3967
Epoch 24: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 231ms/step - accuracy: 0.8637 - loss: 0.3973 - val_accuracy: 0.7632 - val_loss: 0.4969
Epoch 25/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 218ms/step - accuracy: 0.8913 - loss: 0.3895
Epoch 25: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 225ms/step - accuracy: 0.8895 - loss: 0.3908 - val_accuracy: 0.7632 - val_loss: 0.4881
Epoch 26/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 218ms/step - accuracy: 0.8366 - loss: 0.4169
Epoch 26: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 227ms/step - accuracy: 0.8379 - loss: 0.4165 - val_accuracy: 0.7763 - val_loss: 0.4906
Epoch 27/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 226ms/step - accuracy: 0.8447 - loss: 0.4207
Epoch 27: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 234ms/step - accuracy: 0.8454 - loss: 0.4198 - val_accuracy: 0.7632 - val_loss: 0.4879
Epoch 28/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 223ms/step - accuracy: 0.8560 - loss: 0.3943
Epoch 28: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 233ms/step - accuracy: 0.8560 - loss: 0.3950 - val_accuracy: 0.7763 - val_loss: 0.4884
Epoch 29/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 217ms/step - accuracy: 0.8548 - loss: 0.4052
Epoch 29: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 225ms/step - accuracy: 0.8559 - loss: 0.4051 - val_accuracy: 0.7632 - val_loss: 0.4939
Epoch 30/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 219ms/step - accuracy: 0.8688 - loss: 0.4032
Epoch 30: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 227ms/step - accuracy: 0.8686 - loss: 0.4028 - val_accuracy: 0.7632 - val_loss: 0.4857
Epoch 31/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 221ms/step - accuracy: 0.8322 - loss: 0.4269
Epoch 31: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 229ms/step - accuracy: 0.8347 - loss: 0.4252 - val_accuracy: 0.7763 - val_loss: 0.4869
Epoch 32/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 219ms/step - accuracy: 0.8551 - loss: 0.4040
Epoch 32: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 228ms/step - accuracy: 0.8554 - loss: 0.4036 - val_accuracy: 0.7763 - val_loss: 0.4867
Epoch 33/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 219ms/step - accuracy: 0.8765 - loss: 0.3837
Epoch 33: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 227ms/step - accuracy: 0.8757 - loss: 0.3845 - val_accuracy: 0.7763 - val_loss: 0.4865
Epoch 34/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 219ms/step - accuracy: 0.8560 - loss: 0.4129
Epoch 34: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 226ms/step - accuracy: 0.8565 - loss: 0.4119 - val_accuracy: 0.7763 - val_loss: 0.4868
Epoch 35/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 220ms/step - accuracy: 0.8298 - loss: 0.4071
Epoch 35: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 227ms/step - accuracy: 0.8313 - loss: 0.4064 - val_accuracy: 0.7763 - val_loss: 0.4878
Epoch 36/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 221ms/step - accuracy: 0.8774 - loss: 0.3989
Epoch 36: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 229ms/step - accuracy: 0.8775 - loss: 0.3988 - val_accuracy: 0.7763 - val_loss: 0.4875
Epoch 37/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 220ms/step - accuracy: 0.8446 - loss: 0.4130
Epoch 37: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 227ms/step - accuracy: 0.8452 - loss: 0.4120 - val_accuracy: 0.7632 - val_loss: 0.4844
Epoch 38/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 222ms/step - accuracy: 0.8408 - loss: 0.4143
Epoch 38: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 229ms/step - accuracy: 0.8424 - loss: 0.4131 - val_accuracy: 0.7763 - val_loss: 0.4858
Epoch 39/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 220ms/step - accuracy: 0.8463 - loss: 0.4210
Epoch 39: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 228ms/step - accuracy: 0.8480 - loss: 0.4194 - val_accuracy: 0.7763 - val_loss: 0.4864
Epoch 40/50
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 220ms/step - accuracy: 0.8597 - loss: 0.4010
Epoch 40: val_accuracy did not improve from 0.78947
[1m16/16[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m4s[0m 228ms/step - accuracy: 0.8601 - loss: 0.4007 - val_accuracy: 0.7763 - val_loss: 0.4871
Epoch 40: early stopping
评估模型
# Plot training/validation accuracy and loss curves side by side.
history_dict = history.history
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs_range = range(len(loss))

plt.figure(figsize=(12, 4))

plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')

plt.show()
# Reload the best-epoch weights saved by the ModelCheckpoint callback, so
# inference below uses the best model rather than the last-epoch one.
model.load_weights('best_model.weights.h5')
预测模型
from PIL import Image
import numpy as np

# Predict the class of a single image. Do NOT divide by 255 here: the model's
# first layer is Rescaling(1./255), so feeding raw pixel values keeps the
# preprocessing identical to training (dividing here would double-scale).
# .convert("RGB") guards against grayscale/RGBA files, which would otherwise
# break the (H, W, 3) input the network expects.
img = Image.open(r"C:\Users\11054\Desktop\kLearning\t5_learning\data\test\nike\12.jpg").convert("RGB")
image = tf.image.resize(np.asarray(img), [img_height, img_width])
img_array = tf.expand_dims(image, 0)  # add a batch dimension: (1, H, W, 3)

predictions = model.predict(img_array)  # logits for each class
print("预测结果为:", class_names[np.argmax(predictions)])
[1m1/1[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m0s[0m 24ms/step
预测结果为: nike
#个人总结
降低dropout数值后模型准确度显著提升