T6:明星识别

>- **🍨 本文为[🔗365天深度学习训练营](https://mp.weixin.qq.com/s/0dvHCaOoFnW8SCp3JpzKxg) 中的学习记录博客**
>- **🍖 原作者:[K同学啊](https://mtyjkh.blog.csdn.net/)**

一、模型训练

"""T6: celebrity face recognition — CNN training script (TensorFlow / Keras).

Loads a directory of celebrity photos (one sub-folder per person), trains a
small convolutional classifier with an exponentially-decaying learning rate,
checkpoints the best weights by validation accuracy, and plots the curves.
"""
import os
import pathlib

import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

# Work around the "duplicate OpenMP runtime" crash seen with some conda setups.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# --- GPU configuration -------------------------------------------------------
gpus = tf.config.list_physical_devices('GPU')
if gpus:
    gpu0 = gpus[0]
    # Allocate GPU memory on demand instead of reserving it all up front.
    tf.config.experimental.set_memory_growth(gpu0, True)
    tf.config.set_visible_devices([gpu0], "GPU")
    print("GPU available.")
else:
    print("GPU cannot be found, using CPU instead.")

# --- Data loading ------------------------------------------------------------
# Dataset root: one sub-directory per class (celebrity name).
data_dir = pathlib.Path("D:/others/pycharm/pythonProject/T6/48-data")

image_count = len(list(data_dir.glob('*/*.jpg')))
print("图片总数为:", image_count)

# Peek at one sample image. NOTE: the bare PIL.Image.open(...) call only
# renders inline in a notebook; in a plain script it is a no-op. Call .show()
# on the result if a preview window is wanted.
sample_images = list(data_dir.glob('Jennifer Lawrence/*.jpg'))
PIL.Image.open(str(sample_images[0]))

batch_size = 32
img_height = 224
img_width = 224

# tf.keras.utils.image_dataset_from_directory supersedes the deprecated
# tf.keras.preprocessing variant; same arguments, same behavior.
train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.1,
    subset="training",
    label_mode="categorical",  # one-hot labels, paired with categorical_crossentropy
    seed=123,                  # identical seed in both calls => disjoint split
    image_size=(img_height, img_width),
    batch_size=batch_size)

val_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.1,
    subset="validation",
    label_mode="categorical",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

class_names = train_ds.class_names
print(class_names)

# --- Quick visual sanity check: 20 training images with their labels --------
plt.figure(figsize=(20, 10))
for images, labels in train_ds.take(1):
    for i in range(20):
        ax = plt.subplot(5, 10, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        # Labels are one-hot => argmax recovers the class index.
        plt.title(class_names[np.argmax(labels[i])])
        plt.axis("off")

# Print the shape of one batch: (batch, height, width, channels) / (batch, classes).
for image_batch, labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break

# Cache decoded images and overlap preprocessing with training.
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

# --- Model -------------------------------------------------------------------
# layers.Rescaling replaces the deprecated layers.experimental.preprocessing
# path; it maps pixel values from [0, 255] to [0, 1].
model = models.Sequential([
    layers.Rescaling(1. / 255, input_shape=(img_height, img_width, 3)),
    layers.Conv2D(16, (3, 3), activation='relu'),
    layers.AveragePooling2D((2, 2)),
    layers.Conv2D(32, (3, 3), activation='relu'),
    layers.AveragePooling2D((2, 2)),
    layers.Dropout(0.5),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.AveragePooling2D((2, 2)),
    layers.Dropout(0.5),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.Dropout(0.5),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(len(class_names), activation='softmax')
])

model.summary()

# --- Optimizer with exponential learning-rate decay --------------------------
initial_learning_rate = 1e-4
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate,
    decay_steps=60,   # roughly one decay step per epoch (51 batches/epoch)
    decay_rate=0.96,
    staircase=True
)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

epochs = 100

# Keep only the weights of the best epoch by validation accuracy.
checkpointer = ModelCheckpoint('best_model.h5',
                               monitor='val_accuracy',
                               verbose=1,
                               save_best_only=True,
                               save_weights_only=True)

# Stop once val_accuracy has not improved by >= 0.001 for 20 epochs.
# (The best weights are already on disk via the checkpoint above.)
earlystopper = EarlyStopping(monitor='val_accuracy',
                             min_delta=0.001,
                             patience=20,
                             verbose=1)

history = model.fit(train_ds,
                    validation_data=val_ds,
                    epochs=epochs,
                    callbacks=[checkpointer, earlystopper])

# --- Training curves ---------------------------------------------------------
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# Early stopping may end before `epochs`, so size the x-axis from the history.
epochs_range = range(len(loss))

plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

输出:

D:\others\anaconda\envs\deep_learning_env\python.exe D:\others\pycharm\pythonProject\T6_Recognition_of_Celebrity.py 
GPU cannot be found, using CPU instead.
图片总数为: 1800
Found 1800 files belonging to 17 classes.
Using 1620 files for training.
2024-07-15 20:47:53.782507: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-07-15 20:47:53.783329: I tensorflow/core/common_runtime/process_util.cc:146] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.
Found 1800 files belonging to 17 classes.
Using 180 files for validation.
['Angelina Jolie', 'Brad Pitt', 'Denzel Washington', 'Hugh Jackman', 'Jennifer Lawrence', 'Johnny Depp', 'Kate Winslet', 'Leonardo DiCaprio', 'Megan Fox', 'Natalie Portman', 'Nicole Kidman', 'Robert Downey Jr', 'Sandra Bullock', 'Scarlett Johansson', 'Tom Cruise', 'Tom Hanks', 'Will Smith']
(32, 224, 224, 3)
(32, 17)
Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 rescaling (Rescaling)       (None, 224, 224, 3)       0         
                                                                 
 conv2d (Conv2D)             (None, 222, 222, 16)      448       
                                                                 
 average_pooling2d (AverageP  (None, 111, 111, 16)     0         
 ooling2D)                                                       
                                                                 
 conv2d_1 (Conv2D)           (None, 109, 109, 32)      4640      
                                                                 
 average_pooling2d_1 (Averag  (None, 54, 54, 32)       0         
 ePooling2D)                                                     
                                                                 
 dropout (Dropout)           (None, 54, 54, 32)        0         
                                                                 
 conv2d_2 (Conv2D)           (None, 52, 52, 64)        18496     
                                                                 
 average_pooling2d_2 (Averag  (None, 26, 26, 64)       0         
 ePooling2D)                                                     
                                                                 
 dropout_1 (Dropout)         (None, 26, 26, 64)        0         
                                                                 
 conv2d_3 (Conv2D)           (None, 24, 24, 128)       73856     
                                                                 
 dropout_2 (Dropout)         (None, 24, 24, 128)       0         
                                                                 
 flatten (Flatten)           (None, 73728)             0         
                                                                 
 dense (Dense)               (None, 128)               9437312   
                                                                 
 dense_1 (Dense)             (None, 17)                2193      
                                                                 
=================================================================
Total params: 9,536,945
Trainable params: 9,536,945
Non-trainable params: 0
_________________________________________________________________
Epoch 1/100
51/51 [==============================] - ETA: 0s - loss: 2.8059 - accuracy: 0.1043
Epoch 1: val_accuracy improved from -inf to 0.14444, saving model to best_model.h5
51/51 [==============================] - 14s 245ms/step - loss: 2.8059 - accuracy: 0.1043 - val_loss: 2.7737 - val_accuracy: 0.1444
Epoch 2/100
51/51 [==============================] - ETA: 0s - loss: 2.7316 - accuracy: 0.1185
Epoch 2: val_accuracy improved from 0.14444 to 0.17778, saving model to best_model.h5
51/51 [==============================] - 12s 230ms/step - loss: 2.7316 - accuracy: 0.1185 - val_loss: 2.6913 - val_accuracy: 0.1778
Epoch 3/100
51/51 [==============================] - ETA: 0s - loss: 2.6347 - accuracy: 0.1691
Epoch 3: val_accuracy improved from 0.17778 to 0.20000, saving model to best_model.h5
51/51 [==============================] - 12s 226ms/step - loss: 2.6347 - accuracy: 0.1691 - val_loss: 2.5987 - val_accuracy: 0.2000
Epoch 4/100
51/51 [==============================] - ETA: 0s - loss: 2.5154 - accuracy: 0.1969
Epoch 4: val_accuracy did not improve from 0.20000
51/51 [==============================] - 11s 225ms/step - loss: 2.5154 - accuracy: 0.1969 - val_loss: 2.5774 - val_accuracy: 0.1500
Epoch 5/100
51/51 [==============================] - ETA: 0s - loss: 2.4183 - accuracy: 0.2173
Epoch 5: val_accuracy did not improve from 0.20000
51/51 [==============================] - 11s 223ms/step - loss: 2.4183 - accuracy: 0.2173 - val_loss: 2.6151 - val_accuracy: 0.2000
Epoch 6/100
51/51 [==============================] - ETA: 0s - loss: 2.3264 - accuracy: 0.2463
Epoch 6: val_accuracy did not improve from 0.20000
51/51 [==============================] - 12s 226ms/step - loss: 2.3264 - accuracy: 0.2463 - val_loss: 2.5773 - val_accuracy: 0.1833
Epoch 7/100
51/51 [==============================] - ETA: 0s - loss: 2.2888 - accuracy: 0.2765
Epoch 7: val_accuracy improved from 0.20000 to 0.20556, saving model to best_model.h5
51/51 [==============================] - 11s 223ms/step - loss: 2.2888 - accuracy: 0.2765 - val_loss: 2.4805 - val_accuracy: 0.2056
Epoch 8/100
51/51 [==============================] - ETA: 0s - loss: 2.1929 - accuracy: 0.2889
Epoch 8: val_accuracy did not improve from 0.20556
51/51 [==============================] - 11s 219ms/step - loss: 2.1929 - accuracy: 0.2889 - val_loss: 2.4957 - val_accuracy: 0.2056
Epoch 9/100
51/51 [==============================] - ETA: 0s - loss: 2.1210 - accuracy: 0.3167
Epoch 9: val_accuracy did not improve from 0.20556
51/51 [==============================] - 12s 225ms/step - loss: 2.1210 - accuracy: 0.3167 - val_loss: 2.5066 - val_accuracy: 0.2000
Epoch 10/100
51/51 [==============================] - ETA: 0s - loss: 2.0446 - accuracy: 0.3352
Epoch 10: val_accuracy improved from 0.20556 to 0.26667, saving model to best_model.h5
51/51 [==============================] - 12s 230ms/step - loss: 2.0446 - accuracy: 0.3352 - val_loss: 2.5901 - val_accuracy: 0.2667
Epoch 11/100
51/51 [==============================] - ETA: 0s - loss: 1.9624 - accuracy: 0.3593
Epoch 11: val_accuracy did not improve from 0.26667
51/51 [==============================] - 12s 229ms/step - loss: 1.9624 - accuracy: 0.3593 - val_loss: 2.5257 - val_accuracy: 0.2444
Epoch 12/100
51/51 [==============================] - ETA: 0s - loss: 1.9022 - accuracy: 0.3926
Epoch 12: val_accuracy improved from 0.26667 to 0.27222, saving model to best_model.h5
51/51 [==============================] - 12s 231ms/step - loss: 1.9022 - accuracy: 0.3926 - val_loss: 2.4548 - val_accuracy: 0.2722
Epoch 13/100
51/51 [==============================] - ETA: 0s - loss: 1.8275 - accuracy: 0.4142
Epoch 13: val_accuracy did not improve from 0.27222
51/51 [==============================] - 12s 226ms/step - loss: 1.8275 - accuracy: 0.4142 - val_loss: 2.4777 - val_accuracy: 0.2389
Epoch 14/100
51/51 [==============================] - ETA: 0s - loss: 1.7410 - accuracy: 0.4506
Epoch 14: val_accuracy did not improve from 0.27222
51/51 [==============================] - 11s 218ms/step - loss: 1.7410 - accuracy: 0.4506 - val_loss: 2.4653 - val_accuracy: 0.2278
Epoch 15/100
51/51 [==============================] - ETA: 0s - loss: 1.6623 - accuracy: 0.4679
Epoch 15: val_accuracy did not improve from 0.27222
51/51 [==============================] - 12s 227ms/step - loss: 1.6623 - accuracy: 0.4679 - val_loss: 2.5068 - val_accuracy: 0.2389
Epoch 16/100
51/51 [==============================] - ETA: 0s - loss: 1.5894 - accuracy: 0.5031
Epoch 16: val_accuracy did not improve from 0.27222
51/51 [==============================] - 12s 226ms/step - loss: 1.5894 - accuracy: 0.5031 - val_loss: 2.6726 - val_accuracy: 0.2500
Epoch 17/100
51/51 [==============================] - ETA: 0s - loss: 1.5055 - accuracy: 0.5105
Epoch 17: val_accuracy improved from 0.27222 to 0.28889, saving model to best_model.h5
51/51 [==============================] - 12s 230ms/step - loss: 1.5055 - accuracy: 0.5105 - val_loss: 2.5550 - val_accuracy: 0.2889
Epoch 18/100
51/51 [==============================] - ETA: 0s - loss: 1.4209 - accuracy: 0.5426
Epoch 18: val_accuracy did not improve from 0.28889
51/51 [==============================] - 11s 225ms/step - loss: 1.4209 - accuracy: 0.5426 - val_loss: 2.5054 - val_accuracy: 0.2611
Epoch 19/100
51/51 [==============================] - ETA: 0s - loss: 1.3516 - accuracy: 0.5698
Epoch 19: val_accuracy did not improve from 0.28889
51/51 [==============================] - 11s 224ms/step - loss: 1.3516 - accuracy: 0.5698 - val_loss: 2.4656 - val_accuracy: 0.2889
Epoch 20/100
51/51 [==============================] - ETA: 0s - loss: 1.2661 - accuracy: 0.5981
Epoch 20: val_accuracy improved from 0.28889 to 0.29444, saving model to best_model.h5
51/51 [==============================] - 12s 228ms/step - loss: 1.2661 - accuracy: 0.5981 - val_loss: 2.5023 - val_accuracy: 0.2944
Epoch 21/100
51/51 [==============================] - ETA: 0s - loss: 1.2095 - accuracy: 0.6136
Epoch 21: val_accuracy improved from 0.29444 to 0.30556, saving model to best_model.h5
51/51 [==============================] - 12s 226ms/step - loss: 1.2095 - accuracy: 0.6136 - val_loss: 2.5947 - val_accuracy: 0.3056
Epoch 22/100
51/51 [==============================] - ETA: 0s - loss: 1.1406 - accuracy: 0.6321
Epoch 22: val_accuracy did not improve from 0.30556
51/51 [==============================] - 12s 229ms/step - loss: 1.1406 - accuracy: 0.6321 - val_loss: 2.5971 - val_accuracy: 0.2833
Epoch 23/100
51/51 [==============================] - ETA: 0s - loss: 1.0511 - accuracy: 0.6654
Epoch 23: val_accuracy did not improve from 0.30556
51/51 [==============================] - 12s 229ms/step - loss: 1.0511 - accuracy: 0.6654 - val_loss: 2.5463 - val_accuracy: 0.2833
Epoch 24/100
51/51 [==============================] - ETA: 0s - loss: 1.0056 - accuracy: 0.6802
Epoch 24: val_accuracy improved from 0.30556 to 0.31667, saving model to best_model.h5
51/51 [==============================] - 11s 224ms/step - loss: 1.0056 - accuracy: 0.6802 - val_loss: 2.5001 - val_accuracy: 0.3167
Epoch 25/100
51/51 [==============================] - ETA: 0s - loss: 0.9363 - accuracy: 0.7136
Epoch 25: val_accuracy did not improve from 0.31667
51/51 [==============================] - 12s 230ms/step - loss: 0.9363 - accuracy: 0.7136 - val_loss: 2.6370 - val_accuracy: 0.3167
Epoch 26/100
51/51 [==============================] - ETA: 0s - loss: 0.8897 - accuracy: 0.7185
Epoch 26: val_accuracy did not improve from 0.31667
51/51 [==============================] - 12s 226ms/step - loss: 0.8897 - accuracy: 0.7185 - val_loss: 2.6777 - val_accuracy: 0.2889
Epoch 27/100
51/51 [==============================] - ETA: 0s - loss: 0.8279 - accuracy: 0.7321
Epoch 27: val_accuracy improved from 0.31667 to 0.35000, saving model to best_model.h5
51/51 [==============================] - 12s 230ms/step - loss: 0.8279 - accuracy: 0.7321 - val_loss: 2.6074 - val_accuracy: 0.3500
Epoch 28/100
51/51 [==============================] - ETA: 0s - loss: 0.7850 - accuracy: 0.7432
Epoch 28: val_accuracy did not improve from 0.35000
51/51 [==============================] - 12s 229ms/step - loss: 0.7850 - accuracy: 0.7432 - val_loss: 2.6908 - val_accuracy: 0.2889
Epoch 29/100
51/51 [==============================] - ETA: 0s - loss: 0.7177 - accuracy: 0.7728
Epoch 29: val_accuracy did not improve from 0.35000
51/51 [==============================] - 12s 227ms/step - loss: 0.7177 - accuracy: 0.7728 - val_loss: 2.6413 - val_accuracy: 0.3500
Epoch 30/100
51/51 [==============================] - ETA: 0s - loss: 0.6894 - accuracy: 0.7907
Epoch 30: val_accuracy did not improve from 0.35000
51/51 [==============================] - 12s 231ms/step - loss: 0.6894 - accuracy: 0.7907 - val_loss: 2.6495 - val_accuracy: 0.3500
Epoch 31/100
51/51 [==============================] - ETA: 0s - loss: 0.6455 - accuracy: 0.7932
Epoch 31: val_accuracy did not improve from 0.35000
51/51 [==============================] - 12s 226ms/step - loss: 0.6455 - accuracy: 0.7932 - val_loss: 2.6998 - val_accuracy: 0.3278
Epoch 32/100
51/51 [==============================] - ETA: 0s - loss: 0.6002 - accuracy: 0.8111
Epoch 32: val_accuracy improved from 0.35000 to 0.36667, saving model to best_model.h5
51/51 [==============================] - 12s 229ms/step - loss: 0.6002 - accuracy: 0.8111 - val_loss: 2.7325 - val_accuracy: 0.3667
Epoch 33/100
51/51 [==============================] - ETA: 0s - loss: 0.5707 - accuracy: 0.8216
Epoch 33: val_accuracy did not improve from 0.36667
51/51 [==============================] - 12s 225ms/step - loss: 0.5707 - accuracy: 0.8216 - val_loss: 2.7083 - val_accuracy: 0.3500
Epoch 34/100
51/51 [==============================] - ETA: 0s - loss: 0.5239 - accuracy: 0.8469
Epoch 34: val_accuracy did not improve from 0.36667
51/51 [==============================] - 12s 228ms/step - loss: 0.5239 - accuracy: 0.8469 - val_loss: 2.7905 - val_accuracy: 0.3667
Epoch 35/100
51/51 [==============================] - ETA: 0s - loss: 0.5083 - accuracy: 0.8481
Epoch 35: val_accuracy did not improve from 0.36667
51/51 [==============================] - 11s 224ms/step - loss: 0.5083 - accuracy: 0.8481 - val_loss: 2.7329 - val_accuracy: 0.3611
Epoch 36/100
51/51 [==============================] - ETA: 0s - loss: 0.4689 - accuracy: 0.8562
Epoch 36: val_accuracy improved from 0.36667 to 0.37222, saving model to best_model.h5
51/51 [==============================] - 12s 230ms/step - loss: 0.4689 - accuracy: 0.8562 - val_loss: 2.8145 - val_accuracy: 0.3722
Epoch 37/100
51/51 [==============================] - ETA: 0s - loss: 0.4337 - accuracy: 0.8648
Epoch 37: val_accuracy improved from 0.37222 to 0.38333, saving model to best_model.h5
51/51 [==============================] - 12s 232ms/step - loss: 0.4337 - accuracy: 0.8648 - val_loss: 2.8489 - val_accuracy: 0.3833
Epoch 38/100
51/51 [==============================] - ETA: 0s - loss: 0.4350 - accuracy: 0.8611
Epoch 38: val_accuracy did not improve from 0.38333
51/51 [==============================] - 11s 223ms/step - loss: 0.4350 - accuracy: 0.8611 - val_loss: 2.8442 - val_accuracy: 0.3667
Epoch 39/100
51/51 [==============================] - ETA: 0s - loss: 0.4056 - accuracy: 0.8784
Epoch 39: val_accuracy did not improve from 0.38333
51/51 [==============================] - 11s 223ms/step - loss: 0.4056 - accuracy: 0.8784 - val_loss: 2.8933 - val_accuracy: 0.3611
Epoch 40/100
51/51 [==============================] - ETA: 0s - loss: 0.3654 - accuracy: 0.8883
Epoch 40: val_accuracy did not improve from 0.38333
51/51 [==============================] - 12s 227ms/step - loss: 0.3654 - accuracy: 0.8883 - val_loss: 2.9233 - val_accuracy: 0.3722
Epoch 41/100
51/51 [==============================] - ETA: 0s - loss: 0.3514 - accuracy: 0.8926
Epoch 41: val_accuracy did not improve from 0.38333
51/51 [==============================] - 11s 214ms/step - loss: 0.3514 - accuracy: 0.8926 - val_loss: 2.9773 - val_accuracy: 0.3778
Epoch 42/100
51/51 [==============================] - ETA: 0s - loss: 0.3646 - accuracy: 0.8944
Epoch 42: val_accuracy did not improve from 0.38333
51/51 [==============================] - 12s 230ms/step - loss: 0.3646 - accuracy: 0.8944 - val_loss: 2.9606 - val_accuracy: 0.3667
Epoch 43/100
51/51 [==============================] - ETA: 0s - loss: 0.3259 - accuracy: 0.9006
Epoch 43: val_accuracy did not improve from 0.38333
51/51 [==============================] - 11s 224ms/step - loss: 0.3259 - accuracy: 0.9006 - val_loss: 3.0564 - val_accuracy: 0.3667
Epoch 44/100
51/51 [==============================] - ETA: 0s - loss: 0.3318 - accuracy: 0.8994
Epoch 44: val_accuracy did not improve from 0.38333
51/51 [==============================] - 12s 227ms/step - loss: 0.3318 - accuracy: 0.8994 - val_loss: 2.9886 - val_accuracy: 0.3778
Epoch 45/100
51/51 [==============================] - ETA: 0s - loss: 0.3087 - accuracy: 0.9099
Epoch 45: val_accuracy did not improve from 0.38333
51/51 [==============================] - 11s 223ms/step - loss: 0.3087 - accuracy: 0.9099 - val_loss: 3.0468 - val_accuracy: 0.3722
Epoch 46/100
51/51 [==============================] - ETA: 0s - loss: 0.2843 - accuracy: 0.9117
Epoch 46: val_accuracy did not improve from 0.38333
51/51 [==============================] - 12s 243ms/step - loss: 0.2843 - accuracy: 0.9117 - val_loss: 3.1296 - val_accuracy: 0.3500
Epoch 47/100
51/51 [==============================] - ETA: 0s - loss: 0.2587 - accuracy: 0.9247
Epoch 47: val_accuracy did not improve from 0.38333
51/51 [==============================] - 12s 226ms/step - loss: 0.2587 - accuracy: 0.9247 - val_loss: 3.0639 - val_accuracy: 0.3722
Epoch 48/100
51/51 [==============================] - ETA: 0s - loss: 0.2800 - accuracy: 0.9210
Epoch 48: val_accuracy did not improve from 0.38333
51/51 [==============================] - 12s 227ms/step - loss: 0.2800 - accuracy: 0.9210 - val_loss: 3.0848 - val_accuracy: 0.3722
Epoch 49/100
51/51 [==============================] - ETA: 0s - loss: 0.2428 - accuracy: 0.9272
Epoch 49: val_accuracy did not improve from 0.38333
51/51 [==============================] - 12s 229ms/step - loss: 0.2428 - accuracy: 0.9272 - val_loss: 3.2032 - val_accuracy: 0.3722
Epoch 50/100
51/51 [==============================] - ETA: 0s - loss: 0.2469 - accuracy: 0.9228
Epoch 50: val_accuracy did not improve from 0.38333
51/51 [==============================] - 11s 225ms/step - loss: 0.2469 - accuracy: 0.9228 - val_loss: 3.2183 - val_accuracy: 0.3833
Epoch 51/100
51/51 [==============================] - ETA: 0s - loss: 0.2317 - accuracy: 0.9321
Epoch 51: val_accuracy improved from 0.38333 to 0.38889, saving model to best_model.h5
51/51 [==============================] - 11s 224ms/step - loss: 0.2317 - accuracy: 0.9321 - val_loss: 3.2221 - val_accuracy: 0.3889
Epoch 52/100
51/51 [==============================] - ETA: 0s - loss: 0.2449 - accuracy: 0.9278
Epoch 52: val_accuracy did not improve from 0.38889
51/51 [==============================] - 12s 226ms/step - loss: 0.2449 - accuracy: 0.9278 - val_loss: 3.2339 - val_accuracy: 0.3833
Epoch 53/100
51/51 [==============================] - ETA: 0s - loss: 0.2188 - accuracy: 0.9340
Epoch 53: val_accuracy did not improve from 0.38889
51/51 [==============================] - 12s 227ms/step - loss: 0.2188 - accuracy: 0.9340 - val_loss: 3.1986 - val_accuracy: 0.3667
Epoch 54/100
51/51 [==============================] - ETA: 0s - loss: 0.2094 - accuracy: 0.9358
Epoch 54: val_accuracy did not improve from 0.38889
51/51 [==============================] - 11s 224ms/step - loss: 0.2094 - accuracy: 0.9358 - val_loss: 3.2569 - val_accuracy: 0.3667
Epoch 55/100
51/51 [==============================] - ETA: 0s - loss: 0.2139 - accuracy: 0.9364
Epoch 55: val_accuracy did not improve from 0.38889
51/51 [==============================] - 11s 224ms/step - loss: 0.2139 - accuracy: 0.9364 - val_loss: 3.2635 - val_accuracy: 0.3778
Epoch 56/100
51/51 [==============================] - ETA: 0s - loss: 0.1936 - accuracy: 0.9438
Epoch 56: val_accuracy did not improve from 0.38889
51/51 [==============================] - 12s 232ms/step - loss: 0.1936 - accuracy: 0.9438 - val_loss: 3.2311 - val_accuracy: 0.3667
Epoch 57/100
51/51 [==============================] - ETA: 0s - loss: 0.1849 - accuracy: 0.9543
Epoch 57: val_accuracy did not improve from 0.38889
51/51 [==============================] - 12s 227ms/step - loss: 0.1849 - accuracy: 0.9543 - val_loss: 3.3000 - val_accuracy: 0.3556
Epoch 58/100
51/51 [==============================] - ETA: 0s - loss: 0.1949 - accuracy: 0.9457
Epoch 58: val_accuracy did not improve from 0.38889
51/51 [==============================] - 12s 229ms/step - loss: 0.1949 - accuracy: 0.9457 - val_loss: 3.3517 - val_accuracy: 0.3611
Epoch 59/100
51/51 [==============================] - ETA: 0s - loss: 0.1775 - accuracy: 0.9531
Epoch 59: val_accuracy did not improve from 0.38889
51/51 [==============================] - 12s 232ms/step - loss: 0.1775 - accuracy: 0.9531 - val_loss: 3.3539 - val_accuracy: 0.3611
Epoch 60/100
51/51 [==============================] - ETA: 0s - loss: 0.1701 - accuracy: 0.9562
Epoch 60: val_accuracy did not improve from 0.38889
51/51 [==============================] - 11s 225ms/step - loss: 0.1701 - accuracy: 0.9562 - val_loss: 3.3654 - val_accuracy: 0.3444
Epoch 61/100
51/51 [==============================] - ETA: 0s - loss: 0.1726 - accuracy: 0.9537
Epoch 61: val_accuracy did not improve from 0.38889
51/51 [==============================] - 12s 227ms/step - loss: 0.1726 - accuracy: 0.9537 - val_loss: 3.3319 - val_accuracy: 0.3500
Epoch 62/100
51/51 [==============================] - ETA: 0s - loss: 0.1485 - accuracy: 0.9679
Epoch 62: val_accuracy did not improve from 0.38889
51/51 [==============================] - 12s 228ms/step - loss: 0.1485 - accuracy: 0.9679 - val_loss: 3.3508 - val_accuracy: 0.3500
Epoch 63/100
51/51 [==============================] - ETA: 0s - loss: 0.1550 - accuracy: 0.9562
Epoch 63: val_accuracy did not improve from 0.38889
51/51 [==============================] - 11s 222ms/step - loss: 0.1550 - accuracy: 0.9562 - val_loss: 3.4061 - val_accuracy: 0.3556
Epoch 64/100
51/51 [==============================] - ETA: 0s - loss: 0.1611 - accuracy: 0.9636
Epoch 64: val_accuracy did not improve from 0.38889
51/51 [==============================] - 12s 227ms/step - loss: 0.1611 - accuracy: 0.9636 - val_loss: 3.4297 - val_accuracy: 0.3722
Epoch 65/100
51/51 [==============================] - ETA: 0s - loss: 0.1669 - accuracy: 0.9506
Epoch 65: val_accuracy did not improve from 0.38889
51/51 [==============================] - 12s 229ms/step - loss: 0.1669 - accuracy: 0.9506 - val_loss: 3.4415 - val_accuracy: 0.3722
Epoch 66/100
51/51 [==============================] - ETA: 0s - loss: 0.1397 - accuracy: 0.9599
Epoch 66: val_accuracy did not improve from 0.38889
51/51 [==============================] - 12s 228ms/step - loss: 0.1397 - accuracy: 0.9599 - val_loss: 3.4333 - val_accuracy: 0.3667
Epoch 67/100
51/51 [==============================] - ETA: 0s - loss: 0.1462 - accuracy: 0.9667
Epoch 67: val_accuracy did not improve from 0.38889
51/51 [==============================] - 11s 224ms/step - loss: 0.1462 - accuracy: 0.9667 - val_loss: 3.4746 - val_accuracy: 0.3722
Epoch 68/100
51/51 [==============================] - ETA: 0s - loss: 0.1408 - accuracy: 0.9642
Epoch 68: val_accuracy did not improve from 0.38889
51/51 [==============================] - 12s 229ms/step - loss: 0.1408 - accuracy: 0.9642 - val_loss: 3.4451 - val_accuracy: 0.3611
Epoch 69/100
51/51 [==============================] - ETA: 0s - loss: 0.1447 - accuracy: 0.9623
Epoch 69: val_accuracy did not improve from 0.38889
51/51 [==============================] - 11s 222ms/step - loss: 0.1447 - accuracy: 0.9623 - val_loss: 3.4972 - val_accuracy: 0.3722
Epoch 70/100
51/51 [==============================] - ETA: 0s - loss: 0.1420 - accuracy: 0.9599
Epoch 70: val_accuracy did not improve from 0.38889
51/51 [==============================] - 11s 221ms/step - loss: 0.1420 - accuracy: 0.9599 - val_loss: 3.5396 - val_accuracy: 0.3611
Epoch 71/100
51/51 [==============================] - ETA: 0s - loss: 0.1342 - accuracy: 0.9636
Epoch 71: val_accuracy did not improve from 0.38889
51/51 [==============================] - 12s 228ms/step - loss: 0.1342 - accuracy: 0.9636 - val_loss: 3.5661 - val_accuracy: 0.3556
Epoch 71: early stopping

二、学习笔记

从图表中可以看出,训练准确率逐渐增加并趋于稳定,而验证准确率在较低的范围内波动。这表明模型可能存在过拟合的问题。训练损失在逐渐减小,而验证损失在一定范围内波动并略有增加,进一步证实了这一点。

以下是一些可能的改进建议:

  1. 增加正则化: 添加正则化(如L2正则化)到卷积层和全连接层,可以帮助减少过拟合。

  2. 调整Dropout率: 尝试调整Dropout的比例(例如增加Dropout的比例),以防止模型过拟合。

  3. 数据增强: 使用数据增强技术可以增加训练数据的多样性,从而帮助模型更好地泛化。

  4. 减少模型复杂度: 尝试减少模型的复杂度,例如减少卷积层的数量或减少每层的滤波器数量。

  5. 增加训练数据: 如果可能,增加训练数据的数量,可以帮助模型学习到更泛化的特征。

  6. 调整学习率和学习率衰减: 尝试不同的初始学习率或调整学习率衰减策略,可能会对模型的训练效果产生影响。

那么,什么是正则化?

正则化是一种防止机器学习模型过拟合的方法。它通过在训练过程中向损失函数添加额外的惩罚项,来限制模型的复杂度使模型更具泛化能力。常见的正则化方法有两种:L1正则化和L2正则化

L1 正则化在损失函数中加入权重绝对值之和作为惩罚项,倾向于把部分权重压缩为零,从而产生稀疏的模型;L2 正则化加入权重平方和作为惩罚项,倾向于让所有权重整体变小但不为零,使模型更平滑、更稳定。


一些方法的学习

pathlib.Path
'*/*.jpg'
data_dir.glob
PIL.Image.open

tf.keras.preprocessing.image_dataset_from_directory

该方法用于从目录中加载图像数据集,它的作用是将目录中的图像数据整合为一个适用于 Keras 模型的 tf.data.Dataset 对象,之后就可以通过调用该对象来使用数据集。

语法:

train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    directory,                      # 图片数据的目录路径
    labels='inferred',              # 标签推断方式,可以是'inferred'、None或一个标签列表
    label_mode='int',               # 标签模式,可以是'int'、'categorical'或'binary'
    class_names=None,               # 自定义类别名称列表,默认为目录中的子目录名按字母顺序排序
    color_mode='rgb',               # 图片颜色模式,可以是'rgb'或'grayscale'
    batch_size=32,                  # 每个批次的图像数量
    image_size=(256, 256),          # 调整图片大小,格式为(height, width)
    shuffle=True,                   # 是否在加载数据时打乱顺序
    seed=None,                      # 随机种子,用于打乱数据时保持一致性
    validation_split=None,          # 验证集的比例,范围在[0, 1)之间
    subset=None,                    # 指定数据集是'training'还是'validation'
    interpolation='bilinear',       # 调整图像大小时使用的插值方法
    follow_links=False,             # 是否跟随符号链接
    crop_to_aspect_ratio=False      # 是否裁剪图像以保持宽高比
)


train_ds.class_names

class_names 是 tf.data.Dataset 对象的一个属性,使用 tf.keras.preprocessing.image_dataset_from_directory 方法创建数据集时,生成的数据集对象包含了这个属性。它的作用是用来调取你创建的数据集下的分类。

比如说,

class_names = train_ds.class_names
print(class_names)

这个意思是说,查看之前你创建的dataset目录下的分类名称。train_ds是之前创建的对象。


plt.figure(figsize=(20, 10))

这个语句用来创建一个新的图形窗口,并设置窗口的大小。它的原型应该是matplotlib.pyplot.figure(),是Matplotlib 库中 pyplot 模块的一个方法。

它的语法是:

matplotlib.pyplot.figure(num=None, figsize=None, dpi=None, facecolor=None, edgecolor=None, frameon=True, FigureClass=<class 'matplotlib.figure.Figure'>, clear=False, **kwargs)

需要注意的是,在 plt(也就是 matplotlib)中,所有绘图命令都是针对当前的活动图形和轴进行的。当前的活动图形和轴由最近一次 plt.figure()(该命令创建一个图形窗口,后续的绘图命令都会在这个窗口上画图)、plt.subplot() 或 plt.axes() 调用确定。如果没有明确调用 plt.figure() 或者 plt.subplot()/plt.axes(),Matplotlib 会自动创建一个图形和一个轴,并将其设置为活动的。

比如说,如果我要创建2张图:

import matplotlib.pyplot as plt

# 创建第一个图形窗口
plt.figure(figsize=(10, 5))
# 在第一个图形中绘制一条折线
plt.plot([1, 2, 3, 4], [10, 20, 25, 30], label='Line 1')
# 设置第一个图形的标题和轴标签
plt.title("First Plot")
plt.xlabel("X-axis 1")
plt.ylabel("Y-axis 1")
plt.legend()
# 显示第一个图形
plt.show()

# 创建第二个图形窗口
plt.figure(figsize=(8, 6))
# 在第二个图形中绘制另一条折线
plt.plot([1, 2, 3, 4], [30, 25, 20, 10], label='Line 2', color='red')
# 设置第二个图形的标题和轴标签
plt.title("Second Plot")
plt.xlabel("X-axis 2")
plt.ylabel("Y-axis 2")
plt.legend()
# 显示第二个图形
plt.show()

可以看见,第二张图的开头就是plt.figure()。


for images, labels in train_ds.take(1):

  • 14
    点赞
  • 10
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值