T6 Check-in - Study Notes

# Set up the GPU
from tensorflow       import keras
from tensorflow.keras import layers,models
import os, PIL, pathlib
import matplotlib.pyplot as plt
import tensorflow        as tf
import numpy             as np

gpus = tf.config.list_physical_devices("GPU")

if gpus:
    gpu0 = gpus[0]                                        # If there are multiple GPUs, use only GPU 0
    tf.config.experimental.set_memory_growth(gpu0, True)  # Allocate GPU memory on demand
    tf.config.set_visible_devices([gpu0],"GPU")

gpus
[]
# Load and inspect the data
data_dir = r"C:\Users\11054\Desktop\kLearning\t6_learning\data"
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.jpg')))
print("图片总数为:",image_count)
图片总数为: 3600
faces = list(data_dir.glob('Jennifer Lawrence/*.jpg'))  # sample one class to preview
PIL.Image.open(str(faces[0]))

[image: sample photo from the Jennifer Lawrence class]

# Data preprocessing
batch_size = 32
img_height = 224
img_width = 224
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.1,
    subset="training",
    label_mode="categorical",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.1,
    subset="validation",
    label_mode="categorical",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
Found 3600 files belonging to 17 classes.
Using 3240 files for training.
Found 3600 files belonging to 17 classes.
Using 360 files for validation.
# View labels and sample images
class_names = train_ds.class_names
print(class_names)
plt.figure(figsize=(20, 10))

for images, labels in train_ds.take(1):
    for i in range(20):
        ax = plt.subplot(5, 10, i + 1)

        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[np.argmax(labels[i])])

        plt.axis("off")
['Angelina Jolie', 'Brad Pitt', 'Denzel Washington', 'Hugh Jackman', 'Jennifer Lawrence', 'Johnny Depp', 'Kate Winslet', 'Leonardo DiCaprio', 'Megan Fox', 'Natalie Portman', 'Nicole Kidman', 'Robert Downey Jr', 'Sandra Bullock', 'Scarlett Johansson', 'Tom Cruise', 'Tom Hanks', 'Will Smith']

[image: grid of 20 sample training images titled with their class names]

# Inspect a batch
for image_batch, labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break
(32, 224, 224, 3)
(32, 17)
# Configure the dataset pipeline
AUTOTUNE = tf.data.AUTOTUNE

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)  # cache, shuffle, and prefetch to overlap I/O with training
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# Build the CNN
"""
关于卷积核的计算不懂的可以参考文章:https://blog.csdn.net/qq_38251616/article/details/114278995

layers.Dropout(0.4) 作用是防止过拟合,提高模型的泛化能力。
关于Dropout层的更多介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/115826689
"""

model = models.Sequential([
    layers.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
    layers.Conv2D(16, (3, 3), activation='relu', input_shape=(img_height, img_width, 3)), # Conv layer 1, 3x3 kernel
    layers.BatchNormalization(),                     # BN layer
    layers.AveragePooling2D((2, 2)),                 # Pooling layer 1, 2x2 downsampling
    layers.Conv2D(32, (3, 3), activation='relu'),    # Conv layer 2, 3x3 kernel
    layers.BatchNormalization(),                     # BN layer
    layers.AveragePooling2D((2, 2)),                 # Pooling layer 2, 2x2 downsampling
    layers.Dropout(0.5),
    layers.Conv2D(64, (3, 3), activation='relu'),    # Conv layer 3, 3x3 kernel
    layers.BatchNormalization(),                     # BN layer
    layers.AveragePooling2D((2, 2)),                 # Pooling layer 3, 2x2 downsampling
    layers.Dropout(0.5),
    layers.Conv2D(128, (3, 3), activation='relu'),   # Conv layer 4, 3x3 kernel
    layers.BatchNormalization(),                     # BN layer
    layers.Dropout(0.5),

    layers.Flatten(),                                # Flatten layer, bridges the conv layers and the dense layers
    layers.Dense(128, activation='relu'),            # Fully connected layer for further feature extraction
    layers.BatchNormalization(),                     # BN layer
    layers.Dense(len(class_names))                   # Output layer (raw logits, no activation)
])

model.summary()  # Print the network architecture
C:\Users\11054\.conda\envs\py311\Lib\site-packages\keras\src\layers\preprocessing\tf_data_layer.py:18: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(**kwargs)
C:\Users\11054\.conda\envs\py311\Lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Model: "sequential_2"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type)                         ┃ Output Shape                ┃         Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ rescaling_2 (Rescaling)              │ (None, 224, 224, 3)         │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_8 (Conv2D)                    │ (None, 222, 222, 16)        │             448 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ batch_normalization                  │ (None, 222, 222, 16)        │              64 │
│ (BatchNormalization)                 │                             │                 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ average_pooling2d_6                  │ (None, 111, 111, 16)        │               0 │
│ (AveragePooling2D)                   │                             │                 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_9 (Conv2D)                    │ (None, 109, 109, 32)        │           4,640 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ batch_normalization_1                │ (None, 109, 109, 32)        │             128 │
│ (BatchNormalization)                 │                             │                 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ average_pooling2d_7                  │ (None, 54, 54, 32)          │               0 │
│ (AveragePooling2D)                   │                             │                 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_6 (Dropout)                  │ (None, 54, 54, 32)          │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_10 (Conv2D)                   │ (None, 52, 52, 64)          │          18,496 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ batch_normalization_2                │ (None, 52, 52, 64)          │             256 │
│ (BatchNormalization)                 │                             │                 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ average_pooling2d_8                  │ (None, 26, 26, 64)          │               0 │
│ (AveragePooling2D)                   │                             │                 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_7 (Dropout)                  │ (None, 26, 26, 64)          │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_11 (Conv2D)                   │ (None, 24, 24, 128)         │          73,856 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ batch_normalization_3                │ (None, 24, 24, 128)         │             512 │
│ (BatchNormalization)                 │                             │                 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_8 (Dropout)                  │ (None, 24, 24, 128)         │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ flatten_2 (Flatten)                  │ (None, 73728)               │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_4 (Dense)                      │ (None, 128)                 │       9,437,312 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ batch_normalization_4                │ (None, 128)                 │             512 │
│ (BatchNormalization)                 │                             │                 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_5 (Dense)                      │ (None, 17)                  │           2,193 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
 Total params: 9,538,417 (36.39 MB)
 Trainable params: 9,537,681 (36.38 MB)
 Non-trainable params: 736 (2.88 KB)
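
As a sanity check on the summary, the parameter counts follow directly from the layer arithmetic; a small sketch using the layer names from the table above:

# Verifying the summary arithmetic (valid padding, stride 1):
# feature-map size: out = in - kernel + 1, e.g. 224 - 3 + 1 = 222
# Conv2D params: (kh * kw * in_channels + 1) * out_channels
assert (3 * 3 * 3 + 1) * 16 == 448          # conv2d_8
assert (3 * 3 * 16 + 1) * 32 == 4640        # conv2d_9
assert (3 * 3 * 32 + 1) * 64 == 18496       # conv2d_10
assert (3 * 3 * 64 + 1) * 128 == 73856      # conv2d_11
assert 24 * 24 * 128 == 73728               # flatten_2 output size
assert (73728 + 1) * 128 == 9437312         # dense_4
assert (128 + 1) * 17 == 2193               # dense_5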

Setting a dynamic learning rate

# Set the initial learning rate
initial_learning_rate = 1e-4

lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate,
        decay_steps=60,      # Heads up: this counts steps, not epochs
        decay_rate=0.96,     # each decay multiplies lr by decay_rate
        staircase=True)

# Pass the exponential-decay schedule to the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

model.compile(optimizer=optimizer,
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
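
To make the schedule concrete: with staircase=True the rate is initial_learning_rate * decay_rate ** (step // decay_steps). A small sketch (the schedule object is callable on a step number):

# lr(step) = initial_learning_rate * decay_rate ** (step // decay_steps)
print(lr_schedule(0).numpy())    # 1.0e-04   (steps 0-59)
print(lr_schedule(60).numpy())   # 9.6e-05   (steps 60-119, one decay)
print(lr_schedule(120).numpy())  # ~9.216e-05 (two decays applied)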

Loss functions in detail:

  1. binary_crossentropy (log loss)

The loss paired with sigmoid, for binary classification problems.

  2. categorical_crossentropy (multi-class log loss)

The loss paired with softmax; if the labels are one-hot encoded, use categorical_crossentropy.

  • Usage, option one:

model.compile(optimizer="adam",
              loss='categorical_crossentropy',
              metrics=['accuracy'])

  • Usage, option two:

model.compile(optimizer="adam",
              loss=tf.keras.losses.CategoricalCrossentropy(),
              metrics=['accuracy'])

  3. sparse_categorical_crossentropy (sparse multi-class log loss)

The loss paired with softmax; if the labels are integer encoded, use sparse_categorical_crossentropy.

  • Usage, option one:

model.compile(optimizer="adam",
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

  • Usage, option two:

model.compile(optimizer="adam",
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])

Function prototype:

tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=False,
    reduction=losses_utils.ReductionV2.AUTO,
    name='sparse_categorical_crossentropy'
)

Parameter notes:

from_logits: if True, y_pred is treated as raw logits and converted to probabilities internally (via softmax); if False, no conversion is applied. True is usually the more numerically stable choice.
reduction: of type tf.keras.losses.Reduction; how the per-sample losses are reduced. Defaults to AUTO.
name: name of the loss instance.
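
A minimal runnable sketch (example values chosen for illustration): the two multi-class losses give the same result once the label encoding matches, and from_logits=True applies softmax internally:

# The same 3-class prediction scored with both losses.
logits = tf.constant([[2.0, 1.0, 0.1]])            # raw, unnormalized scores
one_hot_label = tf.constant([[1.0, 0.0, 0.0]])     # one-hot encoding
integer_label = tf.constant([0])                   # integer encoding

cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
scce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

print(cce(one_hot_label, logits).numpy())   # ~0.417
print(scce(integer_label, logits).numpy())  # identical value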

Setting up early stopping

EarlyStopping() parameters:

  • monitor: the quantity to be monitored.
  • min_delta: the minimum change in the monitored quantity that counts as an improvement; an absolute change smaller than min_delta is treated as no improvement.
  • patience: the number of epochs with no improvement after which training stops.
  • verbose: verbosity mode.
  • mode: one of {auto, min, max}. In min mode, training stops when the monitored quantity stops decreasing; in max mode, it stops when the quantity stops increasing; in auto mode, the direction is inferred from the name of the monitored quantity.
  • baseline: baseline value for the monitored quantity; training stops if the model shows no improvement over the baseline.
  • restore_best_weights: whether to restore the model weights from the epoch with the best value of the monitored quantity. If False, the weights from the last training step are used.
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping

epochs = 100

# Save the best model weights
checkpointer = ModelCheckpoint('best_model.weights.h5',
                                monitor='val_accuracy',
                                verbose=1,
                                save_best_only=True,
                                save_weights_only=True)

# Set up early stopping
earlystopper = EarlyStopping(monitor='val_accuracy',
                             min_delta=0.001,
                             patience=20,
                             verbose=1)
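
restore_best_weights is described above but not used here; since ModelCheckpoint already writes the best weights to disk, reloading them afterwards works too. An alternative sketch that lets the callback restore them itself:

# Alternative (not what was run below): roll back to the best weights
# automatically when training stops, instead of reloading the checkpoint.
earlystopper = EarlyStopping(monitor='val_accuracy',
                             min_delta=0.001,
                             patience=20,
                             verbose=1,
                             restore_best_weights=True)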

Training the model

history = model.fit(train_ds,
                    validation_data=val_ds,
                    epochs=epochs,
                    callbacks=[checkpointer, earlystopper])
Epoch 1/100
Epoch 1: val_accuracy improved from -inf to 0.08056, saving model to best_model.weights.h5
102/102 ━━━━━━━━━━━━━━━━━━━━ 46s 419ms/step - accuracy: 0.1379 - loss: 3.1002 - val_accuracy: 0.0806 - val_loss: 2.8348
Epoch 2/100
Epoch 2: val_accuracy improved from 0.08056 to 0.11111, saving model to best_model.weights.h5
102/102 ━━━━━━━━━━━━━━━━━━━━ 42s 416ms/step - accuracy: 0.3450 - loss: 2.0337 - val_accuracy: 0.1111 - val_loss: 3.2089
Epoch 3/100
Epoch 3: val_accuracy improved from 0.11111 to 0.16389, saving model to best_model.weights.h5
102/102 ━━━━━━━━━━━━━━━━━━━━ 43s 418ms/step - accuracy: 0.4647 - loss: 1.6662 - val_accuracy: 0.1639 - val_loss: 2.9885
[epochs 4-29 omitted: val_accuracy climbed from 0.21667 to 0.54167, saving the model at each improvement]
Epoch 30/100
Epoch 30: val_accuracy improved from 0.54167 to 0.55556, saving model to best_model.weights.h5
102/102 ━━━━━━━━━━━━━━━━━━━━ 42s 416ms/step - accuracy: 0.9953 - loss: 0.1342 - val_accuracy: 0.5556 - val_loss: 1.5768
[epochs 31-49 omitted: val_accuracy did not improve from 0.55556]
Epoch 50/100
Epoch 50: val_accuracy did not improve from 0.55556
102/102 ━━━━━━━━━━━━━━━━━━━━ 42s 409ms/step - accuracy: 0.9980 - loss: 0.0859 - val_accuracy: 0.5500 - val_loss: 1.5478
Epoch 50: early stopping

Evaluating the model

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(len(loss))

plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

[image: training/validation accuracy (left) and loss (right) curves]

Predicting with the model

# Load the best-performing model weights
model.load_weights('best_model.weights.h5')
from PIL import Image
import numpy as np

img = Image.open(r"C:\Users\11054\Desktop\kLearning\t6_learning\data\Angelina Jolie\001_fe3347c0.jpg")  # choose the image you want to predict on
image = tf.image.resize(img, [img_height, img_width])

img_array = tf.expand_dims(image, 0)

predictions = model.predict(img_array)  # use your trained model here
print("Predicted result:", class_names[np.argmax(predictions)])
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 85ms/step
Predicted result: Angelina Jolie
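
Because the output layer has no activation and training used from_logits=True, model.predict() returns raw logits; a small sketch (reusing the variables above) to read the prediction as a probability:

# Convert logits to probabilities before quoting a confidence score.
probs = tf.nn.softmax(predictions[0])
print("Confidence: {:.2%}".format(float(np.max(probs))))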

Personal summary

jupyter nbconvert --to markdown file_path.ipynb converts the notebook to markdown for posting.
Training accuracy is good, but validation accuracy is noticeably worse (the model overfits).
Adding BN layers improved validation accuracy.
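
Given the overfitting noted above, a common next step (a hedged sketch, not something run in this notebook) is random data augmentation in front of the model:

# Hypothetical follow-up, not part of the run above: augmentation layers
# that are active only during training, to combat overfitting.
data_augmentation = tf.keras.Sequential([
    layers.RandomFlip("horizontal"),
    layers.RandomRotation(0.1),
    layers.RandomZoom(0.1),
])

# It would slot in right after Rescaling in the Sequential model:
# models.Sequential([layers.Rescaling(1./255, ...), data_augmentation, ...])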
