DL学习笔记 T4
【TF NOTE】T4 猴痘病识别
前言
🍨 本文为🔗365天深度学习训练营 中的学习记录博客
🍖 原作者:K同学啊
一、设置GPU
导入需要的模块并指定GPU
# noinspection PyUnresolvedReferences
from tensorflow import keras
# noinspection PyUnresolvedReferences
from tensorflow.keras import layers, models
import os, PIL, pathlib
# noinspection PyUnresolvedReferences
import matplotlib.pyplot as plt
import tensorflow as tf
# Detect available GPUs and configure the first one.  The pasted source had
# lost the indentation of the `if` body; restored here so the script runs.
gpus = tf.config.list_physical_devices("GPU")
if gpus:
    gpu0 = gpus[0]
    # Allocate GPU memory on demand instead of reserving the whole card up front.
    tf.config.experimental.set_memory_growth(gpu0, True)
    # Restrict TensorFlow to this single GPU.
    tf.config.set_visible_devices([gpu0], "GPU")
二、正式开始
1.加载数据
加载数据进训练集和验证集
代码如下(示例):
# Dataset root: each subdirectory (e.g. Monkeypox/, Others/) is one class.
data_dir = pathlib.Path("D:/BaiduNetdiskDownload/T4/")

# Count every jpg one level below the root to sanity-check the download.
image_count = len(list(data_dir.glob('*/*.jpg')))
print("图片总数为:", image_count)

# Preview one sample image from the Monkeypox class.
monkeypox_images = list(data_dir.glob('Monkeypox/*.jpg'))
sample_img = PIL.Image.open(str(monkeypox_images[0]))
sample_img.show()

# Data-loading hyperparameters (images are resized to 224x224 below).
batch_size = 32
img_height = 224
img_width = 224
# Build an 80/20 training/validation split from the directory structure.
# The identical seed on both calls keeps the two subsets disjoint and
# reproducible.  (Indentation of the call arguments restored.)
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size
)

# Class names are inferred from the subdirectory names (alphabetical order).
class_names = train_ds.class_names
print(class_names)
输出
2.可视化数据
#可视化数据
# Visualize the first 20 images of one training batch with their labels.
# (Loop indentation restored from the flattened paste.)
plt.figure(figsize=(20, 10))
for images, labels in train_ds.take(1):
    for i in range(20):
        ax = plt.subplot(5, 10, i + 1)
        # Datasets yield float tensors; cast back to uint8 for display.
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")

# Print the shape of one batch: images (batch, h, w, channels), labels (batch,).
for image_batch, labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break
输出
3.建立模型
老演员CNN
# Overlap data preprocessing with training: cache decoded images, shuffle the
# training stream, and prefetch the next batch while the accelerator is busy.
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

# 建立CNN模型 — small CNN.  The final Dense layer outputs raw logits (no
# softmax), which matches the from_logits=True loss used at compile time.
num_classes = 2
model = models.Sequential([
    # Normalize pixels from [0, 255] to [0, 1]; also declares the input shape.
    layers.experimental.preprocessing.Rescaling(
        1. / 255, input_shape=(img_height, img_width, 3)),
    # Redundant input_shape removed from this Conv2D: the Rescaling layer
    # above already fixes the model's input shape.
    layers.Conv2D(16, (3, 3), activation='relu'),
    layers.AveragePooling2D((2, 2)),
    layers.Conv2D(32, (3, 3), activation='relu'),
    layers.AveragePooling2D((2, 2)),
    layers.Dropout(0.3),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.Dropout(0.3),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(num_classes)  # logits, one per class
])
model.summary()
输出
4.编译模型
# Compile: Adam with a small learning rate; sparse integer labels against the
# model's logit outputs (hence from_logits=True).
opt = tf.keras.optimizers.Adam(learning_rate=1e-4)
model.compile(optimizer=opt,
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

from tensorflow.keras.callbacks import ModelCheckpoint

epochs = 50
# Keep only the weights from the epoch with the best validation accuracy;
# save_weights_only=True stores weights, not the full model.
checkpointer = ModelCheckpoint('best_model.h5',
                               monitor='val_accuracy',
                               verbose=1,
                               save_best_only=True,
                               save_weights_only=True)
history = model.fit(train_ds,
                    validation_data=val_ds,
                    epochs=epochs,
                    callbacks=[checkpointer])
5.评估模型
#模型评估
# 模型评估: plot training vs. validation accuracy and loss over all epochs.
hist = history.history
acc, val_acc = hist['accuracy'], hist['val_accuracy']
loss, val_loss = hist['loss'], hist['val_loss']
epochs_range = range(epochs)

plt.figure(figsize=(12, 4))

# Left panel: accuracy curves.
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

# Right panel: loss curves.
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')

plt.show()
输出ACC和LOSS曲线
6.预测
# Restore the best checkpointed weights before predicting.
model.load_weights('best_model.h5')

from PIL import Image
import numpy as np

img = Image.open("D:/BaiduNetdiskDownload/T4/Others/NM05_01_04.jpg")
img = np.array(img)
print(img.shape)

# Resize to the model's input size.  BUG FIX: the original divided by 255
# here, but the model's first layer is Rescaling(1./255), so pixels were
# normalized TWICE at inference (values in [0, 1/255]) — unlike training,
# where raw [0, 255] batches are fed.  Pass raw pixel values instead.
image = tf.image.resize(img, [224, 224])
img_array = tf.expand_dims(image, 0)

# argmax over the logits picks the predicted class index.
predictions = model.predict(img_array)
print("预测结果为:", class_names[np.argmax(predictions)])
输出
总结
本次课程通过 cache 和 prefetch 预先配置数据集,让数据预处理与模型训练并行执行,从而提升设备利用效率;同时借助 ModelCheckpoint 保存验证集上表现最好的模型权重,并在预测阶段加载该权重对新图片进行分类。