TensorFlow 2.6 + Keras: the Kaggle Dogs vs. Cats challenge

The dataset can be downloaded from the Kaggle site. (For some reason it could not be uploaded as a CSDN resource.)

Dogs vs. Cats | Kaggle (create an algorithm to distinguish dogs from cats): https://www.kaggle.com/c/dogs-vs-cats
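The download is a zip of zips. Below is a minimal sketch for unpacking it into the layout the code in this post expects, assuming the archive was fetched manually from the competition page (or with the Kaggle CLI: kaggle competitions download -c dogs-vs-cats) and that train.zip unpacks to a train/ folder, as in the original archive:

import zipfile

# Unpack the outer competition archive (contains train.zip, test1.zip, ...).
with zipfile.ZipFile("dogs-vs-cats.zip") as z:
    z.extractall("../data/")

# Unpack the training images; this creates ../data/train/cat.0.jpg, dog.0.jpg, ...
# which is the default path readData expects.
with zipfile.ZipFile("../data/train.zip") as z:
    z.extractall("../data/")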

First, build the dataset.

input_data.py

import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

'''
Read image paths and labels from the raw dataset
and preprocess the loaded images.
'''


class readData(object):

    def __init__(self, io='../data/train/'):
        """
        初始化
        :param io: 数据集路径
        """

        self.val_dataset = None
        self.train_dataset = None
        self.io = io
        self.image_list = None  # image paths
        self.label_list = None  # corresponding labels
        self.label_names = ['cat', 'dog']

    def get_files(self):

        # return: shuffled image paths and labels
        cats = []
        label_cats = []
        dogs = []
        label_dogs = []
        file_dir = self.io
        # Collect the file paths and assign label values
        for file in os.listdir(file_dir):
            name = file.split(sep='.')
            if name[0] == 'cat':
                cats.append(file_dir + file)
                label_cats.append(0)  # cats are labeled 0
            else:
                dogs.append(file_dir + file)
                label_dogs.append(1)  # dogs are labeled 1
        print("There are %d cats\nThere are %d dogs" % (len(cats), len(dogs)))

        # Merge the cat and dog image paths and labels
        image_list = np.hstack((cats, dogs))
        label_list = np.hstack((label_cats, label_dogs))
        # Stack the paths and labels into one matrix
        temp = np.array([image_list, label_list])
        temp = temp.transpose()  # transpose so each row is (path, label)
        # Shuffle
        np.random.shuffle(temp)
        # Recover the shuffled image paths and labels
        image_list = list(temp[:, 0])
        label_list = list(temp[:, 1])
        label_list = np.asarray(label_list).astype(int)  # integer labels (np.int is deprecated)
        self.image_list = image_list
        self.label_list = label_list
        label_list = np.eye(2)[label_list]  # convert to a one-hot matrix

        return image_list, label_list

    def load_and_preprocess_image(self, path):
        image = tf.io.read_file(path)
        image = tf.image.decode_jpeg(image, channels=3)
        image = tf.image.resize(image, [192, 192])
        # image /= 255.0  # normalize to [0, 1]; done inside the network instead (the images still get augmented there)

        return image

    def show_img(self, index):
        assert self.image_list is not None, "use get_files func to load files first!"
        img = self.image_list[index]
        label = self.label_list[index]
        plt.imshow((self.load_and_preprocess_image(img))/255.0)
        plt.grid(False)
        plt.title(self.label_names[label])
        plt.show()

    def get_dataset(self):
        image_list, label_list = self.get_files()
        # Split into training and validation sets
        image_train, image_val, label_train, label_val = train_test_split(image_list, label_list, test_size=0.3)

        # Build the training dataset
        train_path_ds = tf.data.Dataset.from_tensor_slices(image_train)
        train_image_ds = train_path_ds.map(self.load_and_preprocess_image,
                                           num_parallel_calls=tf.data.experimental.AUTOTUNE)
        train_label_ds = tf.data.Dataset.from_tensor_slices(label_train)
        train_dataset = tf.data.Dataset.zip((train_image_ds, train_label_ds))
        self.train_dataset = train_dataset

        # Build the validation dataset
        val_path_ds = tf.data.Dataset.from_tensor_slices(image_val)
        val_image_ds = val_path_ds.map(self.load_and_preprocess_image,
                                       num_parallel_calls=tf.data.experimental.AUTOTUNE)
        val_label_ds = tf.data.Dataset.from_tensor_slices(label_val)
        val_dataset = tf.data.Dataset.zip((val_image_ds, val_label_ds))
        self.val_dataset = val_dataset

        return train_dataset, val_dataset



# # Quick test
# d = readData()
# tds, vds = d.get_dataset()
# index = np.random.randint(low=1, high=9000, size=5, dtype=int)
# for i in index:
#     d.show_img(i)
# for item in tds.take(3):
#     print(item)
# print(tds.cardinality().numpy())
# print(vds.cardinality().numpy())
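One detail worth calling out in get_files above: np.eye(2)[label_list] one-hot encodes the integer labels by fancy indexing, since row i of the identity matrix is exactly the one-hot vector for class i. A quick demonstration:

import numpy as np

labels = np.array([0, 1, 1, 0])  # 0 = cat, 1 = dog
print(np.eye(2)[labels])         # each label picks the matching row of eye(2)
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]
#  [1. 0.]]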

Next, build a model that can be trained, saving its weights along the way.

model_make.py

import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
from input_data import readData
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential


class Model(object):
    def __init__(self, img_height=192, img_width=192, img_dim=3):
        """
        构建模型
        :param batch_size: 批次大小
        :param epoch: 迭代次数
        :param lr: 学习率
        :param img_height: 图片长
        :param img_width: 宽
        :param img_dim: 通道数量
        """
        self.batch_size = None
        self.epoch = None
        self.lr = 0.0001

        self.img_height = img_height
        self.img_width = img_width
        self.img_dim = img_dim

        self.checkpoint_path = "../checkpoint_data/logs"
        self.checkpoint_dir = os.path.dirname(self.checkpoint_path)

    def create_model(self, show_model=True):
        # Data augmentation to reduce overfitting
        data_augmentation = keras.Sequential([
            layers.RandomFlip("horizontal",
                              input_shape=(self.img_height,
                                           self.img_width,
                                           self.img_dim)),
            layers.RandomRotation(0.1),
            layers.RandomZoom(0.1),
        ])

        # Build the model
        model = Sequential([
            data_augmentation,
            layers.Rescaling(1. / 255),  # rescale pixel values to [0, 1]
            layers.Conv2D(64, 3, activation="relu", padding="same",
                          input_shape=[self.img_height, self.img_width, self.img_dim]),
            layers.MaxPooling2D(2),
            layers.Conv2D(128, 3, activation="relu", padding="same"),
            layers.Conv2D(128, 3, activation="relu", padding="same"),
            layers.MaxPooling2D(2),
            layers.Conv2D(256, 3, activation="relu", padding="same"),
            layers.Conv2D(256, 3, activation="relu", padding="same"),
            layers.MaxPooling2D(2),
            layers.Flatten(),
            layers.Dropout(0.2),
            layers.Dense(128, activation="relu"),
            layers.Dense(256, activation="relu"),
            # layers.Dropout(0.5),
            layers.Dense(512, activation="relu"),
            layers.Dense(2, activation="softmax")
        ])

        # Print the model structure
        if show_model:
            model.summary()
        # Optimizer, loss function and accuracy metric
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.lr),
                      loss="categorical_crossentropy", metrics=["acc"])

        return model

    def train_model(self, train_ds, val_ds, batch_size=64, epoch=50, lr=0.0001):
        self.batch_size = batch_size
        self.epoch = epoch
        self.lr = lr

        # Create a callback that saves the model's weights
        cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=self.checkpoint_path,
                                                         save_best_only=True,
                                                         save_weights_only=True,
                                                         monitor='val_acc',
                                                         save_freq='epoch',  # the `period` argument is deprecated
                                                         verbose=1)

        model = self.create_model()

        # Compute the number of steps per epoch
        train_count, val_count = train_ds.cardinality().numpy(), val_ds.cardinality().numpy()
        steps_per_epoch = train_count // batch_size
        validation_steps = val_count // batch_size

        # Batch the data.
        # Ideally the shuffle buffer would be as large as the dataset so the data is fully shuffled;
        # a quarter of that is used here because my machine cannot spare the memory.
        train_ds = train_ds.shuffle(buffer_size=train_count // 4)  # buffer size must be an integer
        # train_ds = train_ds.shuffle(buffer_size=8000)
        train_ds = train_ds.repeat()
        train_ds = train_ds.batch(batch_size)
        # `prefetch` lets the dataset fetch batches in the background while the model trains.
        train_ds = train_ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
        # The validation set needs no further processing.
        val_ds = val_ds.batch(batch_size)

        # Train the model and save the weights for later prediction
        history = model.fit(train_ds,
                            epochs=self.epoch,
                            steps_per_epoch=steps_per_epoch,
                            validation_data=val_ds,
                            validation_steps=validation_steps,
                            callbacks=[cp_callback]
                            )

        return history

    def plot_acc_and_loss(self, history):
        acc = history.history['acc']
        val_acc = history.history['val_acc']

        loss = history.history["loss"]
        val_loss = history.history["val_loss"]

        epochs_range = range(self.epoch)

        plt.figure(figsize=(8, 8))
        plt.subplot(1, 2, 1)
        plt.plot(epochs_range, acc, label='Training Accuracy')
        plt.plot(epochs_range, val_acc, label='Validation Accuracy')
        plt.legend(loc='lower right')
        plt.title('Training and Validation Accuracy')

        plt.subplot(1, 2, 2)
        plt.plot(epochs_range, loss, label='Training Loss')
        plt.plot(epochs_range, val_loss, label='Validation Loss')
        plt.legend(loc='upper right')
        plt.title('Training and Validation Loss')
        plt.show()

    def predict_images(self, test_images_path):
        model = self.create_model(False)
        latest = tf.train.latest_checkpoint(self.checkpoint_dir)
        model.load_weights(latest)

        img = tf.keras.utils.load_img(
            test_images_path, target_size=(self.img_height, self.img_width)
        )
        img_array = tf.keras.utils.img_to_array(img)
        img_array = tf.expand_dims(img_array, 0)  # Create a batch

        predictions = model.predict(img_array)

        return predictions


# # Quick test
# # train_ds, val_ds = readData().get_dataset()
# model = Model()
# # history = model.train_model(train_ds, val_ds, epoch=10, batch_size=32)
# # model.plot_acc_and_loss(history)
# # pre = model.predict_images('../data/test/5.jpg')
#
#
# test_img_path = '../data/test/'
# img = []
# pre = []
# count = 0
# for file in os.listdir(test_img_path):
#     count += 1
#     temp = test_img_path + file
#     pre.append(model.predict_images(temp))
#     if count == 10:
#         break
#
# pre = np.argmax(pre, axis=-1).flatten()
# class_name = ['cat', 'dog']
# pre = np.array([class_name[i] for i in pre])
#
# print(pre)
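Because ModelCheckpoint is used with save_weights_only=True, only the weights are stored, so the architecture must be rebuilt before restoring. A minimal sketch of loading the best checkpoint and evaluating it (note that train_test_split above is unseeded, so this gives a fresh random validation split, not the exact one seen during training):

from input_data import readData
from model_make import Model
import tensorflow as tf

_, val_ds = readData().get_dataset()

m = Model()
net = m.create_model(show_model=False)
net.load_weights(tf.train.latest_checkpoint(m.checkpoint_dir))

# evaluate returns [loss, acc] because the model was compiled with metrics=["acc"]
loss, acc = net.evaluate(val_ds.batch(32), verbose=2)
print("restored model accuracy: {:.2f}%".format(100 * acc))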

Finally, train the model and run predictions.

import os

import numpy as np

from input_data import readData
from model_make import Model

if __name__ == '__main__':
    train_ds, val_ds = readData().get_dataset()

    # Train the model, save the weights, and plot the curves
    model = Model()
    history = model.train_model(train_ds, val_ds, epoch=50, batch_size=32)
    model.plot_acc_and_loss(history)

    # Predict on the first 10 test images
    test_img_path = '../data/test/'

    pre = []
    filename = []
    count = 0
    for file in os.listdir(test_img_path):
        count += 1
        temp = test_img_path + file
        pre.append(model.predict_images(temp))
        filename.append(file)
        if count == 10:
            break

    pre = np.argmax(pre, axis=-1).flatten()
    class_name = ['cat', 'dog']
    pre = np.array([class_name[i] for i in pre])

    result = np.vstack((filename, pre)).T
    print(result)
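To actually score on Kaggle, the predictions need to be written out as a CSV with an id and a label column. A minimal sketch, assuming the test files are named "<id>.jpg" as in the original test1 archive; check the competition's sampleSubmission.csv for the exact label format it expects. (Note also that predict_images reloads the weights on every call, which is slow but matches the class above.)

import csv
import os

import numpy as np

from model_make import Model

model = Model()
test_img_path = '../data/test/'

with open('submission.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['id', 'label'])
    for file in os.listdir(test_img_path):
        img_id = os.path.splitext(file)[0]        # e.g. "5" from "5.jpg"
        probs = model.predict_images(test_img_path + file)
        label = int(np.argmax(probs, axis=-1).flatten()[0])  # 0 = cat, 1 = dog
        writer.writerow([img_id, label])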
