TensorFlow --- Building a Cat vs. Dog Classification Model with TensorFlow

1. The dataset used in the code can be downloaded via the link below

Baidu Netdisk extraction code: lala

2. Runtime environment

tensorflow-gpu==2.4.0

Python==3.7
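
Before running the scripts below, it is worth confirming that this TensorFlow version is installed and that the GPU is actually visible. A minimal check (not part of the original code) is:

import tensorflow as tf

# Should print 2.4.0 and list at least one physical GPU
print(tf.__version__)
print(tf.config.list_physical_devices('GPU'))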

3. Data loading and preprocessing code:

import tensorflow as tf
import os
import glob
import random
import matplotlib.pyplot as plt

# Environment variable configuration
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'

label_to_index = {
    'cat': 0,
    'dog': 1
}

index_to_label = {
    0: 'cat',
    1: 'dog'
}


# Decode a training image and apply data augmentation
def train_load_image_by_path(path, label):
    img = tf.io.read_file(path)
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, [360, 360])
    img = tf.image.random_crop(img, [256, 256, 3])  # random crop
    img = tf.image.random_flip_left_right(img)  # random horizontal flip
    img = tf.image.random_flip_up_down(img)  # random vertical flip
    # img = tf.image.random_brightness(img, 0.5)  # random brightness change
    # img = tf.image.random_contrast(img, 0, 1)  # random contrast change
    img = tf.cast(img, tf.float32)
    img = img / 255.0
    label = [tf.cast(label, tf.int64)]
    return img, label


# Decode a test image (no augmentation)
def test_load_image_by_path(path, label):
    img = tf.io.read_file(path)
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, [256, 256])
    img = tf.cast(img, tf.float32)
    img = img / 255.0
    label = [tf.cast(label, tf.int64)]
    return img, label


# Build the training and test datasets
def make_dataset():
    train_image_path = glob.glob('dataset/train/*.jpg')
    random.shuffle(train_image_path)
    # Training labels come from filenames such as "cat.0.jpg"; the backslash split assumes Windows-style paths from glob
    train_image_label = [label_to_index.get(img_path.split('\\')[1].split('.')[0]) for img_path in train_image_path]
    train_dataset = tf.data.Dataset.from_tensor_slices((train_image_path, train_image_label))
    train_dataset = train_dataset.map(train_load_image_by_path, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    train_dataset = train_dataset.shuffle(100).batch(16)
    train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)

    test_image_path = glob.glob('dataset/test_data/*/*.jpg')
    # Test labels come from the name of the subdirectory (cat or dog)
    test_image_label = [label_to_index.get(img_test_path.split('\\')[1]) for img_test_path in test_image_path]
    test_dataset = tf.data.Dataset.from_tensor_slices((test_image_path, test_image_label))
    test_dataset = test_dataset.map(test_load_image_by_path, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    test_dataset = test_dataset.batch(16)
    test_dataset = test_dataset.prefetch(tf.data.experimental.AUTOTUNE)

    return train_dataset, test_dataset


if __name__ == '__main__':
    train_data, test_data = make_dataset()
    for imgg, labeel in train_data.take(1):
        plt.title(index_to_label.get(labeel.numpy()[0][0]))
        plt.imshow(imgg[0])
        plt.show()
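
Note that the glob patterns and the label parsing above assume a directory layout roughly like the one below (the file names are only illustrative); training labels are taken from the file name prefix, while test labels come from the subdirectory name:

dataset/
    train/
        cat.0.jpg
        cat.1.jpg
        dog.0.jpg
        ...
    test_data/
        cat/
            cat.1000.jpg
            ...
        dog/
            dog.1000.jpg
            ...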

4. Model construction code:

import tensorflow as tf
import os

# Environment variable configuration
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'


# Build the model: a VGG-style stack of Conv2D + BatchNormalization blocks that outputs a single logit
def make_model():
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(64, (3, 3), input_shape=(256, 256, 3), activation='relu', padding='same'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(256, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(256, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(256, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(1024, activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Dense(256, activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Dense(1)
    ])
    model.summary()
    return model


if __name__ == '__main__':
    mol = make_model()
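
As a quick check (not part of the original script), passing a dummy batch through the freshly built model confirms the output shape: one raw logit per image, which is exactly what the training code's BinaryCrossentropy(from_logits=True) expects.

import tensorflow as tf
from model import make_model

model = make_model()
# A dummy batch of four 256x256 RGB images -> logits of shape (4, 1)
dummy = tf.zeros((4, 256, 256, 3))
print(model(dummy, training=False).shape)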

5. Model training code:

import tensorflow as tf
import os
from data_loader import make_dataset
from model import make_model
import tqdm
import datetime

# Environment variable configuration
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'

# Load the data
train_dataset, test_dataset = make_dataset()

# Build the model
model = make_model()

# Training configuration

# Loss function (the model outputs raw logits, hence from_logits=True)
loss_func = tf.keras.losses.BinaryCrossentropy(from_logits=True)
# Optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

# Running mean of the training loss
train_loss_mean = tf.keras.metrics.Mean()
# Training accuracy
train_accuracy = tf.keras.metrics.Accuracy()
# Running mean of the test loss
test_loss_mean = tf.keras.metrics.Mean()
# Test accuracy
test_accuracy = tf.keras.metrics.Accuracy()

# Number of training epochs
num_epochs = 30
# Summary writers for TensorBoard
now_datetime = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
train_writer = tf.summary.create_file_writer('logs/' + now_datetime + 'train')
test_writer = tf.summary.create_file_writer('logs/' + now_datetime + 'test')


def train_step(mod, images, labels):
    with tf.GradientTape() as t:
        # training=True so that BatchNormalization uses batch statistics during training
        pre = mod(images, training=True)
        step_loss = loss_func(labels, pre)
    grads = t.gradient(step_loss, mod.trainable_variables)
    optimizer.apply_gradients(zip(grads, mod.trainable_variables))
    train_loss_mean(step_loss)
    # A logit > 0 corresponds to a sigmoid probability > 0.5, i.e. class "dog"
    train_accuracy(labels, tf.cast(pre > 0, tf.int32))


def test_step(mod, images, labels):
    pre = mod(images, training=False)
    step_loss = loss_func(labels, pre)
    test_loss_mean(step_loss)
    test_accuracy(labels, tf.cast(pre > 0, tf.int32))


# Training loop
def train():
    for epoch in range(num_epochs):
        # Training phase
        tqdm_train = tqdm.tqdm(enumerate(train_dataset), total=len(train_dataset))
        for (batches, (images, labels)) in tqdm_train:
            train_step(model, images, labels)
            tqdm_train.set_description_str('Epoch:{}_train'.format(epoch))
            tqdm_train.set_postfix_str(
                'Train loss is {:.14f},Train accuracy is {:.14f}.'.format(train_loss_mean.result(),
                                                                          train_accuracy.result()))
        with train_writer.as_default():
            tf.summary.scalar('train_loss', data=train_loss_mean.result(), step=epoch)
            tf.summary.scalar('train_accuracy', data=train_accuracy.result(), step=epoch)
        train_accuracy.reset_states()
        train_loss_mean.reset_states()
        tqdm_train.close()

        # Validation phase
        tqdm_test = tqdm.tqdm(enumerate(test_dataset), total=len(test_dataset))
        for (batches, (images, labels)) in tqdm_test:
            test_step(model, images, labels)
            tqdm_test.set_description_str('Epoch:{}_test'.format(epoch))
            tqdm_test.set_postfix_str('Test loss is {:.14f},Test accuracy is {:.14f}.'.format(test_loss_mean.result(),
                                                                                              test_accuracy.result()))
        with test_writer.as_default():
            tf.summary.scalar('test_loss', data=test_loss_mean.result(), step=epoch)
            tf.summary.scalar('test_accuracy', data=test_accuracy.result(), step=epoch)
        test_loss_mean.reset_states()
        test_accuracy.reset_states()
        tqdm_test.close()
        print('\n')
    model.save(r'model_data/my_model.h5')


if __name__ == '__main__':
    train()
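
After training, the loss and accuracy curves written to the logs directory can be inspected with TensorBoard (tensorboard --logdir logs). A minimal inference sketch using the saved model could look like the following; the image path is a placeholder, and since the model outputs a raw logit, a simple > 0 threshold (equivalent to sigmoid > 0.5) turns it into a cat/dog decision:

import tensorflow as tf
from data_loader import index_to_label

# Load the model saved by train()
model = tf.keras.models.load_model('model_data/my_model.h5')

# Preprocess a single image exactly like the test pipeline
img = tf.io.read_file('some_image.jpg')  # placeholder path
img = tf.image.decode_jpeg(img, channels=3)
img = tf.image.resize(img, [256, 256])
img = tf.cast(img, tf.float32) / 255.0
img = tf.expand_dims(img, axis=0)  # add the batch dimension

logit = model.predict(img)[0][0]
print(index_to_label[int(logit > 0)])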
