Implementing cat vs. dog classification in Jupyter

This post follows an existing PyCharm-based project (see the references at the end); I changed very little, but I did get it running myself.
It covers reading the data, defining the model, training, saving the model, and loading the saved model for classification.

1. Reading the training data

# Read the training data
import tensorflow as tf
import numpy as np
import os

Get the image files and their labels from a given directory:

# Get the file paths and labels
def get_files(file_dir):
    cats = []
    label_cats = []
    dogs = []
    label_dogs = []
    # Walk the directory and assign a label based on the file name
    for file in os.listdir(file_dir):
        name = file.split(sep='.')
        if name[0] == 'cat':
            cats.append(file_dir + file)
            label_cats.append(0)
        else:
            dogs.append(file_dir + file)
            label_dogs.append(1)
    print("There are %d cats\nThere are %d dogs" % (len(cats), len(dogs)))
    # Shuffle the files
    image_list = np.hstack((cats, dogs))           # put the cat and dog images together
    label_list = np.hstack((label_cats, label_dogs))
    temp = np.array([image_list, label_list])
    temp = temp.transpose()
    np.random.shuffle(temp)                        # shuffle images and labels together
    image_list = list(temp[:, 0])                  # first column: image paths
    label_list = list(temp[:, 1])                  # second column: labels
    label_list = [int(i) for i in label_list]      # np.array turned the labels into strings; convert back to int

    return image_list, label_list
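For a quick check, the function can be called like this; the data/train/ path and the Kaggle-style file naming (cat.<n>.jpg / dog.<n>.jpg) are assumptions based on the parsing above, and the trailing slash matters because the paths are built by plain string concatenation.

image_list, label_list = get_files("data/train/")   # keep the trailing slash
print(image_list[:2])                                # paths of two shuffled images
print(label_list[:2])                                # their labels: 0 = cat, 1 = dog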

# Define the function that generates the batches

# Generate batches of a fixed size
def get_batch(image, label, image_W, image_H, batch_size, capacity):
    # image, label: lists of image paths and labels to batch
    # image_W, image_H: target width and height of the images
    # batch_size: number of images per batch
    # capacity: queue capacity
    # returns: a batch of images and a batch of labels

    # Convert the Python lists into types TensorFlow can work with
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)

    # Build the input queue
    input_queue = tf.train.slice_input_producer([image, label])
    image_contents = tf.read_file(input_queue[0])
    label = input_queue[1]

    # read_file() returns raw bytes, which must be decoded according to the image
    # format. The training data here is JPEG, so decode_jpeg() is used; other
    # formats need the matching decoder.
    image = tf.image.decode_jpeg(image_contents, channels=3)

    # Unify the image size.
    # The method used in the video tutorial:
    # image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
    # My method:
    image = tf.image.resize_images(image, [image_H, image_W], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    image = tf.cast(image, tf.float32)
    # image = tf.image.per_image_standardization(image)   # standardize the pixel values

    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=64,   # number of threads
                                              capacity=capacity)

    # Probably redundant: tf.train.batch already returns labels of shape [batch_size]
    # label_batch = tf.reshape(label_batch, [batch_size])

    return image_batch, label_batch
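As a sanity check (a sketch that is not part of the original post; the path, image size, and batch size here are assumptions), the pipeline can be built, the queue runners started, and a single batch pulled to inspect its shape:

image_list, label_list = get_files("data/train/")
image_batch, label_batch = get_batch(image_list, label_list, 208, 208, 16, 256)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        imgs, labels = sess.run([image_batch, label_batch])
        print(imgs.shape)   # expected: (16, 208, 208, 3)
        print(labels)       # 16 integer labels, 0 = cat, 1 = dog
    finally:
        coord.request_stop()
        coord.join(threads)

If this check is run in the same notebook as the training cell further below, calling tf.reset_default_graph() afterwards keeps these extra nodes out of the training graph.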

2. Defining the model (i.e., the network structure)
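The training code below calls an inference(images, batch_size, n_classes) function that builds the network and returns the raw class scores (logits). As a reference, here is a minimal sketch of such a network, with two convolution + max-pooling blocks, one fully connected layer, and a linear output layer; the layer sizes and initializers are assumptions, not necessarily what the original project used.

def inference(images, batch_size, n_classes):
    # conv1: 3x3 kernels, 16 feature maps (all sizes below are assumptions)
    with tf.variable_scope("conv1") as scope:
        weights = tf.get_variable("weights", shape=[3, 3, 3, 16], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        biases = tf.get_variable("biases", shape=[16], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding="SAME")
        conv1 = tf.nn.relu(tf.nn.bias_add(conv, biases), name=scope.name)

    # pool1: 2x2 max pooling
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding="SAME", name="pool1")

    # conv2: 3x3 kernels, 16 feature maps
    with tf.variable_scope("conv2") as scope:
        weights = tf.get_variable("weights", shape=[3, 3, 16, 16], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        biases = tf.get_variable("biases", shape=[16], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(pool1, weights, strides=[1, 1, 1, 1], padding="SAME")
        conv2 = tf.nn.relu(tf.nn.bias_add(conv, biases), name=scope.name)

    # pool2: 2x2 max pooling
    pool2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding="SAME", name="pool2")

    # fc1: flatten, then a 128-unit fully connected layer
    with tf.variable_scope("fc1") as scope:
        flat = tf.reshape(pool2, shape=[batch_size, -1])
        dim = flat.get_shape()[1].value
        weights = tf.get_variable("weights", shape=[dim, 128], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005))
        biases = tf.get_variable("biases", shape=[128], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(flat, weights) + biases, name=scope.name)

    # softmax_linear: raw class scores; the softmax itself is applied in the loss
    with tf.variable_scope("softmax_linear") as scope:
        weights = tf.get_variable("weights", shape=[128, n_classes], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005))
        biases = tf.get_variable("biases", shape=[n_classes], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        logits = tf.add(tf.matmul(fc1, weights), biases, name=scope.name)

    return logits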


# Define the loss, training, and evaluation functions

def losses(logits, labels):
    with tf.variable_scope("loss") as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                                       labels=labels, name="xentropy_per_example")
        loss = tf.reduce_mean(cross_entropy, name="loss")
        tf.summary.scalar(scope.name + "/loss", loss)
    return loss
# This compares the sparse (integer) labels against the logits from the output layer;
# since each training batch contains BATCH_SIZE images, tf.reduce_mean then averages
# the per-example losses into the mean loss for that batch.
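For intuition, the per-example value computed by sparse_softmax_cross_entropy_with_logits is simply the negative log of the softmax probability assigned to the true class; a tiny numpy illustration with made-up numbers:

import numpy as np

logits = np.array([2.0, 0.5])                  # hypothetical scores for [cat, dog]
label = 0                                      # true class: 0 = cat
probs = np.exp(logits) / np.sum(np.exp(logits))
per_example_loss = -np.log(probs[label])       # what the TF op computes per example
print(per_example_loss)                        # ~0.2014; the batch loss is the mean of these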

def trainning(loss, learning_rate):
    with tf.name_scope("optimizer"):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name="global_step", trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


def evaluation(logits, labels):
    with tf.variable_scope("accuracy") as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + "/accuracy", accuracy)
    return accuracy
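evaluation() counts a prediction as correct when the true label gets the highest score (in_top_k with k = 1); the same check in plain numpy, again with made-up numbers:

import numpy as np

logits = np.array([[2.0, 0.5],    # example 1: highest score is class 0 (cat)
                   [0.1, 1.2]])   # example 2: highest score is class 1 (dog)
labels = np.array([0, 0])         # true labels
correct = np.argmax(logits, axis=1) == labels
print(correct.mean())             # accuracy = 0.5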

3. Start training

import os
import numpy as np
import tensorflow as tf
# import input_data   # only needed in the PyCharm version; in this notebook the functions are defined above
# import model

N_CLASSES = 2          # two classes: cat and dog
IMG_H = 208            # image height after resizing
IMG_W = 208            # image width after resizing
BATCH_SIZE = 32        # images per batch
CAPACITY = 2000        # queue capacity
MAX_STEP = 15000       # number of training steps
learning_rate = 0.0001



train_dir = "data/train/"
logs_train_dir = "log/"
train, train_label = get_files(train_dir)
train_batch, train_label_batch = get_batch(train,
                                            train_label,
                                            IMG_W,
                                            IMG_H,
                                            BATCH_SIZE,
                                            CAPACITY)
train_logits = inference(train_batch, BATCH_SIZE, N_CLASSES)
train_loss = losses(train_logits, train_label_batch)
train_op = trainning(train_loss, learning_rate)
train_acc = evaluation(train_logits, train_label_batch)

summary_op = tf.summary.merge_all()
sess = tf.Session()
train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
saver = tf.train.Saver()

sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

try:
    for step in np.arange(MAX_STEP):
        if coord.should_stop():
            break
        _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

        if step % 100 == 0:
            print("Step %d, train loss = %.2f, train accuracy = %.2f%%" % (step, tra_loss, tra_acc))
            summary_str = sess.run(summary_op)
            train_writer.add_summary(summary_str, step)
        if step % 2000 == 0 or (step + 1) == MAX_STEP:
            checkpoint_path = os.path.join(logs_train_dir, "model.ckpt")
            saver.save(sess, checkpoint_path, global_step=step)
except tf.errors.OutOfRangeError:
    print("Done training -- epoch limit reached.")
finally:
    coord.request_stop()

coord.join(threads)
sess.close()

The training run prints the step number, loss, and accuracy every 100 steps; its output is shown in the screenshot below.


(training output screenshot)

4. Evaluating the model: load the trained model and classify a single image
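The snippet below uses a get_one_image helper that is not shown above; presumably it picks one image at random from the list, resizes it to the network's 208x208 input size, and returns it as a numpy array. A minimal sketch, assuming Pillow (PIL) is available:

from PIL import Image

def get_one_image(image_list):
    # Pick a random image path, resize it, and return a (208, 208, 3) array
    index = np.random.randint(0, len(image_list))
    img = Image.open(image_list[index])
    img = img.resize([208, 208])
    return np.array(img)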


train_dir = "data/train/"
train, train_label = get_files(train_dir)
image_array = get_one_image(train)

with tf.Graph().as_default():
    BATCH_SIZE = 1
    N_CLASSES = 2

    # Build the graph from a placeholder so the test image is actually fed in at run time
    x = tf.placeholder(tf.float32, shape=[208, 208, 3])
    image = tf.reshape(x, [1, 208, 208, 3])
    logit = inference(image, BATCH_SIZE, N_CLASSES)
    logit = tf.nn.softmax(logit)

    logs_train_dir = "log/"
    saver = tf.train.Saver()

    with tf.Session() as sess:
        print("Reading checkpoints...")
        ckpt = tf.train.get_checkpoint_state(logs_train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            global_step = ckpt.model_checkpoint_path.split("/")[-1].split("-")[-1]
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("Loading success, global_step is %s" % global_step)
        else:
            print("No checkpoint file found")

        prediction = sess.run(logit, feed_dict={x: image_array})
        max_index = np.argmax(prediction)
        if max_index == 0:
            print("This is a cat with possibility %.6f" % prediction[0, 0])
        else:
            print("This is a dog with possibility %.6f" % prediction[0, 1])



(prediction output screenshot)

References

Sual, "Cats vs. Dogs with TensorFlow: implementation and walkthrough (1)" (in Chinese), CSDN blog: https://blog.csdn.net/qq_16137569/article/details/72802387

Sual, "Cats vs. Dogs with TensorFlow: implementation and walkthrough (2)" (in Chinese), CSDN blog: https://blog.csdn.net/qq_16137569/article/details/72830964
