TensorFlow Cats vs. Dogs Classification

The project is split into three scripts. The input_data script reads the image files, turns them into TensorFlow tensors, and groups them into batches of batch_size; the commented-out block at the end is a test routine for the generated batches.

import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os


train_dir = "D:\\python学习\\deep_learn\\kaggle\\train\\train\\"  # root directory of the training data


def get_files(file_dir):
    """
    Args:
        file_dir: file directory
    Returns:
        list of images and labels
    """
    cats = []
    label_cats = []
    dogs = []
    label_dogs = []
    for file in os.listdir(file_dir):
        name = file.split(sep='.')
        if name[0] == 'cat':
            cats.append(file_dir + file)
            label_cats.append(0)
        else:
            dogs.append(file_dir + file)
            label_dogs.append(1)
    print('There are %d cats\nThere are %d dogs' % (len(cats), len(dogs)))

    image_list = np.hstack((cats, dogs))
    label_list = np.hstack((label_cats, label_dogs))

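    # Stack paths and labels so they can be shuffled together; mixing the
    # string paths with the integer labels turns every element into a string,
    # which is why the labels are cast back to int below.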
    temp = np.array([image_list, label_list])
    temp = temp.transpose()
    np.random.shuffle(temp)

    image_list = list(temp[:, 0])
    label_list = list(temp[:, 1])
    label_list = [int(i) for i in label_list]

    return image_list, label_list


def get_batch(image, label, image_W, image_H, batch_size, capacity):
    '''
    Args:
        image: list type
        label: list type
        image_W: image width
        image_H: image height
        batch_size: batch size
        capacity: the maximum elements in queue
    Returns:
        image_batch: 4D tensor [batch_size, image_H, image_W, 3], dtype=tf.float32
        label_batch: 1D tensor [batch_size], dtype=tf.int32
    '''

    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)

    # make an input queue
    input_queue = tf.train.slice_input_producer([image, label])

    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])
    image = tf.image.decode_jpeg(image_contents, channels=3)

    ######################################
    # data augmentation should go here
    ######################################

    # crop or pad the decoded image to the target size (target_height first, then target_width)
    image = tf.image.resize_image_with_crop_or_pad(image, image_H, image_W)

    # if you want to visually inspect the generated batches, comment out the following line (standardized images do not display well)
    image = tf.image.per_image_standardization(image)

    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=64,
                                              capacity=capacity)

    # you can also use shuffle_batch
    #    image_batch, label_batch = tf.train.shuffle_batch([image,label],
    #                                                      batch_size=BATCH_SIZE,
    #                                                      num_threads=64,
    #                                                      capacity=CAPACITY,
    #                                                      min_after_dequeue=CAPACITY-1)

    label_batch = tf.reshape(label_batch, [batch_size])
    image_batch = tf.cast(image_batch, tf.float32)

    return image_batch, label_batch


"""

BATCH_SIZE = 2
CAPACITY = 256
IMG_W = 208
IMG_H = 208

image_list, label_list = get_files(train_dir)
image_batch, label_batch = get_batch(image_list, label_list, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

with tf.Session() as sess:
    i = 0
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    try:
        while not coord.should_stop() and i < 1:

            img, label = sess.run([image_batch, label_batch])

            # just test one batch
            for j in np.arange(BATCH_SIZE):
                print('label: %d' % label[j])  # label of the j-th image in the batch
                plt.imshow(img[j, :, :, :])
                plt.show()
            i += 1

    except tf.errors.OutOfRangeError:
        print('done!')
    finally:
        coord.request_stop()
    coord.join(threads)
"""

The model script builds the CNN with TensorFlow and defines the loss, accuracy, and training functions.

import tensorflow as tf


def inference(images, batch_size, n_classes):
    # first convolutional layer
    with tf.variable_scope('conv1') as scope:
        # 3x3 kernels, 3 input channels, 16 output feature maps
        weights = tf.get_variable('weights', shape=[3, 3, 3, 16], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[16], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))

        # convolution; strides is the step size, padding='SAME' keeps the spatial size
        conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
        # add the bias
        pre_activation = tf.nn.bias_add(conv, biases)
        # activation
        conv1 = tf.nn.relu(pre_activation, name='conv1')
    # pooling plus local response normalization
    with tf.variable_scope('pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pooling1')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm1')
    # second convolutional layer
    with tf.variable_scope('conv2') as scope:
        # 3x3 kernels, 16 input channels, 16 output feature maps
        weights = tf.get_variable('weights', shape=[3, 3, 16, 16], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[16], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        # convolution
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name='conv2')

    # normalization plus pooling (these must operate on conv2, not on the first layer's outputs)
    with tf.variable_scope('pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME', name='pooling2')

    # third layer: fully connected
    with tf.variable_scope('local3') as scope:
        # flatten the feature maps into one vector per image; this layer outputs 128 units
        reshape = tf.reshape(pool2, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights',
                                  shape=[dim, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[128], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))

        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name='local3')

    # fourth layer: fully connected
    with tf.variable_scope('local4') as scope:
        # 128 inputs, 128 outputs
        weights = tf.get_variable('weights',
                                  shape=[128, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1)
                                 )
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')

    # final linear layer producing the logits for the two classes (softmax is applied inside the loss)
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.get_variable('weights',
                                  shape=[128, n_classes],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[n_classes],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1)
                                 )
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')

    return softmax_linear
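
Because the fully connected layers flatten pool2 with a fixed batch_size, inference() returns a [batch_size, n_classes] tensor of logits. A quick static shape check, assuming 208x208 RGB inputs as used later in training (a sketch to run separately, not part of model.py):

with tf.Graph().as_default():
    dummy_images = tf.zeros([4, 208, 208, 3], dtype=tf.float32)
    logits = inference(dummy_images, batch_size=4, n_classes=2)
    print(logits.get_shape())   # expected: (4, 2), one pair of logits per image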


# loss: mean sparse softmax cross-entropy between logits and labels
def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy_per_example'
        )
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss


# training op: one Adam step that minimizes the loss
def trainning(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


# evaluation: fraction of the batch whose top-1 prediction matches the label
def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy
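
To see what losses() and evaluation() compute, here is a small hand-made example (a sketch to run separately, not part of model.py): three logit rows, the third of which predicts the wrong class, so the expected accuracy is about 0.67.

with tf.Graph().as_default():
    logits = tf.constant([[2.0, 0.5],   # confidently class 0 (cat)  -> correct
                          [0.1, 1.5],   # confidently class 1 (dog)  -> correct
                          [1.0, 1.1]],  # narrowly class 1 (dog)     -> wrong
                         dtype=tf.float32)
    labels = tf.constant([0, 1, 0], dtype=tf.int32)
    loss = losses(logits, labels)
    accuracy = evaluation(logits, labels)
    with tf.Session() as sess:
        print(sess.run([loss, accuracy]))   # accuracy should come out around 0.667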

The train script wires everything together and runs the training loop.

import tensorflow as tf
import numpy as np
import input_data
import model
import os


N_CLASSES = 2      # number of output classes (cat, dog)
IMG_W = 208        # width the images are resized to
IMG_H = 208        # height the images are resized to
BATCH_SIZE = 32    # number of images per batch
CAPACITY = 256     # maximum number of elements in the input queue
MAX_STEP = 15000   # number of training steps
learning_rate = 0.0001  # learning rate
train_dir = "D:\\python学习\\deep_learn\\kaggle\\train\\train\\"  # root directory of the training data
logs_train_dir = "D:\\python学习\\deep_learn\\kaggle"  # directory for summaries and checkpoints

def run_training():
    # get the shuffled lists of image paths and labels
    train, train_label = input_data.get_files(train_dir)
    # turn the images into TensorFlow tensors and group them into batches
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY
                                                          )
    # build the model
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    # define the loss
    train_loss = model.losses(train_logits, train_label_batch)
    # training op
    train_op = model.trainning(train_loss, learning_rate)
    # accuracy op
    train_acc = model.evaluation(train_logits, train_label_batch)
    # merge all summaries
    summary_op = tf.summary.merge_all()
    # open a session
    sess = tf.Session()
    # writer that saves the summaries and the graph
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()
    # initialize all variables
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                # save a checkpoint to checkpoint_path every 2000 steps and at the final step
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()


if __name__ == '__main__':
    # input_data.get_files(train_dir)
    run_training()
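
After training, the most recent checkpoint in logs_train_dir can be restored to classify a single picture. Below is a rough sketch (not part of the original scripts): evaluate_one_image and its image_path argument are hypothetical, and it reuses the constants and the model import defined above.

def evaluate_one_image(image_path):
    # image_path: path to one JPEG file to classify (hypothetical argument)
    with tf.Graph().as_default():
        # same preprocessing as get_batch(), applied to a single image
        image_contents = tf.read_file(image_path)
        image = tf.image.decode_jpeg(image_contents, channels=3)
        image = tf.image.resize_image_with_crop_or_pad(image, IMG_H, IMG_W)
        image = tf.image.per_image_standardization(image)
        image = tf.reshape(image, [1, IMG_H, IMG_W, 3])

        logits = model.inference(image, batch_size=1, n_classes=N_CLASSES)
        prediction = tf.nn.softmax(logits)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            # restore the latest checkpoint written by run_training()
            ckpt = tf.train.latest_checkpoint(logs_train_dir)
            saver.restore(sess, ckpt)
            prob = sess.run(prediction)[0]
            print('cat: %.3f, dog: %.3f' % (prob[0], prob[1]))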

