Batch-Reading the MNIST Dataset with TFRecord and Training on It

This post shows how to use TensorFlow's TFRecord format to batch-read the MNIST dataset, avoiding feed_dict and making training more efficient. The MNIST data is first converted into TFRecord files; TFRecordReader and parse_single_example then parse the records and build the input queue. The network has two convolutional layers, two pooling layers, fully connected layers, and dropout, and results are printed every 20 training steps. The test stage averages the per-batch metrics, and the whole pipeline can be sped up with multiple reader threads.

The previous post covered batch reading of data. This time we test it on the MNIST dataset to see how training goes without feeding data through feed_dict. First, build the Example protocol buffer. The code is as follows:

import tensorflow as tf
import numpy as np
import os
from tensorflow.examples.tutorials.mnist import input_data
def create_example(image,label):
    # Wrap one sample as a tf.train.Example: image/label are raw byte strings,
    # while num1/num2 are dummy features showing the float and int64 types.
    return tf.train.Example(features=tf.train.Features(feature={
        'image':tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
        'label':tf.train.Feature(bytes_list=tf.train.BytesList(value=[label])),
        'num1':tf.train.Feature(float_list=tf.train.FloatList(value=[12.555])),
        'num2':tf.train.Feature(int64_list=tf.train.Int64List(value=[88]))
    }))
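
As a quick sanity check (my addition, not part of the original pipeline; it reuses create_example and the imports above), a serialized record can be parsed back into an Example message to confirm that the features round-trip:

# Serialize a dummy sample and parse it back with the protobuf API.
fake_image = np.zeros(784, dtype=np.float32)
fake_label = np.zeros(10, dtype=np.float32)
serialized = create_example(fake_image.tobytes(), fake_label.tobytes()).SerializeToString()

parsed = tf.train.Example.FromString(serialized)
print(parsed.features.feature['num2'].int64_list.value)  # [88]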

Then the input data, converted to a string via the Example message, is written out through the tf.python_io interface to produce the TFRecord files.

def create_tfrecord(tfrecord_dir_path,mnist_data_path='../datas/mnist'):
    if not os.path.exists(tfrecord_dir_path):
        os.makedirs(tfrecord_dir_path)
    train_tfrecord_path=os.path.join(tfrecord_dir_path,'train.tfrecord')
    test_tfrecord_path=os.path.join(tfrecord_dir_path,'test.tfrecord')
    mnist=input_data.read_data_sets(train_dir=mnist_data_path,one_hot=True,validation_size=0)
    with tf.python_io.TFRecordWriter(train_tfrecord_path) as train_writer:
        print('Generating training records')
        for idx in range(mnist.train.num_examples):
            image=mnist.train.images[idx]
            label=mnist.train.labels[idx]
            image=np.reshape(image,-1).astype(np.float32)
            label=np.reshape(label,-1).astype(np.float32)
            example=create_example(image.tobytes(),label.tobytes())
            train_writer.write(example.SerializeToString())
    with tf.python_io.TFRecordWriter(test_tfrecord_path) as test_writer:
        print('Generating test records')
        for idx in range(mnist.test.num_examples):
            image = mnist.test.images[idx]
            label = mnist.test.labels[idx]
            image = np.reshape(image, -1).astype(np.float32)
            label = np.reshape(label, -1).astype(np.float32)
            example = create_example(image.tobytes(), label.tobytes())
            test_writer.write(example.SerializeToString())
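
To verify the files were written completely (a small sketch assuming TensorFlow 1.x), count the serialized records with tf.python_io.tf_record_iterator:

def count_records(tfrecord_path):
    # Iterate over raw serialized records without parsing them.
    return sum(1 for _ in tf.python_io.tf_record_iterator(tfrecord_path))

print(count_records('../datas/mnist/train.tfrecord'))  # 60000 with validation_size=0
print(count_records('../datas/mnist/test.tfrecord'))   # 10000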

Finally, the records are parsed and read back with tf.TFRecordReader() and tf.parse_single_example(), and a queue produces shuffled batches.

def load_tfrecord(tfrecord_file_path,batch_size,height,width,channels,n_class):
    producer=tf.train.string_input_producer([tfrecord_file_path])
    reader=tf.TFRecordReader()
    _,serialized_example=reader.read(queue=producer)
    features=tf.parse_single_example(serialized_example,features={
        'image':tf.FixedLenFeature([],tf.string),
        'label':tf.FixedLenFeature([],tf.string),
        'num1':tf.FixedLenFeature([],tf.float32),
        'num2':tf.FixedLenFeature([],tf.int64)
    })
    image=tf.decode_raw(features['image'],tf.float32)
    label=tf.decode_raw(features['label'],tf.float32)
    num1=features['num1']
    num2=features['num2']
    image=tf.reshape(image,shape=[height,width,channels])
    label=tf.reshape(label,shape=[n_class])
    # Shuffle and batch; min_after_dequeue controls how well samples are mixed.
    image,label,num1,num2=tf.train.shuffle_batch(
        [image,label,num1,num2],
        batch_size=batch_size,
        capacity=batch_size*5,
        num_threads=1,
        min_after_dequeue=batch_size*2
    )
    return image,label,num1,num2
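
A minimal smoke test for load_tfrecord (my sketch; it assumes the TFRecord files above already exist) that starts the queue runners and pulls a single small batch:

with tf.Graph().as_default():
    image, label, num1, num2 = load_tfrecord('../datas/mnist/train.tfrecord',
                                             batch_size=4, height=28, width=28,
                                             channels=1, n_class=10)
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        img_val, lbl_val = sess.run([image, label])
        print(img_val.shape, lbl_val.shape)  # (4, 28, 28, 1) (4, 10)
        coord.request_stop()
        coord.join(threads)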

Next comes the network itself. MNIST does not need a very deep model, so it's just two convolutional layers, two pooling layers, and fully connected layers, with dropout in between.

def conv2d(x,w,b,strides=1):
    conv=tf.nn.conv2d(x,w,strides=[1,strides,strides,1],padding='SAME')
    conv=tf.nn.bias_add(conv,b)
    return tf.nn.relu(conv)
def max_pool(x,k):
    pool=tf.nn.max_pool(x,ksize=[1,k,k,1],strides=[1,k,k,1],padding='SAME')
    return pool
def model_net(image,keep_prob):
    n_class=10
    weights={
        'wc1':tf.Variable(tf.truncated_normal([5,5,1,32],stddev=0.1)),
        'wc2':tf.Variable(tf.truncated_normal([5,5,32,64],stddev=0.1)),
        'wd3':tf.Variable(tf.truncated_normal([7*7*64,1024],stddev=0.1)),
        'wd4':tf.Variable(tf.truncated_normal([1024,n_class],stddev=0.1))
    }
    bias={
        'bc1':tf.Variable(tf.zeros([32])),
        'bc2':tf.Variable(tf.zeros([64])),
        'bd3':tf.Variable(tf.zeros([1024])),
        'bd4':tf.Variable(tf.zeros([n_class]))
    }
    conv1=conv2d(image,weights['wc1'],bias['bc1'])
    pool1=max_pool(conv1,k=2)
    pool1=tf.nn.dropout(pool1,keep_prob)
    conv2=conv2d(pool1,weights['wc2'],bias['bc2'])
    pool2=max_pool(conv2,k=2)
    pool2=tf.nn.dropout(pool2,keep_prob)
    # Flatten: after two 2x2 max-pools, a 28x28 input is reduced to 7x7x64.
    shape=pool2.get_shape().as_list()
    flat_dim=shape[1]*shape[2]*shape[3]
    flat=tf.reshape(pool2,shape=[-1,flat_dim])
    fc1=tf.add(tf.matmul(flat,weights['wd3']),bias['bd3'])
    fc1=tf.nn.dropout(fc1,keep_prob)
    logits=tf.add(tf.matmul(fc1,weights['wd4']),bias['bd4'])
    return logits
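
One caveat: keep_prob is passed in as a fixed 0.6, so dropout stays active even when the graph is evaluated on the test queue. A small variation (my sketch, not the original code; image here is the tensor selected by tf.cond below) routes keep_prob through tf.placeholder_with_default so it can be switched off at evaluation time:

# keep_prob defaults to 1.0 (dropout disabled); feed 0.6 only on training steps.
keep_prob = tf.placeholder_with_default(1.0, shape=[], name='keep_prob')
logits = model_net(image, keep_prob)
# Training step: sess.run(train_opt, {is_train: True, keep_prob: 0.6})
# Evaluation:    sess.run(accuracy)  # keep_prob falls back to 1.0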

Then define the loss function and training op, and finally the accuracy; with that, the model is fully built.

def create_loss(logits,labels):
    loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,labels=labels))
    return loss
def create_train_opt(loss,learning_rate):
    optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    train_opt=optimizer.minimize(loss)
    return train_opt
def create_accuracy(logits,labels):
    correct_pred=tf.equal(tf.argmax(logits,1),tf.argmax(labels,1))
    accuracy=tf.reduce_mean(tf.cast(correct_pred,tf.float32))
    return accuracy
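
To make the accuracy definition concrete, here is the same argmax comparison worked through in numpy on two toy samples:

import numpy as np

logits = np.array([[2.0, 0.1, 0.3],
                   [0.2, 0.5, 1.8]])                      # two samples, three classes
labels = np.array([[1, 0, 0],
                   [0, 1, 0]], dtype=np.float32)          # one-hot targets

correct = np.argmax(logits, 1) == np.argmax(labels, 1)    # [True, False]
print(correct.astype(np.float32).mean())                  # 0.5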

Now for training. Instead of placeholders, train_image, train_label, test_image, and test_label receive the image features and labels straight from train.tfrecord and test.tfrecord. A boolean switch (is_train fed to tf.cond) selects between the training and test inputs, and results are printed every 20 steps.

def train():
    learning_rate=1e-3
    batch_size=128
    with tf.Graph().as_default():
        create_tfrecord(tfrecord_dir_path='../datas/mnist')
        train_image,train_label,_,_=load_tfrecord(tfrecord_file_path='../datas/mnist/train.tfrecord',batch_size=batch_size,height=28,width=28,channels=1,n_class=10)
        test_image,test_label,_,_=load_tfrecord(tfrecord_file_path='../datas/mnist/test.tfrecord',batch_size=batch_size,height=28,width=28,channels=1,n_class=10)
        is_train=tf.placeholder_with_default(False,shape=None,name='is_train')
        image=tf.cond(is_train ,lambda :train_image,lambda :test_image)
        label=tf.cond(is_train,lambda :train_label,lambda :test_label)
        logits=model_net(image,keep_prob=0.6)
        loss=create_loss(logits,label)
        train_opt=create_train_opt(loss,learning_rate=learning_rate)
        accuracy=create_accuracy(logits,label)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            coor=tf.train.Coordinator()
            threads=tf.train.start_queue_runners(sess=sess,coord=coor)
            step=1
            while True:
                sess.run(train_opt,{is_train:True})
                # Note: this second run dequeues a fresh batch, so the reported
                # loss/accuracy describe a different batch than the one just trained on.
                train_loss,train_acc=sess.run([loss,accuracy],{is_train:True})
                if step%20==0:
                    print('Step:{}-Train Loss:{:.5f}-Train Acc:{:.5f}'.format(step,train_loss,train_acc))
                step+=1
                if train_acc>0.9:
                   break
            coor.request_stop()
            coor.join(threads)

The test module then averages the metrics over batches. Note that as written, test() builds a fresh graph and reinitializes all variables, so without adding a tf.train.Saver to save in train() and restore here, it evaluates an untrained network.

def test():
    learning_rate = 1e-3
    batch_size = 128
    with tf.Graph().as_default():
        create_tfrecord(tfrecord_dir_path='../datas/mnist')
        train_image, train_label, _, _ = load_tfrecord(tfrecord_file_path='../datas/mnist/train.tfrecord',
                                                       batch_size=batch_size, height=28, width=28, channels=1,
                                                       n_class=10)
        test_image, test_label, _, _ = load_tfrecord(tfrecord_file_path='../datas/mnist/test.tfrecord',
                                                     batch_size=batch_size, height=28, width=28, channels=1, n_class=10)
        is_train = tf.placeholder_with_default(False, shape=None, name='is_train')
        image = tf.cond(is_train, lambda: train_image, lambda: test_image)
        label = tf.cond(is_train, lambda: train_label, lambda: test_label)
        logits = model_net(image, keep_prob=0.6)
        loss = create_loss(logits, label)
        train_opt = create_train_opt(loss, learning_rate=learning_rate)
        accuracy = create_accuracy(logits, label)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            coor = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coor)
            mnist = input_data.read_data_sets('../datas/mnist', one_hot=True)
            test_all_total = mnist.test.num_examples
            test_all_acc = []
            test_all_loss = []
            # One pass over the test set, accumulating per-batch metrics.
            for step in range(1, test_all_total // batch_size + 1):
                test_batch_loss, test_batch_acc = sess.run([loss, accuracy])
                test_all_acc.append(test_batch_acc)
                test_all_loss.append(test_batch_loss)
                if step % 50 == 0:
                    print('Step:{}-Test Loss:{:.5f}-Test ACC:{:.5f}'.format(step, np.mean(test_all_loss), np.mean(test_all_acc)))
            print('Mean Test Loss:{:.5f}-Mean Test ACC:{:.5f}'.format(np.mean(test_all_loss), np.mean(test_all_acc)))
            coor.request_stop()
            coor.join(threads)

The whole read-and-train pipeline is still fairly slow; multi-threaded reading speeds it up (see the sketch after the log below). The loss and accuracy evolve as follows:

Step:3380-Train Loss:1.02621-Train Acc:0.65625
Step:3400-Train Loss:0.76859-Train Acc:0.71094
Step:3420-Train Loss:0.88981-Train Acc:0.71094
Step:3440-Train Loss:0.57895-Train Acc:0.83594
Step:3460-Train Loss:0.71796-Train Acc:0.75000
Step:3480-Train Loss:0.81050-Train Acc:0.74219
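
Raising num_threads in tf.train.shuffle_batch lets several threads fill the example queue in parallel. A sketch of the variation (the usual guideline is capacity >= min_after_dequeue + num_threads * batch_size, so capacity is raised accordingly):

# Drop-in replacement for the batching call in load_tfrecord.
image, label, num1, num2 = tf.train.shuffle_batch(
    [image, label, num1, num2],
    batch_size=batch_size,
    num_threads=4,                             # parallel enqueue threads
    capacity=batch_size * 6,                   # min_after_dequeue + 4 * batch_size
    min_after_dequeue=batch_size * 2
)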

Complete code:

import tensorflow as tf
import numpy as np
import os
from tensorflow.examples.tutorials.mnist import input_data
def create_example(image,label):
    # Wrap one sample as a tf.train.Example: image/label are raw byte strings,
    # while num1/num2 are dummy features showing the float and int64 types.
    return tf.train.Example(features=tf.train.Features(feature={
        'image':tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
        'label':tf.train.Feature(bytes_list=tf.train.BytesList(value=[label])),
        'num1':tf.train.Feature(float_list=tf.train.FloatList(value=[12.555])),
        'num2':tf.train.Feature(int64_list=tf.train.Int64List(value=[88]))
    }))
def create_tfrecord(tfrecord_dir_path,mnist_data_path='../datas/mnist'):
    if not os.path.exists(tfrecord_dir_path):
        os.makedirs(tfrecord_dir_path)
    train_tfrecord_path=os.path.join(tfrecord_dir_path,'train.tfrecord')
    test_tfrecord_path=os.path.join(tfrecord_dir_path,'test.tfrecord')
    mnist=input_data.read_data_sets(train_dir=mnist_data_path,one_hot=True,validation_size=0)
    with tf.python_io.TFRecordWriter(train_tfrecord_path) as train_writer:
        print('Generating training records')
        for idx in range(mnist.train.num_examples):
            image=mnist.train.images[idx]
            label=mnist.train.labels[idx]
            image=np.reshape(image,-1).astype(np.float32)
            label=np.reshape(label,-1).astype(np.float32)
            example=create_example(image.tobytes(),label.tobytes())
            train_writer.write(example.SerializeToString())
    with tf.python_io.TFRecordWriter(test_tfrecord_path) as test_writer:
        print('Generating test records')
        for idx in range(mnist.test.num_examples):
            image = mnist.test.images[idx]
            label = mnist.test.labels[idx]
            image = np.reshape(image, -1).astype(np.float32)
            label = np.reshape(label, -1).astype(np.float32)
            example = create_example(image.tobytes(), label.tobytes())
            test_writer.write(example.SerializeToString())
def load_tfrecord(tfrecord_file_path,batch_size,height,width,channels,n_class):
    producer=tf.train.string_input_producer([tfrecord_file_path])
    reader=tf.TFRecordReader()
    _,serialized_example=reader.read(queue=producer)
    features=tf.parse_single_example(serialized_example,features={
        'image':tf.FixedLenFeature([],tf.string),
        'label':tf.FixedLenFeature([],tf.string),
        'num1':tf.FixedLenFeature([],tf.float32),
        'num2':tf.FixedLenFeature([],tf.int64)
    })
    image=tf.decode_raw(features['image'],tf.float32)
    label=tf.decode_raw(features['label'],tf.float32)
    num1=features['num1']
    num2=features['num2']
    image=tf.reshape(image,shape=[height,width,channels])
    label=tf.reshape(label,shape=[n_class])
    # Shuffle and batch; min_after_dequeue controls how well samples are mixed.
    image,label,num1,num2=tf.train.shuffle_batch(
        [image,label,num1,num2],
        batch_size=batch_size,
        capacity=batch_size*5,
        num_threads=1,
        min_after_dequeue=batch_size*2
    )
    return image,label,num1,num2
def conv2d(x,w,b,strides=1):
    conv=tf.nn.conv2d(x,w,strides=[1,strides,strides,1],padding='SAME')
    conv=tf.nn.bias_add(conv,b)
    return tf.nn.relu(conv)
def max_pool(x,k):
    pool=tf.nn.max_pool(x,ksize=[1,k,k,1],strides=[1,k,k,1],padding='SAME')
    return pool
def model_net(image,keep_prob):
    n_class=10
    weights={
        'wc1':tf.Variable(tf.truncated_normal([5,5,1,32],stddev=0.1)),
        'wc2':tf.Variable(tf.truncated_normal([5,5,32,64],stddev=0.1)),
        'wd3':tf.Variable(tf.truncated_normal([7*7*64,1024],stddev=0.1)),
        'wd4':tf.Variable(tf.truncated_normal([1024,n_class],stddev=0.1))
    }
    bias={
        'bc1':tf.Variable(tf.zeros([32])),
        'bc2':tf.Variable(tf.zeros([64])),
        'bd3':tf.Variable(tf.zeros([1024])),
        'bd4':tf.Variable(tf.zeros([n_class]))
    }
    conv1=conv2d(image,weights['wc1'],bias['bc1'])
    pool1=max_pool(conv1,k=2)
    pool1=tf.nn.dropout(pool1,keep_prob)
    conv2=conv2d(pool1,weights['wc2'],bias['bc2'])
    pool2=max_pool(conv2,k=2)
    pool2=tf.nn.dropout(pool2,keep_prob)
    # Flatten: after two 2x2 max-pools, a 28x28 input is reduced to 7x7x64.
    shape=pool2.get_shape().as_list()
    flat_dim=shape[1]*shape[2]*shape[3]
    flat=tf.reshape(pool2,shape=[-1,flat_dim])
    fc1=tf.add(tf.matmul(flat,weights['wd3']),bias['bd3'])
    fc1=tf.nn.dropout(fc1,keep_prob)
    logits=tf.add(tf.matmul(fc1,weights['wd4']),bias['bd4'])
    return logits
def create_loss(logits,labels):
    loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,labels=labels))
    return loss
def create_train_opt(loss,learning_rate):
    optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    train_opt=optimizer.minimize(loss)
    return train_opt
def create_accuracy(logits,labels):
    correct_pred=tf.equal(tf.argmax(logits,1),tf.argmax(labels,1))
    accuracy=tf.reduce_mean(tf.cast(correct_pred,tf.float32))
    return accuracy
def train():
    learning_rate=1e-3
    batch_size=128
    with tf.Graph().as_default():
        create_tfrecord(tfrecord_dir_path='../datas/mnist')
        train_image,train_label,_,_=load_tfrecord(tfrecord_file_path='../datas/mnist/train.tfrecord',batch_size=batch_size,height=28,width=28,channels=1,n_class=10)
        test_image,test_label,_,_=load_tfrecord(tfrecord_file_path='../datas/mnist/test.tfrecord',batch_size=batch_size,height=28,width=28,channels=1,n_class=10)
        is_train=tf.placeholder_with_default(False,shape=None,name='is_train')
        image=tf.cond(is_train ,lambda :train_image,lambda :test_image)
        label=tf.cond(is_train,lambda :train_label,lambda :test_label)
        logits=model_net(image,keep_prob=0.6)
        loss=create_loss(logits,label)
        train_opt=create_train_opt(loss,learning_rate=learning_rate)
        accuracy=create_accuracy(logits,label)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            coor=tf.train.Coordinator()
            threads=tf.train.start_queue_runners(sess=sess,coord=coor)
            step=1
            while True:
                sess.run(train_opt,{is_train:True})
                # Note: this second run dequeues a fresh batch, so the reported
                # loss/accuracy describe a different batch than the one just trained on.
                train_loss,train_acc=sess.run([loss,accuracy],{is_train:True})
                if step%20==0:
                    print('Step:{}-Train Loss:{:.5f}-Train Acc:{:.5f}'.format(step,train_loss,train_acc))
                step+=1
                if train_acc>0.9:
                   break
            coor.request_stop()
            coor.join(threads)
def test():
    learning_rate = 1e-3
    batch_size = 128
    with tf.Graph().as_default():
        create_tfrecord(tfrecord_dir_path='../datas/mnist')
        train_image, train_label, _, _ = load_tfrecord(tfrecord_file_path='../datas/mnist/train.tfrecord',
                                                       batch_size=batch_size, height=28, width=28, channels=1,
                                                       n_class=10)
        test_image, test_label, _, _ = load_tfrecord(tfrecord_file_path='../datas/mnist/test.tfrecord',
                                                     batch_size=batch_size, height=28, width=28, channels=1, n_class=10)
        is_train = tf.placeholder_with_default(False, shape=None, name='is_train')
        image = tf.cond(is_train, lambda: train_image, lambda: test_image)
        label = tf.cond(is_train, lambda: train_label, lambda: test_label)
        logits = model_net(image, keep_prob=0.6)
        loss = create_loss(logits, label)
        train_opt = create_train_opt(loss, learning_rate=learning_rate)
        accuracy = create_accuracy(logits, label)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            coor = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coor)
            mnist = input_data.read_data_sets('../datas/mnist', one_hot=True)
            test_all_total = mnist.test.num_examples
            test_all_acc = []
            test_all_loss = []
            # One pass over the test set, accumulating per-batch metrics.
            for step in range(1, test_all_total // batch_size + 1):
                test_batch_loss, test_batch_acc = sess.run([loss, accuracy])
                test_all_acc.append(test_batch_acc)
                test_all_loss.append(test_batch_loss)
                if step % 50 == 0:
                    print('Step:{}-Test Loss:{:.5f}-Test ACC:{:.5f}'.format(step, np.mean(test_all_loss), np.mean(test_all_acc)))
            print('Mean Test Loss:{:.5f}-Mean Test ACC:{:.5f}'.format(np.mean(test_all_loss), np.mean(test_all_acc)))
            coor.request_stop()
            coor.join(threads)
if __name__ == '__main__':
    train()
    #test()