Training MNIST two ways: TFRecords and tf.data

 

TFRecords is TensorFlow's binary file format for storing data. It makes better use of memory, is easier to copy and move around, and needs no separate label file; it is similar to LMDB and LevelDB in Caffe and greatly improves I/O throughput.

A TFRecords file contains tf.train.Example protocol buffers (each of which holds a Features field). We can write a bit of code that reads our data, fills it into an Example protocol buffer, serializes the protocol buffer to a string, and writes it to the TFRecords file via tf.python_io.TFRecordWriter.

To read data back out of a TFRecords file, use tf.TFRecordReader together with the tf.parse_single_example parser. This op parses an Example protocol buffer into tensors.
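As a minimal sketch of this write-then-read round trip (the file name toy.tfrecords and the zero-filled image bytes are made up for illustration; the APIs are the TensorFlow 1.x ones used throughout this post):

import tensorflow as tf

# write a single Example holding one int64 label and one raw-bytes image
writer = tf.python_io.TFRecordWriter("toy.tfrecords")
example = tf.train.Example(features=tf.train.Features(feature={
    "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[7])),
    "img_raw": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"\x00" * 28 * 28 * 3])),
}))
writer.write(example.SerializeToString())
writer.close()

# read it back: a reader pops file names from a queue, and
# tf.parse_single_example turns each serialized record into tensors
filename_queue = tf.train.string_input_producer(["toy.tfrecords"])
reader = tf.TFRecordReader()
_, serialized = reader.read(filename_queue)
features = tf.parse_single_example(serialized, features={
    "label": tf.FixedLenFeature([], tf.int64),
    "img_raw": tf.FixedLenFeature([], tf.string),
})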

 

Advantages:

First, the graph in TensorFlow can hold state, which lets TFRecordReader remember where it is in the TFRecords file and always return the next record. This requires initializing the whole graph before use; in the code below this is done with tf.global_variables_initializer() (the successor to the now-deprecated tf.initialize_all_variables()).

Second, queues in TensorFlow behave much like ordinary queues, except that the operations and tensors inside them are symbolic and only execute when sess.run() is called.

Third, TFRecordReader keeps popping file names off the queue until the queue is empty. A minimal run-time sketch of this pattern follows.
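Continuing the toy example above, these three points translate into the usual run-time pattern: initialize the graph, start the queue runners (with a Coordinator so the background threads can be shut down cleanly), then pull records with sess.run():

# run the reading graph: queue runners feed the file-name queue in background
# threads, and every sess.run() returns the next record
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for _ in range(3):
        print(sess.run(features["label"]))  # prints 7 three times for the toy file
    coord.request_stop()
    coord.join(threads)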

 

Training with TFRecords:

import os
import tensorflow as tf
from PIL import Image

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

def data_to_tfrecord(images, labels, filename):
    #Save data into TFRecord
    if os.path.isfile(filename):
        print("%s exists" % filename)
        return
    print("Converting data into %s ..." % filename)
    writer = tf.python_io.TFRecordWriter(filename)
    for index, img_name in enumerate(images):
        print(index)
        img = Image.open(img_name)
        img = img.resize((28, 28))
        img = img.convert("RGB")  # ensure 3 channels to match the [28, 28, 3] reshape on the reading side
        img_raw = img.tobytes()
       
        example = tf.train.Example(features=tf.train.Features(feature={
            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[int(labels[index])])),
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
        }))
        writer.write(example.SerializeToString())  # Serialize To String
    writer.close()

def tfrecord_to_data(filename):
    # generate a queue with a given file name
    print("reading tfrecords from {}".format(filename))
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)  
    features = tf.parse_single_example(serialized_example, features={
        'label': tf.FixedLenFeature([], tf.int64),      # fixed-length label
        # 'label': tf.VarLenFeature(tf.int64),          # variable-length label
        'img_raw': tf.FixedLenFeature([], tf.string),
    })
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [28, 28, 3])
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int64)
    return img, label



def decode_tfrecords(filename):
    # show the tfrecords
    for serialized_example in tf.python_io.tf_record_iterator(filename):
        example = tf.train.Example()
        example.ParseFromString(serialized_example)

        image = example.features.feature['img_raw'].bytes_list.value
        label = example.features.feature['label'].int64_list.value
        print(image, label)





def read_data_from_paths(file_path, name):
    labels = []
    file_names = []

    file_name = os.path.join(file_path, name)
    with open(file_name, 'r') as train_txt:
        for line in train_txt:
            line = line.rstrip('\n')
            spt = line.split(' ')
            file_names.append(os.path.join(file_path, spt[0]))
            labels.append(spt[1])
    return file_names, labels


def train():
    #network
    batch_size = 64
    inputs = tf.placeholder(tf.float32, [batch_size, 28, 28, 3], name='inputs')
    conv1 = tf.layers.conv2d(inputs=inputs, filters=64, kernel_size=(3, 3), padding="same", activation=None)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    conv2 = tf.layers.conv2d(inputs=pool1, filters=128, kernel_size=(3, 3), padding="same", activation=None)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 128])
    fc1 = tf.layers.dense(pool2_flat, 500, activation=tf.nn.relu)
    fc2 = tf.layers.dense(fc1, 10, activation=tf.nn.relu)
    y_out = tf.nn.softmax(fc2)

    y_ = tf.placeholder(tf.float32, [batch_size, 10])
    cross_entropy = -tf.reduce_mean(y_ * tf.log(y_out))  # cross-entropy loss

    learning_rate = tf.placeholder(tf.float32, [], name='learning_rate')  # fed each step so it can be decayed
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_out, 1), tf.argmax(y_, 1))  # do predicted and true labels match?
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    img, label = tfrecord_to_data("./mnist_train.tfrecords")
    img_batch, label_batch = tf.train.shuffle_batch([img, label],batch_size=batch_size, capacity=2000,min_after_dequeue=1000)

    init = tf.global_variables_initializer()

    with tf.Session() as session:
        session.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=session, coord=coord)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=20)
        label_batch_onehot = tf.one_hot(label_batch, depth=10)  # build the op once, outside the loop
        lr = 1e-3
        for i in range(400):
            img_batch_i, label_batch_i = session.run([img_batch, label_batch_onehot])

            feed = {inputs: img_batch_i, y_: label_batch_i, learning_rate: lr}
            loss, _, acc = session.run([cross_entropy, train_step, accuracy], feed_dict=feed)

            print("step%d loss:%f accuracy:%f" % (i, loss, acc))
            if i == 100:
                lr = lr * 0.1  # decay the learning rate once after 100 steps
        saver.save(session, "./save/mnist.ckpt")
        coord.request_stop()
        coord.join(threads)


if __name__ == "__main__":
    # Function 1: convert images to a TFRecords file (uncomment to use)
    #file_path = ""
    #name = "test.txt"
    #file_names, labels = read_data_from_paths(file_path, name)
    #tfrecord_name = "mnist_test.tfrecords"
    #data_to_tfrecord(file_names, labels, tfrecord_name)

    # Function 2: inspect the contents of a TFRecords file (uncomment to use)
    #decode_tfrecords("./mnist_test.tfrecords")
    #decode_tfrecords("E:/test_2k.tfrecords")

    # Function 3: train from the TFRecords file
    train()
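For reference, read_data_from_paths above (and get_tf_dataset in the tf.data version below) expects each line of the label text file to hold an image path and an integer label separated by a single space; the file names here are made up for illustration:

images/0/img_0001.png 0
images/7/img_0002.png 7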

Training with tf.data:

import os
import math
import tensorflow as tf
from PIL import Image
from functools import partial

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

def get_tf_dataset(dataset_text_file, batch_size=64, channels=3, crop_size=[28, 28], shuffle_size=200, augmentation=False):
    def aug_1(image):
        image = tf.image.random_brightness(image, max_delta=2. / 255.)
        image = tf.image.random_saturation(image, lower=0.01, upper=0.05)
        image = tf.image.random_hue(image, max_delta=0.05)
        image = tf.image.random_contrast(image, lower=0.01, upper=0.05)
        return image

    def aug_2(image):
        image = tf.image.random_saturation(image, lower=0.01, upper=0.05)
        image = tf.image.random_brightness(image, max_delta=2. / 255.)
        image = tf.image.random_contrast(image, lower=0.01, upper=0.05)
        image = tf.image.random_hue(image, max_delta=0.05)
        return image

    def aug_3(image):
        image = tf.image.random_contrast(image, lower=0.01, upper=0.05)
        image = tf.image.random_hue(image, max_delta=0.05)
        image = tf.image.random_brightness(image, max_delta=2. / 255.)
        image = tf.image.random_saturation(image, lower=0.01, upper=0.05)
        return image

    def aug_4(image):
        image = tf.image.random_hue(image, max_delta=0.05)
        image = tf.image.random_saturation(image, lower=0.01, upper=0.05)
        image = tf.image.random_contrast(image, lower=0.01, upper=0.05)
        image = tf.image.random_brightness(image, max_delta=2. / 255.)
        return image

    def _parse_function(filename, label):
        image_string = tf.read_file(filename)
        image_decoded = tf.image.decode_jpeg(image_string, channels=channels)
        image = tf.image.resize_images(image_decoded, crop_size)
        image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
        if augmentation:
            # tf.contrib.image.rotate requires TensorFlow 1.7+
            angle = tf.reshape(tf.random_uniform([1], -math.pi/12, math.pi/12, tf.float32), [])
            image = tf.contrib.image.rotate(image, angle)
            #image = tf.image.random_flip_left_right(image)

            #image = tf.random_crop(image, [crop_size, crop_size, 3])
            
            p1 = partial(aug_1, image)
            p2 = partial(aug_2, image)
            p3 = partial(aug_3, image)
            p4 = partial(aug_4, image)

            k = tf.reshape(tf.random_uniform([1], 0, 4, tf.int32), [])
            image = tf.case([(tf.equal(k, 0), p1),
                             (tf.equal(k, 1), p2),
                             (tf.equal(k, 2), p3),
                             (tf.equal(k, 3), p4)],
                            default=p1,
                            exclusive=True)
        
        
        return image, label

    def read_labeled_image_list(dataset_text_file):
        filenames = []
        labels = []
        with open(dataset_text_file, "r", encoding="utf-8") as f_l:
            lines = f_l.readlines()
        for line in lines:
            filenames.append(line.split(" ")[0])
            labels.append(int(line.split(" ")[1].strip("\n")))
        return filenames, labels

    filenames, labels = read_labeled_image_list(dataset_text_file)

    filenames = tf.constant(filenames, name='filename_list')
    labels = tf.constant(labels, name='label_list')

    # TensorFlow 1.3: tf.contrib.data.Dataset.from_tensor_slices
    # TensorFlow 1.4+: tf.data.Dataset.from_tensor_slices
    dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
    dataset = dataset.shuffle(shuffle_size)
    dataset = dataset.map(_parse_function)
    dataset = dataset.batch(batch_size=batch_size)
    dataset = dataset.repeat()

    return dataset


def train():
    #network
    batch_size = 64
    inputs = tf.placeholder(tf.float32, [batch_size, 28, 28, 3], name='inputs')
    conv1 = tf.layers.conv2d(inputs=inputs, filters=64, kernel_size=(3, 3), padding="same", activation=None)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    conv2 = tf.layers.conv2d(inputs=pool1, filters=128, kernel_size=(3, 3), padding="same", activation=None)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 128])
    fc1 = tf.layers.dense(pool2_flat, 500, activation=tf.nn.relu)
    fc2 = tf.layers.dense(fc1, 10, activation=tf.nn.relu)
    y_out = tf.nn.softmax(fc2)

    y_ = tf.placeholder(tf.float32, [batch_size, 10])
    cross_entropy = -tf.reduce_mean(y_ * tf.log(y_out))  # cross-entropy loss

    learning_rate = tf.placeholder(tf.float32, [], name='learning_rate')  # fed each step so it can be decayed
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_out, 1), tf.argmax(y_, 1))  # do predicted and true labels match?
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    dataset = get_tf_dataset(dataset_text_file="./train.txt", batch_size=batch_size)
    iterator = dataset.make_one_shot_iterator()
    img_batch, label_batch = iterator.get_next()

    init = tf.global_variables_initializer()

    with tf.Session() as session:
        session.run(init)
        # no queue runners needed here: the tf.data one-shot iterator drives itself
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=20)
        label_batch_onehot = tf.one_hot(label_batch, depth=10)  # build the op once, outside the loop
        lr = 1e-3
        for i in range(400):
            img_batch_i, label_batch_i = session.run([img_batch, label_batch_onehot])

            feed = {inputs: img_batch_i, y_: label_batch_i, learning_rate: lr}
            loss, _, acc = session.run([cross_entropy, train_step, accuracy], feed_dict=feed)

            print("step%d loss:%f accuracy:%f" % (i, loss, acc))
            if i == 100:
                lr = lr * 0.1  # decay the learning rate once after 100 steps
        saver.save(session, "./save/mnist.ckpt")


if __name__=="__main__":
    train()
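As a quick sanity check (a sketch that reuses get_tf_dataset from the script above and assumes a train.txt in the format described earlier), the input pipeline can be run on its own before wiring it into the network:

import tensorflow as tf

dataset = get_tf_dataset(dataset_text_file="./train.txt", batch_size=4)
iterator = dataset.make_one_shot_iterator()
img_batch, label_batch = iterator.get_next()

with tf.Session() as session:
    imgs, lbls = session.run([img_batch, label_batch])
    print(imgs.shape, lbls)  # expected: (4, 28, 28, 3) and 4 integer labels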

 

Code + image download link: http://download.csdn.net/download/qq_14845119/10199150


Reference:

https://developers.googleblog.com/2017/09/introducing-tensorflow-datasets.html

