TensorFlow Dataset API introduction: an MNIST example

I've finished writing the Dataset API introduction, but when it comes to actually reading a dataset you may still not know how to use it... so here is a worked example with MNIST.

If you don't yet know how to use the Dataset API, you can check portal 1 and portal 2 (my earlier posts)~~~

Without further ado, here's the code:

Version 1 (initializable iterator + placeholder)

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data


mnist = input_data.read_data_sets("E:/database/mnist", one_hot=True)
train_images = mnist.train.images
train_labels = mnist.train.labels
test_images = mnist.test.images
test_labels = mnist.test.labels

train_data = (train_images, train_labels)
test_data = (test_images, test_labels)




def mnist_model(inputs):
    with tf.variable_scope('MNIST'):
        # reshape flat 784-vectors into 28x28 single-channel images
        inputs = tf.reshape(inputs, [-1, 28, 28, 1])
        net = tf.layers.conv2d(inputs, 32, 5, padding='same', activation=tf.nn.relu, name='conv1')
        net = tf.layers.max_pooling2d(net, 2, 2, name='pool1')
        net = tf.layers.conv2d(net, 64, 5, padding='same', activation=tf.nn.relu, name='conv2')
        net = tf.layers.max_pooling2d(net, 2, 2, name='pool2')
        net = tf.layers.flatten(net, name='flatten')
        net = tf.layers.dense(net, 1024, activation=tf.nn.relu, name='fc3')
        # raw logits for the 10 digit classes (softmax is applied inside the loss)
        net = tf.layers.dense(net, 10, name='logits')

    return net

def dataset_generation(images, labels, buffer_size=10000, batch_size=64, repeat=True, shuffle=False):
    '''
    Generate a TensorFlow Dataset object.
    images: numpy array
    labels: numpy array
    '''
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    # dataset = dataset.map(...)  # hook for per-element preprocessing if needed
    if repeat:
        dataset = dataset.repeat()
    if shuffle:
        # note: shuffling after repeat() mixes elements across epoch boundaries;
        # call shuffle() before repeat() if you want clean per-epoch shuffling
        dataset = dataset.shuffle(buffer_size=buffer_size)
    dataset = dataset.batch(batch_size)
    return dataset

def optimizer_op(logits, labels):
    # softmax_cross_entropy_with_logits is deprecated in newer TF 1.x releases in favor of
    # softmax_cross_entropy_with_logits_v2 (equivalent here, since the one-hot labels are fed data)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
    return optimizer, loss


def acc(logits, labels):
    correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    return accuracy


x = tf.placeholder(tf.float32, shape=[None, 784])/255.  # note: read_data_sets already scales images to [0, 1], so this /255. is redundant
y = tf.placeholder(tf.float32, shape=[None, 10])
# build the dataset from the placeholders
dataset = dataset_generation(x, y)
# create the iterator
iterator = dataset.make_initializable_iterator()
features, labels = iterator.get_next()


out = mnist_model(features)
train_op, loss = optimizer_op(out, labels)
acc_out = acc(out, labels)


with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(3):
        # training
        train_acc = []
        sess.run(iterator.initializer, feed_dict={x: train_data[0], y: train_data[1]})
        for _ in range(20):
            acc_val, _, train_loss = sess.run([acc_out, train_op, loss])  # don't shadow the acc() function
            train_acc.append(acc_val)
        acc_mean = np.mean(train_acc)

        # evaluation
        test_acc = []
        sess.run(iterator.initializer, feed_dict={x: test_data[0], y: test_data[1]})
        for i in range(20):
            acc_ = sess.run(acc_out)
            test_acc.append(acc_)
        acc_mean_ = np.mean(test_acc)

        print('epoch: {:03d} | train acc: {:.2f} | test acc: {:.2f} | train loss: {:.2f}'.format(epoch, acc_mean, acc_mean_, train_loss))

  

—————————————— Output ——————————————
epoch: 000 | train acc: 0.43 | test acc: 0.71 | train loss: 0.55
epoch: 001 | train acc: 0.85 | test acc: 0.87 | train loss: 0.21
epoch: 002 | train acc: 0.94 | test acc: 0.90 | train loss: 0.06

Version 2 (reinitializable iterator)

The code duplicated from version 1 is not repeated here:
# read data
mnist = input_data.read_data_sets("E:/database/mnist", one_hot=True)
train_images = mnist.train.images
train_labels = mnist.train.labels
test_images = mnist.test.images
test_labels = mnist.test.labels
train_data = (train_images, train_labels)
test_data = (test_images, test_labels)

# build the datasets
train_dataset = dataset_generation(train_data[0], train_data[1])
test_dataset = dataset_generation(test_data[0], test_data[1], repeat=False)
# create a reinitializable iterator from the structure shared by the two datasets
iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)
features, labels = iterator.get_next()
# define an iterator-initialization op for each data source
training_init_op = iterator.make_initializer(train_dataset)
test_init_op = iterator.make_initializer(test_dataset)
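
The post stops at building the iterator, so for completeness here is a minimal sketch of the matching training loop (my addition, not in the original; it reuses mnist_model, optimizer_op, and acc from version 1, and the epoch/step counts simply mirror version 1):

out = mnist_model(features)
train_op, loss = optimizer_op(out, labels)
acc_out = acc(out, labels)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(3):
        # training: point the shared iterator at the training dataset
        sess.run(training_init_op)
        train_acc = []
        for _ in range(20):
            acc_val, _, train_loss = sess.run([acc_out, train_op, loss])
            train_acc.append(acc_val)

        # evaluation: re-point the same iterator at the test dataset
        sess.run(test_init_op)
        test_acc = []
        try:
            while True:
                test_acc.append(sess.run(acc_out))
        except tf.errors.OutOfRangeError:
            pass  # test_dataset was built with repeat=False, so it signals exhaustion
        print('epoch: {:03d} | train acc: {:.2f} | test acc: {:.2f} | train loss: {:.2f}'.format(
            epoch, np.mean(train_acc), np.mean(test_acc), train_loss))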

Version 3 (feedable iterator + placeholder)

I haven't written this code... you can also refer to this person's code (linked in the original post) for the details.
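
Since the post doesn't include code for this version, here is a minimal sketch of the feedable-iterator pattern (my own illustration, assuming the train_dataset/test_dataset from version 2): a tf.string placeholder carries an iterator handle, so each sess.run call can pick which concrete iterator feeds the shared get_next op.

handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
    handle, train_dataset.output_types, train_dataset.output_shapes)
features, labels = iterator.get_next()

# one concrete iterator per data source
train_iterator = train_dataset.make_one_shot_iterator()
test_iterator = test_dataset.make_initializable_iterator()

with tf.Session() as sess:
    train_handle = sess.run(train_iterator.string_handle())
    test_handle = sess.run(test_iterator.string_handle())
    # choose the data source per call by feeding the corresponding handle
    train_batch = sess.run([features, labels], feed_dict={handle: train_handle})
    sess.run(test_iterator.initializer)
    test_batch = sess.run([features, labels], feed_dict={handle: test_handle})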

When I ran the second version, memory usage shot up dramatically (8 GB of RAM), while the first approach behaved much better. As explained in the Dataset API post, when we create the dataset this way the numpy arrays we read in are converted into tensors embedded in the graph, which can exhaust memory (the arrays end up being copied several times)... I didn't expect even the MNIST dataset to barely fit! Better to feed the data in through placeholders!
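
To make the difference concrete, here is a minimal sketch (my own illustration) of the two dataset-creation styles compared above; in (a) the numpy arrays are baked into the graph as constants, while in (b) only small placeholder ops live in the graph and the arrays cross over once, at iterator-initialization time:

# (a) version 2's style: the arrays become graph constants and may be copied several times
dataset_const = tf.data.Dataset.from_tensor_slices((train_images, train_labels))

# (b) version 1's style: placeholders keep the arrays out of the graph definition
x = tf.placeholder(tf.float32, shape=[None, 784])
y = tf.placeholder(tf.float32, shape=[None, 10])
dataset_feed = tf.data.Dataset.from_tensor_slices((x, y))
iterator = dataset_feed.make_initializable_iterator()
# the arrays are handed over only here:
# sess.run(iterator.initializer, feed_dict={x: train_images, y: train_labels})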

 
