【TensorFlow: 实战Google深度学习框架】- Tip 7: Saving a Model and Loading It to Return Predictions

First, create a folder 'model' to hold the saved model checkpoints.
Create a folder 'logs' to hold the graph structure (for TensorBoard).
Create a folder 'MNIST_data' to hold the dataset.
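
If you would rather create these folders from the script itself, a minimal sketch (assuming the script is run from the project root) looks like this:

import os

# Create the working directories next to the script if they do not exist yet:
# 'model' for checkpoints, 'logs' for the TensorBoard graph, 'MNIST_data' for the dataset.
for folder in ('model', 'logs', 'MNIST_data'):
    os.makedirs(folder, exist_ok=True)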

Saving the model:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Read the dataset from the local MNIST_data folder (downloaded on first run).
# one_hot=False keeps the labels as integer class indices, which matches
# sparse_softmax_cross_entropy below.
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x')
    y_ = tf.placeholder(tf.int32, [None, ], name='y')
with tf.name_scope('Net'):
    dense1 = tf.layers.dense(inputs=x,
                             units=1024,
                             activation=tf.nn.relu,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.nn.l2_loss)
    dense2 = tf.layers.dense(inputs=dense1,
                             units=512,
                             activation=tf.nn.relu,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.nn.l2_loss)
with tf.name_scope('prediction'):
    # name='pre' makes the output tensor addressable later as
    # 'prediction/pre/BiasAdd:0' when the graph is restored.
    logits = tf.layers.dense(inputs=dense2,
                             units=10,
                             activation=None,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.nn.l2_loss, name='pre')
with tf.name_scope('loss'):
    # Note: the L2 terms collected via kernel_regularizer go into
    # tf.GraphKeys.REGULARIZATION_LOSSES and are not added to this loss.
    loss = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=logits)
    tf.summary.scalar('loss', loss)
with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
    acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', acc)
with tf.name_scope('train_step'):
    train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

merged = tf.summary.merge_all()
sess = tf.InteractiveSession()

sess.run(tf.global_variables_initializer())
# Write the graph definition to the logs folder for TensorBoard.
writer = tf.summary.FileWriter('logs', sess.graph)
saver = tf.train.Saver()
# saver = tf.train.Saver(max_to_keep=1)  # keep only the single most recent checkpoint
max_acc = 0
for i in range(100):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_op, feed_dict={x: batch_xs, y_: batch_ys})
    val_loss, val_acc = sess.run([loss, acc], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
    print('step:%d, val_loss:%f, val_acc:%f' % (i, val_loss, val_acc))
    # Save a checkpoint only when validation accuracy improves; the step index
    # is appended to the file name, e.g. ./model/mnist.ckpt-<step>.
    if val_acc > max_acc:
        max_acc = val_acc
        saver.save(sess, './model/mnist.ckpt', global_step=i + 1)
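
After training finishes, the model folder should contain a checkpoint index file plus .meta/.index/.data files for the saved steps (tf.train.Saver keeps the five most recent by default). As a quick check of what was actually written, a small sketch along these lines can list the stored variables (it only assumes a TF 1.x install):

import tensorflow as tf

# Resolve the newest checkpoint prefix recorded in model/checkpoint,
# e.g. './model/mnist.ckpt-<step>' (the exact step depends on when accuracy last improved).
ckpt_path = tf.train.latest_checkpoint('model/')
print('latest checkpoint:', ckpt_path)

# List the variables stored in that checkpoint along with their shapes.
reader = tf.train.NewCheckpointReader(ckpt_path)
for name, shape in sorted(reader.get_variable_to_shape_map().items()):
    print(name, shape)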

Loading the model and printing predictions:

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)

# Rebuild the graph structure from the .meta file saved during training;
# adjust the step suffix to match a .meta file that actually exists in ./model.
saver = tf.train.import_meta_graph('./model/mnist.ckpt-100.meta')
with tf.Session() as sess:
    # Restore the variable values from the newest checkpoint in model/.
    saver.restore(sess, tf.train.latest_checkpoint("model/"))
    graph = tf.get_default_graph()
    # Print every node name in the restored graph; this is how the tensor
    # names used below were found.
    tensor_names = [tensor.name for tensor in graph.as_graph_def().node]
    for name in tensor_names:
        print(name)
    # The logits layer was named 'pre' inside the 'prediction' scope, so its
    # output tensor is 'prediction/pre/BiasAdd:0'.
    logits = graph.get_tensor_by_name("prediction/pre/BiasAdd:0")
    print(logits)
    for i in range(100):
        batch_xs, batch_ys = mnist.train.next_batch(50)
        # Feed by tensor name. The label feed is not needed to compute the
        # logits, but including it does no harm.
        prediction = sess.run(logits, feed_dict={'input/x:0': batch_xs, 'input/y:0': batch_ys})
        prediction = np.array(prediction)
        print(prediction.argmax(axis=1))
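    # Optional extra check (a minimal sketch): run the full test set through the
    # restored logits and compare the argmax predictions with the integer labels.
    test_logits = sess.run(logits, feed_dict={'input/x:0': mnist.test.images})
    test_acc = (test_logits.argmax(axis=1) == mnist.test.labels).mean()
    print('restored model test accuracy: %f' % test_acc)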

Output: (screenshots of the console output omitted)
Reference blog: sorry, I have forgotten which one it was; I implemented this quite a while ago and only later thought to write it up.
