tensorflow第二课

python版本:Python 3.5.2 |Anaconda 4.2.0 (64-bit)| (default, Jul  5 2016, 11:41:13) [MSC v.1900 64 bit (AMD64)] on win32

tensorflow版本:1.13.1

IDE:pycharm
--------------------- 
作者:华北月下老人 
来源:CSDN 
原文:https://blog.csdn.net/qwer7512090/article/details/88429625 
版权声明:本文为博主原创文章,转载请附上博文链接!

写了下第二个helloworld,mnist手写数字识别,有两个Session,第一个用来训练,第二个用来验证。

from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import pylab

# Download (into MNIST_DATA/ if not already present) and load the MNIST
# dataset; one_hot=True encodes each digit label as a 10-element one-hot vector.
mnist = input_data.read_data_sets("MNIST_DATA/",one_hot=True)

# print("输入数据",mnist.train.images)
# print(mnist.train.images.shape)
# import pylab
# im = mnist.train.images[1]
# im = im.reshape(-1,28)
# pylab.imshow(im)
# pylab.show()
#
# print("测试数据",mnist.test.images.shape)
# print("验证数据",mnist.validation.images.shape)

# Build the computation graph for a single-layer softmax classifier.
tf.reset_default_graph()

# Placeholders — None lets the batch dimension be any size, so the same
# graph serves both mini-batch training and full-test-set evaluation.
x = tf.placeholder(tf.float32, [None, 784])  # flattened 28x28 pixel images
y = tf.placeholder(tf.float32, [None, 10])   # one-hot digit labels

# Model parameters: a single affine layer followed by softmax.
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.zeros([10]))
pred = tf.nn.softmax(tf.matmul(x, W) + b)

# Cross-entropy loss.
# FIX: sum over the class axis (axis=1) only, then average over the batch.
# The original summed over *all* elements, which made reduce_mean a no-op
# on a scalar, so the loss (and gradient magnitude) scaled with batch size.
# NOTE(review): tf.log(pred) can yield NaN if a softmax output underflows
# to exactly 0; consider tf.clip_by_value(pred, 1e-10, 1.0) if that occurs.
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), axis=1))

# Hyper-parameters and the gradient-descent training op.
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

training_epochs = 50
batch_size = 100
display_step = 1   # print progress every `display_step` epochs

# Saver for checkpointing; the same path is used by both the (commented-out)
# training session below and the restore/inference session.
saver = tf.train.Saver()
model_path = "log/mnist/nmist_model.ckpt"
#启动session

# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())  #初始化op
#     #开始训练
#     for epoch in range(training_epochs):
#         avg_cost = 0
#         total_batch = int(mnist.train.num_examples/batch_size)
#         #循环一遍所有数据集
#         for i in range(total_batch):
#             batch_xs,batch_ys = mnist.train.next_batch(batch_size)
#             #运行优化器
#             _,c = sess.run([optimizer,cost],feed_dict={x:batch_xs,y:batch_ys})
#
#             #计算平均损失
#             avg_cost+=c/total_batch
#
#         if (epoch+1)%display_step == 0:
#             print("Epoch:","%04d" % (epoch+1),"cost = ","{:.9f}".format(avg_cost))
#
#     #测试
#     correct_or_not = tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
#     #计算准确率
#     accuracy = tf.reduce_mean(tf.cast(correct_or_not,tf.float32))
#     print("Accuracy:",accuracy.eval({x:mnist.test.images,y:mnist.test.labels}))
#     print("Finished!")
#
#     #保存模型
#     save_path = saver.save(sess,model_path)
#     print("model saved in file:%s" % save_path)

# print("starting 2nd session")

with tf.Session() as sess:
    # Give every variable an initial value, then overwrite those values
    # with the weights stored in the checkpoint.
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, model_path)

    # Accuracy on the held-out test set: a prediction counts as correct
    # when the argmax of the softmax output matches the one-hot label.
    is_correct = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

    # Run the model on two training images.  `predicted_digit` is the
    # class index; `pred` is the full 10-way softmax distribution.
    predicted_digit = tf.argmax(pred, 1)
    images, labels = mnist.train.next_batch(2)
    digits, probs = sess.run([predicted_digit, pred], feed_dict={x: images})
    print(digits, probs, labels)

    # Display each sampled image (rows of 28 pixels).
    for sample in images:
        pylab.imshow(sample.reshape(-1, 28))
        pylab.show()

 

 

 

  • 1
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值