A few notes up front:
1. Environment: Ubuntu 16.04.3 + CUDA 8.0.44 + cuDNN 6.0 + TensorFlow 1.3.0
2. TensorFlow was installed the VirtualEnv way, under /home. My project lives in /home/workspace/mnist.
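Before running the tutorial it is worth confirming that the virtualenv install can actually see the GPU. A minimal sketch of such a check is below; note that device_lib lives in TensorFlow's non-public tensorflow.python.client module, so treat it as a convenience rather than a stable API:

#!/usr/bin/env python
# Sanity check: print the TensorFlow version and the devices it can see.
import tensorflow as tf
from tensorflow.python.client import device_lib

print(tf.__version__)                       # should print 1.3.0
for d in device_lib.list_local_devices():
    print(d.name, d.device_type)            # a device of type "GPU" means the GPU is visible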
Below is the full run:
The warnings only mean that my TensorFlow build was not compiled with those CPU acceleration instructions; they can be ignored, since the GPU is doing the work anyway.
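If the warnings are distracting, they can be silenced by raising TensorFlow's C++ log level before importing it. A small sketch, assuming the warnings in question are the usual "not compiled to use" CPU-instruction messages (level "2" hides INFO and WARNING output):

import os
# Must be set before "import tensorflow"; "2" hides INFO and WARNING messages.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf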
Here is the code:
If anything in the code is unclear, refer to the MNIST beginner tutorial on the TensorFlow Chinese community site (中文社区mnist入门教程).
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf

# Download (on first run) and load the MNIST data set, with one-hot labels.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("workspace/mnist/data", one_hot=True)
# Build the softmax regression model.
x = tf.placeholder("float", [None, 784])   # input images, flattened to 784 pixels
w = tf.Variable(tf.zeros([784, 10]))       # weights
b = tf.Variable(tf.zeros([10]))            # biases
y = tf.nn.softmax(tf.matmul(x, w) + b)     # predicted class probabilities
# Train the model with cross-entropy loss and gradient descent.
y_ = tf.placeholder("float", [None, 10])   # true labels, one-hot
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Evaluate the model on the test set.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
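The final line should print an accuracy of roughly 0.91, as in the tutorial. As a small follow-up, here is a sketch that simply continues the script above (it reuses sess, x, y and mnist from the code already shown) and looks at the model's prediction for a single test image:

import numpy as np

# Run the trained model on the first test image and compare with the true label.
sample_image = mnist.test.images[:1]                # shape (1, 784)
sample_label = mnist.test.labels[:1]                # shape (1, 10), one-hot
probs = sess.run(y, feed_dict={x: sample_image})    # softmax probabilities, shape (1, 10)
print("predicted digit:", np.argmax(probs, axis=1)[0])
print("true digit:", np.argmax(sample_label, axis=1)[0])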