This code targets TensorFlow version r0.12.
Simple version:
# -*- coding: utf-8 -*-
"""
Created on Tue May 2 14:18:24 2017
@author: DidiLv
Email: Eric2014_Lv@sjtu.edu.cn
"""
## import the MNIST example data: start
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
## import the MNIST example data: end
import tensorflow as tf
# placeholders for inputs and labels
x = tf.placeholder(tf.float32, [None, 784])  # input images, flattened to 28*28 = 784 pixels
y_ = tf.placeholder(tf.float32, [None, 10])  # one-hot labels, used in the cross-entropy
# model parameters
W = tf.Variable(tf.zeros([784, 10]))  # weights of the regression
b = tf.Variable(tf.zeros([10]))       # bias
y = tf.nn.softmax(tf.matmul(x, W) + b)  # y = softmax(x * W + b)
# loss function: cross entropy, averaged over the batch
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
# train step
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# initialize global variables
init = tf.global_variables_initializer()
# launch the graph in a session
sess = tf.Session()
sess.run(init)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# test the trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
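To make the loss concrete, here is a minimal NumPy sketch of what the cross_entropy line above computes for a single example; the predicted probabilities and the label below are made up for illustration:

import numpy as np

# hypothetical softmax output for one image, plus its one-hot label (a "2")
y_pred = np.array([0.05, 0.05, 0.60, 0.05, 0.05, 0.05, 0.05, 0.04, 0.03, 0.03])
y_true = np.zeros(10)
y_true[2] = 1.0

# cross-entropy for one example: -sum_i y'_i * log(y_i);
# with a one-hot label this reduces to -log(predicted probability of the true class)
print(-np.sum(y_true * np.log(y_pred)))  # -log(0.60) ≈ 0.51

After 1000 training steps with batches of 100, this model typically reaches roughly 92% accuracy on the test set.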
Deep version (the same softmax model, but using the numerically stable fused loss):
# -*- coding: utf-8 -*-
"""
Created on Tue May 2 19:00:01 2017
Email: Eric2014_Lv@sjtu.edu.cn
@author: DidiLv
"""
# Import data
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x, W) + b
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
# The raw formulation of cross-entropy,
#
#   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
#                                 reduction_indices=[1]))
#
# can be numerically unstable. So here we use
# tf.nn.softmax_cross_entropy_with_logits on the raw outputs of 'y',
# and then average across the batch. (A short demonstration of the
# instability follows after this listing.)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
# Train
tf.global_variables_initializer().run()
for _ in range(1000):
    batch = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch[0], y_: batch[1]})
# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                    y_: mnist.test.labels}))
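To see concretely why the raw formulation discussed in the comments above is avoided, here is a minimal sketch against the same r0.12-era API; the extreme logit values are made up to force the failure:

import tensorflow as tf

logits = tf.constant([[1000.0, 0.0]])  # extreme logit: softmax saturates to [1.0, 0.0]
labels = tf.constant([[0.0, 1.0]])     # the true class is the second one

# raw formulation: softmax underflows to exactly 0.0 for the true class,
# so log(0) = -inf and the per-example loss becomes inf
naive = -tf.reduce_sum(labels * tf.log(tf.nn.softmax(logits)), reduction_indices=[1])

# the fused op works with log-softmax internally and stays finite
stable = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)

with tf.Session() as sess:
    print(sess.run(naive))   # [ inf ]
    print(sess.run(stable))  # [ 1000. ]

The fused op computes the loss from the log-softmax directly (with the usual max-subtraction trick), so no intermediate probability is ever rounded down to zero.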