github博客传送门
博客园传送门
本章所需知识:
- 没有基础的请观看深度学习系列视频
- tensorflow
- Python基础
资料下载链接:
- 深度学习基础网络模型(mnist手写体识别数据集)
MNIST数据集手写体识别(MLP实现)
import tensorflow as tf
# NOTE(review): tensorflow.examples.tutorials was removed in TF 2.x — this
# file targets TF 1.x graph mode (tf.placeholder / tf.Session below).
import tensorflow.examples.tutorials.mnist.input_data as input_data
# Load MNIST from ../MNIST_data/ (downloads on first run); labels are
# one-hot encoded over the 10 digit classes.
mnist = input_data.read_data_sets('../MNIST_data/', one_hot=True)
class MLPNet:
    """Two-layer MLP classifier for MNIST (TensorFlow 1.x graph mode).

    Usage: construct, then call forward(), backward() and acc() once to
    build the graph; run the resulting ops (cost, opt, accaracy, output)
    in a tf.Session with x / y fed through the placeholders.
    """

    def __init__(self):
        # Placeholders: flattened 28x28 images and one-hot labels.
        self.x = tf.placeholder(dtype=tf.float32, shape=[None, 784], name='input_x')
        self.y = tf.placeholder(dtype=tf.float32, shape=[None, 10], name='input_label')
        # Hidden layer 784 -> 100; small stddev keeps initial activations stable.
        self.w1 = tf.Variable(tf.truncated_normal(shape=[784, 100], dtype=tf.float32, stddev=tf.sqrt(1 / 100)))
        self.b1 = tf.Variable(tf.zeros([100], dtype=tf.float32))
        # Output layer 100 -> 10: one logit per digit class.
        self.w2 = tf.Variable(tf.truncated_normal(shape=[100, 10], dtype=tf.float32, stddev=tf.sqrt(1 / 10)))
        self.b2 = tf.Variable(tf.zeros([10], dtype=tf.float32))

    def forward(self):
        """Build the forward pass: ReLU hidden layer, linear logits, softmax output."""
        self.forward_1 = tf.nn.relu(tf.matmul(self.x, self.w1) + self.b1)
        # BUG FIX: the original applied ReLU to the output layer as well,
        # clamping every negative logit to zero before the softmax and
        # killing the gradient for those units. The logits must stay linear.
        self.forward_2 = tf.matmul(self.forward_1, self.w2) + self.b2
        self.output = tf.nn.softmax(self.forward_2)

    def backward(self):
        """Build the loss and the Adam training op."""
        # BUG FIX: the original minimized MSE on the softmax probabilities,
        # which yields tiny, vanishing gradients for classification. Use the
        # numerically stable cross-entropy computed directly on the logits.
        self.cost = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.y, logits=self.forward_2))
        self.opt = tf.train.AdamOptimizer().minimize(self.cost)

    def acc(self):
        """Build the accuracy op: fraction of batch rows whose argmax matches the label."""
        self.z = tf.equal(tf.argmax(self.output, 1, name='output_max'), tf.argmax(self.y, 1, name='y_max'))
        # NOTE(review): 'accaracy' is misspelled but kept — callers fetch this
        # attribute by name.
        self.accaracy = tf.reduce_mean(tf.cast(self.z, tf.float32))
if __name__ == '__main__':
    # Build the full graph once, up front.
    net = MLPNet()
    net.forward()
    net.backward()
    net.acc()
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        for i in range(10000):
            # One optimization step on a mini-batch of 100 training images.
            ax, ay = mnist.train.next_batch(100)
            loss, accaracy, _ = sess.run(
                fetches=[net.cost, net.accaracy, net.opt],
                feed_dict={net.x: ax, net.y: ay})
            if i % 1000 == 0:
                # BUG FIX: the original created fresh tf.equal / tf.reduce_mean
                # ops inside this branch on every evaluation, leaking graph
                # nodes and slowing the session down over time. Reuse the
                # accuracy op already in the graph by feeding the test batch
                # through the existing placeholders.
                test_ax, test_ay = mnist.test.next_batch(100)
                test_accuracy = sess.run(
                    net.accaracy, feed_dict={net.x: test_ax, net.y: test_ay})
                print(test_accuracy)
最后附上训练截图: