# Tensorflow: Convolutional Neural Network Basic
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

input_layer_size = 784    # flattened 28x28 grayscale images
output_layer_size = 10    # digit classes 0-9
mnist = input_data.read_data_sets('data/', one_hot=True)
train_img = mnist.train.images
train_lbl = mnist.train.labels
test_img = mnist.test.images
test_lbl = mnist.test.labels
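# read_data_sets yields 55,000 training and 10,000 test images (plus a
# 5,000-image validation split); one_hot=True encodes each label as a
# length-10 indicator vector, matching output_layer_size.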
weights = {'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)),    # conv1: 3x3 kernel, 1 -> 64 channels
           'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.1)),  # conv2: 3x3 kernel, 64 -> 128 channels
           'wd1': tf.Variable(tf.random_normal([7*7*128, 1024], stddev=0.1)),  # fc1: flattened 7x7x128 -> 1024
           'wd2': tf.Variable(tf.random_normal([1024, 10], stddev=0.1))}       # fc2: 1024 -> 10 logits
bias = {'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)),
        'bc2': tf.Variable(tf.random_normal([128], stddev=0.1)),
        'bd1': tf.Variable(tf.random_normal([1024], stddev=0.1)),
        'bd2': tf.Variable(tf.random_normal([10], stddev=0.1))}
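# Note: weights are drawn from N(0, 0.1^2); tf.truncated_normal is a common
# alternative initializer that resamples values beyond two standard
# deviations, avoiding rare large initial weights.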
def conv_bn_relu_maxpool(_input, _w, _b):
    _conv = tf.nn.conv2d(_input, _w, strides=[1, 1, 1, 1], padding='SAME')
    # Normalize with the statistics of the current mini-batch. (Passing fixed
    # mean=0, variance=1 here would make the normalization a no-op; a full
    # implementation would also track moving averages for use at test time.)
    _mean, _var = tf.nn.moments(_conv, axes=[0, 1, 2])
    _bn = tf.nn.batch_normalization(_conv, mean=_mean, variance=_var,
                                    offset=None, scale=None,
                                    variance_epsilon=1e-5)
    _relu = tf.nn.relu(tf.nn.bias_add(_bn, _b))
    _pool = tf.nn.max_pool(_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    return _pool
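# Shape bookkeeping: SAME-padded stride-1 convs preserve spatial size and the
# 2x2 max-pool halves it, so 28x28x1 -> 28x28x64 -> 14x14x64 (block 1), then
# 14x14x64 -> 14x14x128 -> 7x7x128 (block 2). Hence 'wd1' expects 7*7*128
# flattened features.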
def conv_net(_input, _w, _b, _keep_ratio):
    _input_re = tf.reshape(_input, shape=[-1, 28, 28, 1])
    _conv1 = conv_bn_relu_maxpool(_input_re, _w['wc1'], _b['bc1'])
    _drop1 = tf.nn.dropout(_conv1, keep_prob=_keep_ratio)
    _conv2 = conv_bn_relu_maxpool(_drop1, _w['wc2'], _b['bc2'])
    _drop2 = tf.nn.dropout(_conv2, keep_prob=_keep_ratio)
    _dense = tf.reshape(_drop2, shape=[-1, 7*7*128])   # flatten for the fully connected layers
    _fc1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(_dense, _w['wd1']), _b['bd1']))
    _drop3 = tf.nn.dropout(_fc1, keep_prob=_keep_ratio)
    _score = tf.nn.bias_add(tf.matmul(_drop3, _w['wd2']), _b['bd2'])   # raw logits, no softmax here
    return _score
x = tf.placeholder(tf.float32, [None, input_layer_size], name='input')
y = tf.placeholder(tf.float32, [None, output_layer_size], name='output')
dropout_keep_prob = tf.placeholder(tf.float32)
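# keep_prob semantics: tf.nn.dropout keeps each activation with probability
# keep_prob and scales the survivors by 1/keep_prob, so no extra rescaling is
# needed at test time; the script feeds 0.7 for training and 1.0 for evaluation.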
score = conv_net(x, weights, bias, dropout_keep_prob)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=score, labels=y))
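# Per example i: loss_i = -sum_j y_ij * log(softmax(score_i)_j); reduce_mean
# then averages this cross-entropy over the mini-batch.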
lr = 0.001
optimizer = tf.train.AdamOptimizer(lr).minimize(loss)
# optimizer = tf.train.GradientDescentOptimizer(lr).minimize(loss)
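# Adam adapts a per-parameter step size from running estimates of the first
# and second moments of the gradients; plain gradient descent (commented out
# above) would typically need a larger learning rate and more epochs here.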
correct = tf.equal(tf.argmax(score, 1), tf.argmax(y, 1))   # per-example hit/miss
acc = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()   # initialize_all_variables() is deprecated
epoch = 100      # training epochs
batch_size = 100
snapshot = 5     # print progress every `snapshot` epochs
save_step = 1    # checkpoint every `save_step` epochs
saver = tf.train.Saver()
sess = tf.Session()
sess.run(init)
FLAG_TRAIN = False
if FLAG_TRAIN:
    loss_cache = []
    acc_cache = []
    for ep in range(epoch):
        num_batch = mnist.train.num_examples // batch_size
        avg_loss, avg_acc = 0, 0
        for nb in range(num_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            out = sess.run([optimizer, acc, loss],
                           feed_dict={x: batch_x, y: batch_y, dropout_keep_prob: 0.7})
            avg_loss += out[2] / num_batch
            avg_acc += out[1] / num_batch
        loss_cache.append(avg_loss)
        acc_cache.append(avg_acc)
        if ep % snapshot == 0:
            print('Epoch: %d, loss: %.4f, acc: %.4f' % (ep, avg_loss, acc_cache[-1]))
        if ep % save_step == 0:
            saver.save(sess, save_path='net_snapshot/conv_net_tfmodel', global_step=ep)
    print('test accuracy:', sess.run(acc, {x: test_img, y: test_lbl, dropout_keep_prob: 1.0}))
    plt.figure(1)
    plt.plot(range(len(loss_cache)), loss_cache, 'b-', label='loss')
    plt.legend(loc='upper right')
    plt.figure(2)
    plt.plot(range(len(acc_cache)), acc_cache, 'o-', label='acc')
    plt.legend(loc='lower right')
    plt.show()
else:
    # Restore the snapshot from the last training epoch (epoch - 1 = 99).
    saver.restore(sess, 'net_snapshot/conv_net_tfmodel-%d' % (epoch - 1))
    print('test accuracy:', sess.run(acc, {x: test_img, y: test_lbl, dropout_keep_prob: 1.0}))
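# A sketch of an alternative restore (same snapshot directory assumed):
# tf.train.latest_checkpoint finds the newest checkpoint, so the step number
# need not be hard-coded:
#   ckpt = tf.train.latest_checkpoint('net_snapshot')
#   if ckpt is not None:
#       saver.restore(sess, ckpt)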
# Training log, printed every `snapshot` = 5 epochs:
# Epoch: 0, loss: 0.5237, acc: 0.8749
# Epoch: 5, loss: 0.0370, acc: 0.9878
# Epoch: 10, loss: 0.0193, acc: 0.9937
# Epoch: 15, loss: 0.0125, acc: 0.9958
# Epoch: 20, loss: 0.0091, acc: 0.9969
# Epoch: 25, loss: 0.0091, acc: 0.9969
# Epoch: 30, loss: 0.0058, acc: 0.9982
# Epoch: 35, loss: 0.0068, acc: 0.9981
# Epoch: 40, loss: 0.0070, acc: 0.9981
# Epoch: 45, loss: 0.0059, acc: 0.9983
# Epoch: 50, loss: 0.0061, acc: 0.9986
# Epoch: 55, loss: 0.0061, acc: 0.9983
# Epoch: 60, loss: 0.0075, acc: 0.9983
# Epoch: 65, loss: 0.0051, acc: 0.9987
# Epoch: 70, loss: 0.0049, acc: 0.9987
# Epoch: 75, loss: 0.0078, acc: 0.9982
# Epoch: 80, loss: 0.0073, acc: 0.9982
# Epoch: 85, loss: 0.0061, acc: 0.9985
# Epoch: 90, loss: 0.0052, acc: 0.9988
# Epoch: 95, loss: 0.0074, acc: 0.9986
# test accuracy: 0.9946