The MNIST dataset: images and labels of handwritten digits (0-9). It contains 60,000 28*28 images for training and 10,000 28*28 images for testing.
1. Import the dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('./data/',one_hot= True)
2. Return the number of samples in each subset
print ("train data size: "+ str(mnist.train.num_examples))
print ("validation data size "+ str(mnist.validation.num_examples))
print ("test data size "+ str(mnist.test.num_examples))
3. Return labels and data
mnist.train.labels[0]   # the one-hot label vector (length 10) of the first training sample
mnist.train.images[0]   # the flattened 28*28 = 784 pixel vector of the first training image
4. Take a small batch of data to feed the neural network for training
Batch_size = 200
xs, ys = mnist.train.next_batch(Batch_size)   # randomly draw Batch_size samples
print("xs shape: " +str(xs.shape))
print("ys shape: " +str(ys.shape))
5. Common tf functions
tf.get_collection(key)   # retrieve all the elements of the named collection and return them as a list
tf.add_n(list)   # element-wise sum of the tensors in the list
tf.cast(x, dtype)   # cast x to type dtype
tf.argmax(a, axis)   # return the index of the maximum value along the given axis
# e.g.:
tf.argmax([1, 0, 0], 0)   # returns 0
os.path.join("home", "name")   # returns home/name
str.split(sep)   # split the string on the given separator and return the list of parts
# e.g.:
"./model/mnist_model-1001".split("/")[-1].split("-")[-1]   # returns "1001"
with tf.Graph().as_default() as g:   # nodes defined inside this block belong to graph g
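A minimal sketch (assuming TensorFlow 1.x; the tensor values and the collection name 'demo_losses' are made up for illustration) that exercises several of these functions together:

import os
import tensorflow as tf

# collect two made-up scalars, then sum them with tf.add_n
tf.add_to_collection('demo_losses', tf.constant(1.0))
tf.add_to_collection('demo_losses', tf.constant(2.5))
total = tf.add_n(tf.get_collection('demo_losses'))          # 3.5

flags = tf.cast(tf.constant([True, False]), tf.float32)     # [1.0, 0.0]
idx = tf.argmax(tf.constant([[1, 0, 0], [0, 0, 1]]), 1)     # [0, 2]

with tf.Session() as sess:
    print(sess.run([total, flags, idx]))

# path handling on the Python side
ckpt_path = os.path.join("model", "mnist_model-1001")        # model/mnist_model-1001 (on Linux/macOS)
print(ckpt_path.split("/")[-1].split("-")[-1])               # 1001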
6. Save the model
saver = tf.train.Saver()   # instantiate a Saver object
with tf.Session() as sess:
    for i in range(STEPS):
        if i % save_interval == 0:   # save a checkpoint every save_interval training steps
            saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
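For a runnable version, here is a self-contained periodic-save sketch; the counter variable, the "./demo_model/" directory, and the step counts are placeholders of my own, not part of the MNIST scripts below:

import os
import tensorflow as tf

MODEL_SAVE_PATH = "./demo_model/"   # hypothetical checkpoint directory
MODEL_NAME = "demo"
if not os.path.exists(MODEL_SAVE_PATH):
    os.makedirs(MODEL_SAVE_PATH)

counter = tf.Variable(0, name="counter", trainable=False)
increment = tf.assign_add(counter, 1)
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(3000):
        sess.run(increment)
        if i % 1000 == 0:   # save a checkpoint every 1000 steps
            saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=counter)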
7. Load the model
with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(save_path)   # save_path is the checkpoint directory
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
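And the matching restore sketch, under the same assumptions as the save sketch above (the graph must define the same variables that were saved; if no checkpoint is found, the if branch is simply skipped):

import tensorflow as tf

counter = tf.Variable(0, name="counter", trainable=False)   # must match the saved variable
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state("./demo_model/")
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
    print(sess.run(counter))   # the restored counter, or 0 if no checkpoint was found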
8. Instantiate a saver that restores the moving-average values
# moving average
ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
ema_restore = ema.variables_to_restore()
saver = tf.train.Saver(ema_restore)
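To see what this saver actually restores, here is a small sketch (the variable name w is made up): variables_to_restore() returns a dict mapping each shadow variable's name to the original variable, so restoring a checkpoint loads the moving-average values into the ordinary weights:

import tensorflow as tf

w = tf.Variable(1.0, name="w")
ema = tf.train.ExponentialMovingAverage(0.99)
ema_op = ema.apply([w])               # creates the shadow variable "w/ExponentialMovingAverage"
print(ema.variables_to_restore())     # {'w/ExponentialMovingAverage': <tf.Variable 'w:0' ...>}
saver = tf.train.Saver(ema.variables_to_restore())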
9. Computing the accuracy
# accuracy calculation
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
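A tiny worked example of the same two lines, with hand-made predictions and one-hot labels chosen just for illustration:

import tensorflow as tf

y = tf.constant([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])    # network outputs
y_ = tf.constant([[0.0, 1.0], [1.0, 0.0], [1.0, 0.0]])   # one-hot labels
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))   # [True, True, False]
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
    print(sess.run(accuracy))   # 0.6666667: two of the three predictions are correct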
10. MNIST code example
mnist_forward.py
#coding:utf-8
import tensorflow as tf
INPUT_NODE = 784    # each input image is 28*28 = 784 pixels, flattened into a vector
OUTPUT_NODE = 10    # ten output classes, for the labels 0-9
LAYER1_NODE = 500   # a single hidden layer with 500 nodes
# 1. Define the network parameters w and b, and the forward-propagation process
def get_weight(shape, regularizer):
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1), dtype=tf.float32)
    if regularizer is not None:
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w
def get_bias(shape):
    b = tf.Variable(tf.zeros(shape=shape))
    return b
def forward(x, regularizer):
    w1 = get_weight([INPUT_NODE, LAYER1_NODE], regularizer)
    b1 = get_bias([LAYER1_NODE])
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)
    w2 = get_weight([LAYER1_NODE, OUTPUT_NODE], regularizer)
    b2 = get_bias([OUTPUT_NODE])
    y = tf.matmul(y1, w2) + b2   # the output layer has no activation; softmax is applied inside the loss
    return y
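Not part of the scripts themselves, but a quick sanity check of the forward pass: feeding a random batch should produce logits of shape (batch_size, 10). The random input below is just a stand-in for real MNIST images:

import numpy as np
import tensorflow as tf
import mnist_forward

x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
y = mnist_forward.forward(x, None)   # no regularization for this check
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(4, mnist_forward.INPUT_NODE).astype(np.float32)   # fake batch of 4 "images"
    print(sess.run(y, feed_dict={x: batch}).shape)   # (4, 10)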
mnist_backward.py
#coding:utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import os
STEPS = 50000
BATCH_SIZE = 200
LEARNING_RATE_BASE = 0.1      # initial learning rate
LEARNING_RATE_DECAY = 0.99    # learning-rate decay rate
REGULARIZER = 0.0001          # regularization weight
MOVING_AVERAGE_DECAY = 0.99   # moving-average decay rate
MODEL_SAVE_PATH = "./model/"
MODEL_NAME = "mnist_model"
def backward(mnist):
    x = tf.placeholder(tf.float32, shape=(None, mnist_forward.INPUT_NODE))
    y_ = tf.placeholder(tf.float32, shape=(None, mnist_forward.OUTPUT_NODE))
    y = mnist_forward.forward(x, REGULARIZER)   # forward propagation
    # counter for how many batches have been run; initialized to 0 and excluded from training
    global_step = tf.Variable(0, trainable=False)
    # exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    # loss function: cross entropy plus the L2 regularization terms collected in 'losses'
    loss_ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    loss_cem = tf.reduce_mean(loss_ce)
    loss_total = loss_cem + tf.add_n(tf.get_collection('losses'))
    # backpropagation method: gradient descent on the regularized loss
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_total, global_step=global_step)
    # moving averages of all trainable variables
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    # after backpropagation updates the parameters, also update each parameter's moving average;
    # the two lines below bundle both operations into a single train_op
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name="train")
    # once the moving-average model is set up, simply call sess.run(train_op) at every training step
    # saver for checkpoints
    saver = tf.train.Saver()
    # run the session
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        # resume from the latest checkpoint if one exists
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        for i in range(STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss_total, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print('after ' + str(step) + ' steps, loss is ' + str(loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
def main():
    mnist = input_data.read_data_sets('./data/', one_hot=True)
    backward(mnist)
if __name__ == "__main__":
    main()
mnist_test.py
#coding:utf-8
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import mnist_backward
TEST_INTERVAL_SECS = 5
def test(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, shape=(None, mnist_forward.OUTPUT_NODE))
        y = mnist_forward.forward(x, None)   # forward propagation, without regularization
        # saver that restores the moving-average (shadow) values into the weights
        ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)
        # accuracy calculation
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
                    print('after ' + str(global_step) + ' steps, accuracy is ' + str(accuracy_score))
                else:
                    print("No checkpoint file is found")
                    return
            time.sleep(TEST_INTERVAL_SECS)
def main():
    mnist = input_data.read_data_sets('./data/', one_hot=True)
    test(mnist)
if __name__ == "__main__":
    main()
mnist_app.py
#coding:utf-8
import tensorflow as tf
import numpy as np
from PIL import Image
import mnist_forward
import mnist_backward
# load the trained model and predict the digit for a preprocessed image
def restore_model(testPicArr):
    # rebuild the computation graph
    with tf.Graph().as_default() as tg:
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
        y = mnist_forward.forward(x, None)   # forward pass to compute y
        preValue = tf.argmax(y, 1)           # the index of the largest output is the predicted digit
        # instantiate a saver that restores the moving-average values
        variable_averages = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        # run the session
        with tf.Session() as sess:
            # load the checkpoint
            ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                # run the prediction
                preValue = sess.run(preValue, feed_dict={x: testPicArr})
                return preValue
            else:
                print("No checkpoint file is found")
                return -1
def pre_pic(picName):
    img = Image.open(picName)                     # open the image
    reIm = img.resize((28, 28), Image.ANTIALIAS)  # resize to 28*28
    im_arr = np.array(reIm.convert("L"))          # convert to grayscale
    threshold = 50
    # invert the colors and binarize the image
    for i in range(28):
        for j in range(28):
            im_arr[i][j] = 255 - im_arr[i][j]
            if (im_arr[i][j] < threshold):
                im_arr[i][j] = 0
            else:
                im_arr[i][j] = 255
    # reshape into a 1-row, 784-column matrix
    nm_arr = im_arr.reshape([1, 784])
    nm_arr = nm_arr.astype(np.float32)
    img_ready = np.multiply(nm_arr, 1.0 / 255.0)  # normalize to [0, 1]
    return img_ready
def application():
    testNum = int(input("input the number of test pictures:"))
    for i in range(testNum):
        testPic = input("the path of the test picture:")
        testPicArr = pre_pic(testPic)
        preValue = restore_model(testPicArr)
        print("the prediction number is: " + str(preValue))
def main():
    application()
if __name__ == "__main__":
    main()
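Before running the full application, pre_pic can be checked on its own with a sketch like this ("my_digit.png" is just a placeholder file name):

from mnist_app import pre_pic

arr = pre_pic("my_digit.png")   # hypothetical picture containing a single handwritten digit
print(arr.shape)                # (1, 784)
print(arr.min(), arr.max())     # after binarization and scaling, every value is 0.0 or 1.0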
11. Testing on my own data
I used this picture, then cropped each digit separately so that every image contains exactly one digit.
The result looks like this: 1 is easily misclassified as 7, most likely because I trained for relatively few steps and the model is not yet stable.