手写分类识别二:多层神经网络
一、 单隐藏层全连接神经网络
-
载入数据
# Load the MNIST dataset.
import tensorflow as tf

# Helper module shipped with TensorFlow 1.x for downloading/reading MNIST.
import tensorflow.examples.tutorials.mnist.input_data as input_data

# Read the dataset from the given directory; labels come back one-hot encoded.
mnist = input_data.read_data_sets("C:/Users/grid/Desktop/MNIST_data/", one_hot=True)
-
构建输入层
# Input layer: placeholders for flattened 28x28 images (784 pixels)
# and their one-hot labels (10 digit classes).
x = tf.placeholder(tf.float32, [None, 784], name="X")
y = tf.placeholder(tf.float32, [None, 10], name="Y")
-
构建隐藏层
# Hidden layer: H1_NN fully-connected ReLU units.
H1_NN = 256  # number of neurons in the hidden layer

W1 = tf.Variable(tf.random_normal([784, H1_NN]))  # weights: 784 rows, H1_NN columns
b1 = tf.Variable(tf.zeros([H1_NN]))               # biases start at zero
Y1 = tf.nn.relu(tf.matmul(x, W1) + b1)            # matmul + bias, ReLU activation
-
构建输出层
# Output layer: project the hidden activations to 10 logits,
# then softmax to per-class probabilities.
W2 = tf.Variable(tf.random_normal([H1_NN, 10]))
b2 = tf.Variable(tf.zeros([10]))

forward = tf.matmul(Y1, W2) + b2  # raw logits
pred = tf.nn.softmax(forward)     # class probabilities
-
训练模型
# --- Training setup ---

# Loss: cross-entropy computed directly from the raw logits.
# FIX: the original form  -reduce_sum(y * tf.log(pred))  is numerically
# unstable — softmax can output an exact 0, making log(0) = -inf and the
# loss NaN. tf.nn.softmax_cross_entropy_with_logits fuses softmax and log
# in a numerically stable way; `forward` are the pre-softmax logits.
loss_function = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=forward))

# Training hyper-parameters.
train_epochs = 40    # number of passes over the training set
batch_size = 50      # samples per gradient step
total_batch = int(mnist.train.num_examples / batch_size)  # batches per epoch
display_step = 1     # print progress every N epochs
learning_rate = 0.01

# Optimizer: Adam minimizing the cross-entropy loss.
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss_function)

# Accuracy: fraction of samples whose arg-max prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(pred, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
- 开始模型训练
# --- Run training ---
from time import time

startTime = time()  # wall-clock start, for reporting total training time

# Create the session and initialize all variables.
sess = tf.Session()
sess.run(tf.global_variables_initializer())

for epoch in range(train_epochs):
    # One full pass over the training set, batch by batch.
    for batch in range(total_batch):
        xs, ys = mnist.train.next_batch(batch_size)      # fetch one mini-batch
        sess.run(optimizer, feed_dict={x: xs, y: ys})    # one gradient step

    # After all batches of the epoch, evaluate on the validation split.
    loss, acc = sess.run([loss_function, accuracy],
                         feed_dict={x: mnist.validation.images,
                                    y: mnist.validation.labels})

    if (epoch + 1) % display_step == 0:
        print("Train Epoch:", "%02d" % (epoch + 1),
              "loss=", "{:.9f}".format(loss),
              "Accuracy=", "{:.4f}".format(acc))

# Report total elapsed training time.
duration = time() - startTime
print("Train Finished takes:", "{:.2f}".format(duration))
整个项目代码:
## Load the data.
import tensorflow as tf
# Helper module shipped with TensorFlow for reading the MNIST dataset.
import tensorflow.examples.tutorials.mnist.input_data as input_data
# Read MNIST from the given directory; one_hot=True returns one-hot labels.
mnist = input_data.read_data_sets("C:/Users/grid/Desktop/MNIST_data/",one_hot=True)# read data (dataset directory, label format)
## Build the input layer.
# Placeholders for the flattened 28x28 images (784 pixels) and one-hot labels (10 classes).
x= tf.placeholder(tf.float32,[None,784],name="X")
y= tf.placeholder(tf.float32,[None,10],name="Y")
## Build the hidden layer.
# Number of neurons in the hidden layer.
H1_NN =256
W1= tf.Variable(tf.random_normal([784,H1_NN])) # weights: 784 rows, H1_NN columns
b1= tf.Variable(tf.zeros([H1_NN])) # biases initialized to zero
Y1= tf.nn.relu(tf.matmul(x,W1)+b1) # matrix multiply + bias, ReLU activation
## Build the output layer.
W2 = tf.Variable(tf.random_normal([H1_NN,10])) # weights: H1_NN rows, 10 class columns
b2 = tf.Variable(tf.zeros([10])) # biases initialized to zero
forward = tf.matmul(Y1,W2)+b2 # raw logits
pred = tf.nn.softmax(forward) # softmax turns logits into class probabilities
##训练模型
#* 定义损失函数
#交叉熵
loss_function = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pre