import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
mnist = input_data.read_data_sets("dataset/",one_hot=True)
#print(mnist.train.num_examples,mnist.validation.num_examples,mnist.test.num_examples)
#print(mnist.train.images.shape,mnist.train.labels.shape)
x=tf.placeholder(tf.float32,[None,784],name="X") #输入
y=tf.placeholder(tf.float32,[None,10],name="Y") #输出
W=tf.Variable(tf.random_normal([784,10],name="W")) #构建权重
b=tf.Variable(tf.zeros([10]),name="b") #构建偏置
forward=tf.matmul(x,W)+b #构建前向乘
pred=tf.nn.softmax(forward) #softmax分类
train_epochs=50 #训练轮数
batch_size=100 #单次训练样本数
total_batch=int(mnist.train.num_examples/batch_size) #一轮训练有多少次
display_step=1 #显示粒度
learning_rate=0.01 #学习率
loss_function=tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred),reduction_indices=1)) #定义损伤函数
optimizer=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function) #选择梯度下降优化器
#检查预测类别tf.argmax(pred,1)与实际类别tf.argmax(y,1)的匹配情况
correct_prediction=tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
#准确率,将布尔值装换为浮点数,并计算平均值
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
sess=tf.Session()#声明会话
init=tf.global_variables_initializer()#变量初始化
sess.run(init)
#开始训练
# --- Training loop ---
# (The original file had the loop bodies flattened to column 0, which is
# an IndentationError; the nesting below restores the intended structure.)
for epoch in range(train_epochs):
    for batch in range(total_batch):
        xs, ys = mnist.train.next_batch(batch_size)    # fetch next mini-batch
        sess.run(optimizer, feed_dict={x: xs, y: ys})  # one gradient step

    # After total_batch steps, evaluate loss and accuracy on the
    # validation set (fed in a single pass, not batched).
    loss, acc = sess.run([loss_function, accuracy],
                         feed_dict={x: mnist.validation.images,
                                    y: mnist.validation.labels})

    # Print progress every display_step epochs.
    if (epoch + 1) % display_step == 0:
        print("Train Epoch:", '%02d' % (epoch + 1),
              "Loss=", "{:.9f}".format(loss),
              "Accuracy=", "{:.4f}".format(acc))

print("Train Finished!")
sess.close()  # release the session's resources