通用步骤
- 准备参数
- 定义损失函数
- 定义优化器
- 正式训练
"""
tf的mnist(手写数字)数据集
tf.nn下实现的softmax算法
定义输入数据
定义参数
编写交叉熵损失参数
梯度下降优化器处理
统计预测的准确率
正式训练
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Hyperparameters
n_epochs = 1000
learning_rate = 0.001
batch_size = 100

# Load MNIST (downloaded/cached under MNIST_data_bak/); one_hot=True makes
# each label a length-10 one-hot vector, matching the softmax output below.
data = input_data.read_data_sets("MNIST_data_bak/", one_hot=True)

# Inputs: 28x28 images flattened to 784 features; 10 digit classes.
X = tf.placeholder(dtype=tf.float32, shape=(None, 784))
y = tf.placeholder(dtype=tf.float32, shape=(None, 10))

# Model parameters. Zero init is fine here: a single softmax layer has no
# symmetry-breaking problem (unlike multi-layer networks).
W = tf.Variable(tf.zeros((784, 10)))
b = tf.Variable(tf.zeros([10]))

# Softmax prediction: per-class probabilities for each input row.
y_pred = tf.nn.softmax(tf.matmul(X, W) + b)

# Cross-entropy loss, averaged over the batch.
# NOTE: taking log(softmax) by hand is numerically unstable — a probability
# that underflows to 0 gives log(0) = -inf and a NaN loss. Clipping guards
# against that; the fused tf.nn.softmax_cross_entropy_with_logits(logits=...)
# is the canonical stable formulation. `axis` replaces the deprecated
# `reduction_indices` alias.
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y * tf.log(tf.clip_by_value(y_pred, 1e-10, 1.0)), axis=1))

# Plain gradient-descent optimizer minimizing the loss.
training_op = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate).minimize(cross_entropy)

# Accuracy: fraction of rows whose arg-max prediction matches the label.
prediction = tf.equal(tf.argmax(y_pred, axis=1), tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))

init = tf.global_variables_initializer()

# Training loop: one mini-batch step per epoch, with progress printed on
# the current batch and the validation set; final line is test accuracy.
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        batch_xs, batch_ys = data.train.next_batch(batch_size)
        sess.run(training_op, feed_dict={X: batch_xs, y: batch_ys})
        print("Epoch ", epoch)
        print("cross_entropy:", cross_entropy.eval({X: batch_xs, y: batch_ys}))
        print("TrainSet Accuracy", accuracy.eval({X: batch_xs, y: batch_ys}))
        print("ValidSet Accuracy", accuracy.eval(
            {X: data.validation.images, y: data.validation.labels}))
    print(accuracy.eval({X: data.test.images, y: data.test.labels}))