The MNIST Dataset
A TensorFlow implementation of forward propagation, backpropagation, and testing on the MNIST dataset (a network of four fully connected layers).
MNIST contains 60,000 training images and 10,000 test images; each image is 28*28 pixels.
1. Load the dataset and preprocess it
2. Build the parameters (w, b)
3. Forward-propagate through the activation functions to get the output
4. Compute the loss (between the ground truth and the output)
5. Backpropagate the gradients to update the parameters
6. Test
The full implementation of these steps follows.
import tensorflow as tf
from tensorflow.keras import datasets
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow INFO and WARNING log output
# x: [60k, 28, 28], x_test: [10k, 28, 28]
# y: [60k], y_test: [10k]
(x, y), (x_test, y_test) = datasets.mnist.load_data()
# convert the numpy arrays to tensors
# x: [0~255] => [0~1.], normalization
# y: integer labels for the ten digit classes 0-9
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
y = tf.convert_to_tensor(y, dtype=tf.int32)
x_test = tf.convert_to_tensor(x_test, dtype=tf.float32) / 255.
y_test = tf.convert_to_tensor(y_test, dtype=tf.int32)
# inspect dtype, minimum, and maximum of x and y
print(x.shape, y.shape, x.dtype, y.dtype)
print(tf.reduce_min(x), tf.reduce_max(x))
print(tf.reduce_min(y), tf.reduce_max(y))
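# for reference (my addition), the prints above should report approximately:
# shapes (60000, 28, 28) and (60000,), dtypes float32 and int32,
# x ranging over [0.0, 1.0] and y over [0, 9]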
# build datasets; batch(128) makes each iteration yield 128 images at once
train_db = tf.data.Dataset.from_tensor_slices((x,y)).batch(128)
test_db = tf.data.Dataset.from_tensor_slices((x_test,y_test)).batch(128)
# the iterator steps through the dataset until it is exhausted; each step yields x: [128, 28, 28], y: [128]
train_iter = iter(train_db)
sample = next(train_iter)
print('batch:', sample[0].shape, sample[1].shape)
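# a common refinement (my addition, not in the original): shuffle the training
# set so the batches differ between epochs, e.g.
# train_db = tf.data.Dataset.from_tensor_slices((x, y)).shuffle(10000).batch(128)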
# build the parameters
# [b, 784] => [b, 256] => [b, 128] => [b, 64] => [b, 10]
# [dim_in, dim_out], [dim_out]
# hidden layer 1
# truncated-normal random initialization
w1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1))
b1 = tf.Variable(tf.zeros([256]))
# hidden layer 2
w2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1))
b2 = tf.Variable(tf.zeros([128]))
# hidden layer 3
w3 = tf.Variable(tf.random.truncated_normal([128, 64], stddev=0.1))
b3 = tf.Variable(tf.zeros([64]))
# output layer
w4 = tf.Variable(tf.random.truncated_normal([64, 10], stddev=0.1))
b4 = tf.Variable(tf.zeros([10]))
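# note (my addition): truncated_normal re-draws any sample that falls more than
# two standard deviations from the mean; the small stddev=0.1 keeps the initial
# outputs small, which helps avoid exploding gradients early in training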
lr = 1e-3  # learning rate
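# plain stochastic gradient descent is applied by hand below:
# each parameter is updated as  param <- param - lr * dL/dparam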
for epoch in range(10):  # iterate over the training set for 10 epochs
    for step, (x, y) in enumerate(train_db):  # for every batch
        # x: [128, 28, 28]
        # y: [128]
        # [b, 28, 28] => [b, 28*28], flatten each image into a vector
        x = tf.reshape(x, [-1, 28*28])
        # forward propagation
        with tf.GradientTape() as tape:  # the tape tracks tf.Variable by default, which is why w and b were created as tf.Variable rather than plain tensors
            # x: [b, 28*28]
            # hidden layer 1: [b, 28*28] => [b, 256]
            h1 = x @ w1 + tf.broadcast_to(b1, [x.shape[0], 256])
            h1 = tf.nn.relu(h1)
            # hidden layer 2: [b, 256] => [b, 128] (bias broadcasting is implicit from here on)
            h2 = h1 @ w2 + b2
            h2 = tf.nn.relu(h2)
            # hidden layer 3: [b, 128] => [b, 64]
            h3 = h2 @ w3 + b3
            h3 = tf.nn.relu(h3)
            # output layer: [b, 64] => [b, 10]
            h4 = h3 @ w4 + b4
            out = h4
            # compute the loss
            # out: [b, 10]
            # y: [b] => [b, 10], e.g. y=3 => [0,0,0,1,0,0,0,0,0,0]
            y_onehot = tf.one_hot(y, depth=10)
            # mean squared error: loss = mean((y_onehot - out)^2)
            # [b, 10]
            loss = tf.square(y_onehot - out)
            # mean over the batch and the 10 classes: a scalar
            loss = tf.reduce_mean(loss)
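            # an alternative worth knowing (my addition, not part of the original
            # walkthrough): for classification, cross-entropy on the logits usually
            # trains better than MSE, e.g.
            # loss = tf.reduce_mean(
            #     tf.losses.categorical_crossentropy(y_onehot, out, from_logits=True))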
        # backward propagation: update the parameters
        # compute the gradient of the loss w.r.t. every parameter
        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3, w4, b4])
        # print(grads)
        # w1 = w1 - lr * w1_grad
        w1.assign_sub(lr * grads[0])  # in-place update, so w1 stays a tf.Variable
        b1.assign_sub(lr * grads[1])
        w2.assign_sub(lr * grads[2])
        b2.assign_sub(lr * grads[3])
        w3.assign_sub(lr * grads[4])
        b3.assign_sub(lr * grads[5])
        w4.assign_sub(lr * grads[6])
        b4.assign_sub(lr * grads[7])
        if step % 100 == 0:  # report the loss every 100 steps
            print(epoch, step, 'loss:', float(loss))
    # test
    # evaluate with the parameters learned so far: [w1, b1, w2, b2, w3, b3, w4, b4]
    total_correct, total_num = 0, 0
    for step, (x, y) in enumerate(test_db):
        # [b, 28, 28] => [b, 28*28]
        x = tf.reshape(x, [-1, 28*28])
        # [b, 784] => [b, 256] => [b, 128] => [b, 64] => [b, 10]
        h1 = tf.nn.relu(x @ w1 + b1)
        h2 = tf.nn.relu(h1 @ w2 + b2)
        h3 = tf.nn.relu(h2 @ w3 + b3)
        out = h3 @ w4 + b4
        # out: [b, 10], real-valued logits
        # prob: [b, 10], values in [0, 1]; softmax turns the logits into probabilities
        prob = tf.nn.softmax(out, axis=1)
        # [b, 10] => [b], index of the largest probability
        pred = tf.argmax(prob, axis=1)
        pred = tf.cast(pred, dtype=tf.int32)  # argmax returns int64; cast to int32 to match y
        # print(pred.dtype, y.dtype)
        # y: [b]
        # [b], int32: equal compares prediction and ground truth elementwise (True/False), then cast to 0/1
        correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
        correct = tf.reduce_sum(correct)  # each correct prediction counts as 1; sum them for this batch
        total_correct += int(correct)  # running total of correct predictions
        total_num += x.shape[0]  # running total of test samples
    acc = total_correct / total_num  # test accuracy
    print('test acc:', acc)
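For comparison, the eight assign_sub calls above are exactly what an optimizer object does for you. A minimal sketch of the same update with a built-in optimizer (my addition, assuming tf.keras.optimizers.SGD; not part of the original walkthrough):

params = [w1, b1, w2, b2, w3, b3, w4, b4]
optimizer = tf.keras.optimizers.SGD(learning_rate=lr)  # same step size as above
# inside the training loop, replacing the manual updates:
# grads = tape.gradient(loss, params)
# optimizer.apply_gradients(zip(grads, params))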