Previously we processed the MNIST handwritten-digit dataset at the raw-tensor level; now we will process FashionMnist at the "layer" level.
Working with layers is far more convenient than working with raw tensors: we only need layers.Dense() to wrap the five-layer network that steps the dimensionality down, instead of spelling out the tensor expression
out = relu(relu(relu(X @ W1 + b1) @ W2 + b2) @ W3 + b3)
by hand; on top of that, computing the gradients becomes much simpler.
The raw-tensor implementation is linked here: https://blog.csdn.net/weixin_43580130/article/details/107960585
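For comparison, here is a minimal sketch of one forward pass in both styles (the shapes and the names w1, b1, fc1 are illustrative assumptions, not taken from the linked post):

import tensorflow as tf
from tensorflow.keras import layers

x = tf.random.normal([4, 784])  # a toy batch of 4 flattened images

# Raw-tensor style: every weight is created and threaded through by hand
# (illustrative shapes; the linked post may differ)
w1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1))
b1 = tf.Variable(tf.zeros([256]))
w2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1))
b2 = tf.Variable(tf.zeros([128]))
out = tf.nn.relu(tf.nn.relu(x @ w1 + b1) @ w2 + b2)

# Layer style: each Dense layer owns and initializes its own w and b
fc1 = layers.Dense(256, activation=tf.nn.relu)
fc2 = layers.Dense(128, activation=tf.nn.relu)
out = fc2(fc1(x))  # same computation, no manual bookkeeping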
FashionMnist is a dataset of exactly the same size as Mnist (60,000 training and 10,000 test images, each a 28x28 grayscale picture); the only difference is that the images show clothes, hats, shoes and the like instead of handwritten digits.
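For reference, the ten integer labels map to the following class names (this is the standard FashionMnist label order):

# Standard FashionMnist class names, indexed by the integer label 0-9
CLASS_NAMES = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
print(CLASS_NAMES[9])  # => Ankle boot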
The full code is as follows:
# Hands-on FashionMnist classification at the "layer" level
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def preprocess(x, y):  # preprocess: scale pixels to [0, 1] and cast labels to int32
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x, y
(x, y), (x_test, y_test) = datasets.fashion_mnist.load_data()  # load the dataset
print(x.shape, y.shape)
batchsz = 128
db = tf.data.Dataset.from_tensor_slices((x,y))
db = db.map(preprocess).shuffle(10000).batch(batchsz)  # preprocess each (x, y) pair, shuffle, and batch
db_test = tf.data.Dataset.from_tensor_slices((x_test,y_test))
db_test = db_test.map(preprocess).batch(batchsz)
db_iter = iter(db)
sample = next(db_iter)
print('batch:', sample[0].shape, sample[1].shape)
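# With batchsz = 128, the print above is expected to show:
#   batch: (128, 28, 28) (128,)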
model = Sequential([  # container holding the five-layer network
    layers.Dense(256, activation=tf.nn.relu),  # [b, 784] => [b, 256], dimensionality reduction
    layers.Dense(128, activation=tf.nn.relu),  # [b, 256] => [b, 128]
    layers.Dense(64, activation=tf.nn.relu),   # [b, 128] => [b, 64]
    layers.Dense(32, activation=tf.nn.relu),   # [b, 64] => [b, 32]
    layers.Dense(10)                           # [b, 32] => [b, 10], 330 params = 32*10 + 10
])
model.build(input_shape=[None, 28*28])  # declare the input shape
model.summary()
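# model.summary() should list the per-layer parameter counts; summing them:
# 784*256+256 + 256*128+128 + 128*64+64 + 64*32+32 + 32*10+10 = 244,522 in total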
# w = w - lr*grad
optimizer = optimizers.Adam(learning_rate=1e-3)  # optimizer that updates the parameters
def main():
    for epoch in range(30):  # train for 30 epochs
        for step, (x, y) in enumerate(db):  # forward pass over each training batch
            # x: [b, 28, 28] => [b, 784]
            # y: [b]
            x = tf.reshape(x, [-1, 28*28])
            with tf.GradientTape() as tape:
                # [b, 784] => [b, 10]
                logits = model(x)
                y_onehot = tf.one_hot(y, depth=10)
                # MSE(y_onehot, logits) has shape [b]; average it to a scalar
                loss_mse = tf.reduce_mean(tf.losses.MSE(y_onehot, logits))
                # from_logits=True fuses the softmax into the loss for numerical stability
                loss_ce = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss_ce = tf.reduce_mean(loss_ce)
            grads = tape.gradient(loss_ce, model.trainable_variables)  # compute the gradients
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            if step % 100 == 0:
                print(epoch, step, 'loss:', float(loss_ce), float(loss_mse))  # print the losses
        # test
        total_correct = 0
        total_num = 0
        for x, y in db_test:  # forward pass over each test batch
            # x: [b, 28, 28] => [b, 784]
            # y: [b]
            x = tf.reshape(x, [-1, 28*28])
            # [b, 10]
            logits = model(x)
            # logits => prob, [b, 10]; softmax is monotonic, so argmax over the raw
            # logits would give the same prediction
            prob = tf.nn.softmax(logits, axis=1)
            # [b, 10] => [b], int64
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)
            # pred: [b]
            # y: [b]
            # correct: [b], True: equal, False: not equal
            correct = tf.equal(pred, y)  # compare predictions against the ground truth
            correct = tf.reduce_sum(tf.cast(correct, dtype=tf.int32))
            total_correct += int(correct)  # tensor => Python int
            total_num += x.shape[0]
        acc = total_correct / total_num  # accuracy over the whole test set
        print(epoch, 'test acc:', acc)
if __name__ == '__main__':
    main()
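As a side note, the same network can also be trained without the hand-written loop via Keras's built-in compile()/fit() API. A minimal sketch reusing the db/db_test pipelines built above (an alternative to, not part of, the script above; sparse labels remove the need for one-hot encoding):

# Equivalent compile/fit version; assumes db and db_test from the script above
model2 = Sequential([
    layers.Flatten(input_shape=(28, 28)),  # [b, 28, 28] => [b, 784], replaces the manual reshape
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(64, activation='relu'),
    layers.Dense(32, activation='relu'),
    layers.Dense(10)
])
model2.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
               loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
               metrics=['accuracy'])
model2.fit(db, epochs=30, validation_data=db_test)  # reports loss and accuracy per epoch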