1. Forward propagation (hands-on three-layer gradient descent)
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
# suppress TensorFlow C++ log messages
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# load the dataset
# x: [60k, 28, 28]
# y: [60k]
(x, y), _ = datasets.mnist.load_data()
# convert to TensorFlow tensors
# x: [0, 255] ==> [0, 1.]
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
y = tf.convert_to_tensor(y, dtype=tf.int32)
# inspect shapes, dtypes and value ranges
print(x.shape, y.shape, x.dtype, y.dtype)
print(tf.reduce_min(x), tf.reduce_max(x))
print(tf.reduce_min(y), tf.reduce_max(y))
# build batches of 128 samples
train_db = tf.data.Dataset.from_tensor_slices((x, y)).batch(128)
train_iter = iter(train_db)
sample = next(train_iter)
print("batch:", sample[0].shape, sample[1].shape)
# [b, 784] => [b, 256] => [b, 128] => [b, 10]
# weights: [dim_in, dim_out], biases: [dim_out]
w1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1))
b1 = tf.Variable(tf.zeros([256]))
w2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1))
b2 = tf.Variable(tf.zeros([128]))
w3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]))
lr = 1e-3
for epoch in range(10):  # iterate over the whole dataset
    for step, (x, y) in enumerate(train_db):  # iterate over batches
        # x: [128, 28, 28], y: [128]
        # flatten: [b, 28, 28] => [b, 28*28]
        x = tf.reshape(x, [-1, 28 * 28])
        with tf.GradientTape() as tape:
            # x: [b, 28*28]
            # h1 = x @ w1 + b1
            # [b, 784] @ [784, 256] + [256] => [b, 256] + [b, 256] => [b, 256]
            h1 = x @ w1 + tf.broadcast_to(b1, [x.shape[0], 256])
            h1 = tf.nn.relu(h1)
            # [b, 256] => [b, 128]
            h2 = h1 @ w2 + b2
            h2 = tf.nn.relu(h2)
            # [b, 128] => [b, 10]
            out = h2 @ w3 + b3
            # compute the loss
            # out: [b, 10]
            # y: [b] => [b, 10]
            y_onehot = tf.one_hot(y, depth=10)
            # MSE
            # [b, 10]
            loss = tf.square(y_onehot - out)
            # mean: scalar
            loss = tf.reduce_mean(loss)
        # compute gradients
        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
        # w = w - lr * w_grad (in-place update keeps the tf.Variable type)
        w1.assign_sub(lr * grads[0])
        b1.assign_sub(lr * grads[1])
        w2.assign_sub(lr * grads[2])
        b2.assign_sub(lr * grads[3])
        w3.assign_sub(lr * grads[4])
        b3.assign_sub(lr * grads[5])
        if step % 100 == 0:
            print(epoch, step, 'loss:', float(loss))
2. Tensor concatenation and splitting
① tf.concat
tf.concat merges tensors along an existing axis; the inputs must have the same rank and may differ in at most one dimension, the concatenation axis.
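For example, a minimal sketch (the shapes are illustrative):
a = tf.ones([4, 35, 8])
b = tf.ones([2, 35, 8])
# only axis 0 differs, so the tensors can be concatenated along axis 0
c = tf.concat([a, b], axis=0)
print(c.shape)  # (6, 35, 8)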
② tf.stack
tf.stack creates a new axis, so every input tensor must have exactly the same shape.
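For example, a minimal sketch (the shapes are illustrative):
a = tf.ones([35, 8])
b = tf.ones([35, 8])
# identical shapes, so a new axis 0 can be created
c = tf.stack([a, b], axis=0)
print(c.shape)  # (2, 35, 8)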
③ tf.unstack
tf.unstack fully unpacks the specified axis, returning a Python list with one tensor per entry along that axis.
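For example, a minimal sketch (the shapes are illustrative):
c = tf.ones([2, 35, 8])
# unpack axis 0 into a list of 2 tensors, each of shape (35, 8)
res = tf.unstack(c, axis=0)
print(len(res), res[0].shape)  # 2 (35, 8)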
④ tf.split
tf.split lets you choose the split freely: pass an integer for equal-sized parts, or a list for arbitrary part sizes.
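For example, a minimal sketch (the shapes are illustrative):
c = tf.ones([4, 35, 8])
# split into 2 equal parts along axis 0
res = tf.split(c, num_or_size_splits=2, axis=0)
print(res[0].shape)  # (2, 35, 8)
# or specify each part's size explicitly
res = tf.split(c, num_or_size_splits=[1, 2, 1], axis=0)
print(res[0].shape, res[1].shape, res[2].shape)  # (1, 35, 8) (2, 35, 8) (1, 35, 8)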
3. Data statistics
① tf.norm (1-norm and 2-norm)
2-norm: ||x||_2 = sqrt(Σ_i x_i^2), the square root of the sum of squares (the default for tf.norm)
1-norm: ||x||_1 = Σ_i |x_i|, the sum of absolute values
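For example, a minimal sketch (the input values are illustrative):
a = tf.ones([2, 2])
# 2-norm (the default): sqrt(1+1+1+1) = 2.0
print(tf.norm(a))
print(tf.norm(a, ord=2))
# 1-norm: |1|+|1|+|1|+|1| = 4.0
print(tf.norm(a, ord=1))
# norms can also be computed along a given axis, e.g. per row
print(tf.norm(a, ord=2, axis=1))  # [1.4142135 1.4142135]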