TensorFlow 2.0 Study Notes (2)

The code is commented throughout; copy it into PyCharm or VS Code to follow along.

import  tensorflow as tf
from    tensorflow import keras
from    tensorflow.keras import datasets
import  os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # '2' silences TensorFlow's INFO and WARNING log messages
# x: [60000, 28, 28],
# y: [60000]
(x, y), _ = datasets.mnist.load_data()

# x: [0-255] => [0-1.0]
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
y = tf.convert_to_tensor(y, dtype=tf.int32)

print("x.shape:{} y.shape:{} x.dtype:{} y.dtype:{}".format(x.shape, y.shape, x.dtype, y.dtype))
print(tf.reduce_min(x), tf.reduce_max(x)) # with no axis, tf.reduce_max returns the maximum element over all dimensions of the tensor
print(tf.reduce_min(y), tf.reduce_max(y))
x.shape:(60000, 28, 28) y.shape:(60000,) x.dtype:<dtype: 'float32'> y.dtype:<dtype: 'int32'>
tf.Tensor(0.0, shape=(), dtype=float32) tf.Tensor(1.0, shape=(), dtype=float32)
tf.Tensor(0, shape=(), dtype=int32) tf.Tensor(9, shape=(), dtype=int32)
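
With no axis argument, tf.reduce_min / tf.reduce_max collapse the whole tensor to a scalar, which is what the printout above shows; pass axis to reduce along a single dimension instead. A minimal sketch (the tensor t below is made up for illustration):

t = tf.constant([[1., 5.], [3., 2.]])
print(tf.reduce_max(t))          # 5.0, the global maximum
print(tf.reduce_max(t, axis=0))  # [3. 5.], column-wise maxima
print(tf.reduce_max(t, axis=1))  # [5. 3.], row-wise maxima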
train_db = tf.data.Dataset.from_tensor_slices((x,y)).batch(128)
train_iter = iter(train_db) # build a Python iterator over the dataset
sample = next(train_iter)
print('batch:', sample[0].shape, sample[1].shape)
batch: (128, 28, 28) (128,)
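
from_tensor_slices slices along the first dimension, so each dataset element is one (image, label) pair, and .batch(128) groups 128 pairs per training step; 60000 / 128 ≈ 469 steps per epoch, which matches the step counts in the training log below. In practice the data is usually shuffled as well; a possible variant (the buffer size of 10000 is an arbitrary choice here):

train_db = tf.data.Dataset.from_tensor_slices((x, y)).shuffle(10000).batch(128)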
# [b, 784] => [b, 256] => [b, 128] => [b, 10]
# [dim_in, dim_out], [dim_out]
# tf.random.truncated_normal draws values within two standard deviations of the mean,
# i.e. in (μ-2σ, μ+2σ), so all initial weights stay close to the mean; this helps
# avoid exploding gradients that would produce NaN losses
w1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1)) 
b1 = tf.Variable(tf.zeros([256]))
w2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1))
b2 = tf.Variable(tf.zeros([128]))
w3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]))
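
The two-standard-deviation property of truncated_normal is easy to verify empirically; a quick sanity check, separate from the training script:

samples = tf.random.truncated_normal([100000], mean=0., stddev=0.1)
print(tf.reduce_min(samples), tf.reduce_max(samples))  # both fall inside (-0.2, 0.2)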

lr = 1e-3

for epoch in range(10): # iterate over the dataset for 10 epochs
    for step, (x, y) in enumerate(train_db): # for every batch
        # x:[128, 28, 28]
        # y: [128]

        # [b, 28, 28] => [b, 28*28]
        x = tf.reshape(x, [-1, 28*28])

        with tf.GradientTape() as tape: # tf.Variable
            # x: [b, 28*28]
            # h1 = x@w1 + b1
            # [b, 784]@[784, 256] + [256] => [b, 256] + [256] => [b, 256] + [b, 256]
            h1 = x@w1 + tf.broadcast_to(b1, [x.shape[0], 256])
            h1 = tf.nn.relu(h1)
            # [b, 256] => [b, 128]
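            # (the explicit broadcast_to above is optional: + b2 here relies on implicit
            # broadcasting, which expands [128] to [b, 128] automatically)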
            h2 = h1@w2 + b2
            h2 = tf.nn.relu(h2)
            # [b, 128] => [b, 10]
            out = h2@w3 + b3

            # compute loss
            # out: [b, 10]
            # y: [b] => [b, 10]
            y_onehot = tf.one_hot(y, depth=10)

            # mse = mean((y_onehot - out)^2), averaged over the batch and the 10 classes
            # [b, 10]
            loss = tf.square(y_onehot - out)
            # mean: scalar
            loss = tf.reduce_mean(loss)
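            # note: MSE on raw logits keeps this example simple; softmax with
            # cross-entropy is the more usual loss for classification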

        # compute gradients
        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
        # print(grads)
        # w1 = w1 - lr * w1_grad
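        # assign_sub updates the variable in place, so w1 stays a tf.Variable;
        # writing w1 = w1 - lr * grads[0] would rebind w1 to a plain Tensor,
        # which GradientTape would no longer watch automatically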
        w1.assign_sub(lr * grads[0])
        b1.assign_sub(lr * grads[1])
        w2.assign_sub(lr * grads[2])
        b2.assign_sub(lr * grads[3])
        w3.assign_sub(lr * grads[4])
        b3.assign_sub(lr * grads[5])


        if step % 100 == 0:
            print(epoch, step, 'loss:', float(loss))
0 0 loss: 0.30855444073677063
0 100 loss: 0.1943640410900116
0 200 loss: 0.20688548684120178
0 300 loss: 0.18792247772216797
0 400 loss: 0.1854521483182907
1 0 loss: 0.15025171637535095
1 100 loss: 0.14979198575019836
1 200 loss: 0.1626807451248169
1 300 loss: 0.15183447301387787
1 400 loss: 0.15254397690296173
2 0 loss: 0.1266193687915802
2 100 loss: 0.1301368772983551
2 200 loss: 0.1398109793663025
2 300 loss: 0.13149653375148773
2 400 loss: 0.13328148424625397
3 0 loss: 0.1121378168463707
3 100 loss: 0.11777482181787491
3 200 loss: 0.1255255490541458
3 300 loss: 0.11861814558506012
3 400 loss: 0.12068285793066025
4 0 loss: 0.10235831886529922
4 100 loss: 0.10916677862405777
4 200 loss: 0.11564607918262482
4 300 loss: 0.1097228154540062
4 400 loss: 0.11179308593273163
5 0 loss: 0.09519632160663605
5 100 loss: 0.10276556015014648
5 200 loss: 0.10830307006835938
5 300 loss: 0.10308554023504257
5 400 loss: 0.10502723604440689
6 0 loss: 0.0897449478507042
6 100 loss: 0.09781351685523987
6 200 loss: 0.10259167104959488
6 300 loss: 0.09793788939714432
6 400 loss: 0.09971939027309418
7 0 loss: 0.08542229235172272
7 100 loss: 0.09375528991222382
7 200 loss: 0.09798217564821243
7 300 loss: 0.0937323346734047
7 400 loss: 0.09538793563842773
8 0 loss: 0.08188784122467041
8 100 loss: 0.09030032157897949
8 200 loss: 0.09406483173370361
8 300 loss: 0.09022961556911469
8 400 loss: 0.09179739654064178
9 0 loss: 0.07893332093954086
9 100 loss: 0.08734345436096191
9 200 loss: 0.09066981077194214
9 300 loss: 0.08724066615104675
9 400 loss: 0.08874273300170898
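
The script above discards the test split (the _ in load_data). As a follow-up sketch, not part of the original post, accuracy on the 10,000 held-out test images can be computed by running them through the trained weights (names such as x_test and pred are illustrative):

(_, _), (x_test, y_test) = datasets.mnist.load_data()
x_test = tf.convert_to_tensor(x_test, dtype=tf.float32) / 255.
y_test = tf.convert_to_tensor(y_test, dtype=tf.int32)

x_flat = tf.reshape(x_test, [-1, 28*28])
h1 = tf.nn.relu(x_flat @ w1 + b1)
h2 = tf.nn.relu(h1 @ w2 + b2)
out = h2 @ w3 + b3  # [10000, 10] logits

pred = tf.argmax(out, axis=1, output_type=tf.int32)
acc = tf.reduce_mean(tf.cast(tf.equal(pred, y_test), tf.float32))
print('test acc:', float(acc))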

Reference: 龙龙老师
