TensorFlow Dataset Loading

keras.datasets
Built-in datasets: MNIST, IMDB, CIFAR-10

tf.nn.softmax normalizes a vector of logits so that all of the resulting probabilities sum to 1. A quick sanity check (the logit values here are illustrative):
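
import tensorflow as tf

logits = tf.constant([2.0, 1.0, 0.1])
probs = tf.nn.softmax(logits)       # roughly [0.66, 0.24, 0.10]
print(float(tf.reduce_sum(probs)))  # 1.0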
Example: loading MNIST in an interactive session:

import tensorflow as tf
from tensorflow import keras

(x, y), _ = keras.datasets.mnist.load_data()
x.shape  # (60000, 28, 28)
y.shape  # (60000,)

(x, y), (x_test, y_test) = keras.datasets.mnist.load_data()
x.shape, y.shape, x_test.shape, y_test.shape
# ((60000, 28, 28), (60000,), (10000, 28, 28), (10000,))

y        # array([5, 0, 4, ..., 5, 6, 8], dtype=uint8)
y.max()  # 9

y_onehot = tf.one_hot(y, depth=10)
y_onehot.shape  # TensorShape([60000, 10])

y[1:]       # array([0, 4, 1, ..., 5, 6, 8], dtype=uint8)
y.argmax()  # 4 (index of the first label equal to 9)

Format conversion: tf.data.Dataset.from_tensor_slices
Shuffling: shuffle
Iteration: repeat / batch

x = tf.convert_to_tensor(x, dtype=tf.float32)
y = tf.convert_to_tensor(y, dtype=tf.int32)
train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db.shuffle(10000)
# <ShuffleDataset shapes: ((28, 28), ()), types: (tf.float32, tf.int32)>
# note: shuffle returns a new dataset; it does not modify train_db in place
train_db = tf.data.Dataset.from_tensor_slices((x, y)).batch(128)
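
In practice these steps are chained into a single input pipeline. A minimal sketch (the buffer size 10000 and batch size 128 follow the values above; the repeat count of 20 is an illustrative choice):

train_db = (tf.data.Dataset.from_tensor_slices((x, y))
            .shuffle(10000)  # shuffle with a 10000-element buffer
            .batch(128)      # group samples into batches of 128
            .repeat(20))     # iterate over the whole set 20 times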

Hands-on example: a two-hidden-layer MLP on MNIST, trained with manual gradient descent:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers, datasets
# x: [60k, 28, 28]
# y: [60k]
(x, y), (x_val, y_val) = datasets.mnist.load_data()
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.  # scale [0, 255] -> [0, 1]
x_test = tf.convert_to_tensor(x_val, dtype=tf.float32) / 255.
y = tf.convert_to_tensor(y, dtype=tf.int32)
y_test = tf.convert_to_tensor(y_val, dtype=tf.int32)
train_db = tf.data.Dataset.from_tensor_slices((x,y)).batch(128)
test_db = tf.data.Dataset.from_tensor_slices((x_test,y_test)).batch(128)
# network: [b, 784] -> [b, 256] -> [b, 128] -> [b, 10]
# each weight matrix has shape [dim_in, dim_out]
w1, b1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1)), tf.Variable(tf.zeros([256]))
w2, b2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1)), tf.Variable(tf.zeros([128]))
w3, b3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1)), tf.Variable(tf.zeros([10]))
# forward pass: h1 = x @ w1 + b1, and so on
lr = 1e-3
for epoch in range(20):  # iterate over the dataset 20 times
    for step, (x, y) in enumerate(train_db):
        # x: [128, 28, 28], y: [128]
        x = tf.reshape(x, [-1, 28*28])  # flatten each image to a 784-d vector
        with tf.GradientTape() as tape:
            # [128, 784] @ [784, 256] + [256] -> [128, 256]
            h1 = x @ w1 + tf.broadcast_to(b1, [x.shape[0], 256])
            h1 = tf.nn.relu(h1)
            # (the explicit broadcast_to is optional; + broadcasts the bias automatically)
            # [128, 256] @ [256, 128] + [128] -> [128, 128]
            h2 = h1 @ w2 + tf.broadcast_to(b2, [x.shape[0], 128])
            h2 = tf.nn.relu(h2)
            # [128, 128] @ [128, 10] + [10] -> [128, 10]
            out = h2 @ w3 + b3
            # compute MSE loss; out: [128, 10]
            y_onehot = tf.one_hot(y, depth=10)  # y_onehot: [128, 10]
            loss = tf.square(y_onehot - out)
            loss = tf.reduce_mean(loss)  # mean over all entries -> scalar
        # compute gradients of the loss w.r.t. all parameters
        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
        w1.assign_sub(lr * grads[0])
        b1.assign_sub(lr * grads[1])
        w2.assign_sub(lr * grads[2])
        b2.assign_sub(lr * grads[3])
        w3.assign_sub(lr * grads[4])
        b3.assign_sub(lr * grads[5])
        if step % 100 == 0:
            print(epoch, step, 'loss:', float(loss))
    # evaluate on the test set after each epoch
    total_correct, total_num = 0, 0
    for step, (x, y) in enumerate(test_db):
        x = tf.reshape(x, [-1, 28*28])
        h1 = tf.nn.relu(x @ w1 + b1)
        h2 = tf.nn.relu(h1 @ w2 + b2)
        out = h2 @ w3 + b3
        prob = tf.nn.softmax(out, axis=1)  # [b, 10] class probabilities
        pred = tf.argmax(prob, axis=1)     # [b, 10] -> [b]
        pred = tf.cast(pred, tf.int32)
        correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
        correct = tf.reduce_sum(correct)
        total_correct += int(correct)
        total_num += x.shape[0]
    acc = total_correct / total_num
    print('test_acc:', acc)
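
The six assign_sub calls above implement vanilla SGD by hand. Since optimizers is already imported, the same update can be delegated to a Keras optimizer; a sketch of what would replace the assign_sub block (same learning rate, identical math for plain SGD):

optimizer = optimizers.SGD(learning_rate=lr)
# inside the training loop, after computing the gradients:
grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
optimizer.apply_gradients(zip(grads, [w1, b1, w2, b2, w3, b3]))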
