# 9. Differentiation and custom training (九. 微分与自定义训练)

import tensorflow as tf
if __name__ == '__main__':
    # --- tf.Variable basics ---
    v = tf.Variable(0.0)  # scalar float32 variable, initial value 0.0
    v.assign(5)           # overwrite value -> 5.0
    v.assign_add(1)       # in-place add -> 6.0
    v.read_value()        # read current value: tf.Tensor(6.0, shape=(), dtype=float32)

    # --- automatic differentiation ---
    # tf.Variable is watched by GradientTape automatically.
    w = tf.Variable([[1.0]])
    with tf.GradientTape() as t:  # records operations for autodiff
        loss = w * w
    grad = t.gradient(loss, w)  # d(w^2)/dw = 2w -> [[2.]]

    # A tf.constant must be watched explicitly.
    w = tf.constant(3.0)
    with tf.GradientTape() as t:
        t.watch(w)
        loss = w * w
    grad = t.gradient(loss, w)  # 2 * 3.0 = 6.0

    # persistent=True keeps the tape alive so gradient() can be called
    # more than once.
    w = tf.constant(3.0)
    with tf.GradientTape(persistent=True) as t:
        t.watch(w)
        y = w * w
        z = y * y
    dy_dw = t.gradient(y, w)  # 6.0
    dz_dw = t.gradient(z, w)  # d(w^4)/dw = 4w^3 -> 108.0
    del t  # release the resources held by the persistent tape

    # --- custom training loop on MNIST ---
    (train_image, train_labels), _ = tf.keras.datasets.mnist.load_data()
    train_image = tf.expand_dims(train_image, -1)           # (N, 28, 28, 1)
    # Cast to float32 first, then scale pixel values into [0, 1].
    train_image = tf.cast(train_image, tf.float32) / 255.0
    train_labels = tf.cast(train_labels, tf.int64)
    dataset = tf.data.Dataset.from_tensor_slices(
        (train_image, train_labels)
    )
    # BUG FIX: the original chained .repeat() (no count) here, which makes the
    # dataset infinite and the per-epoch loop in train() never terminate.
    # Without .repeat(), one full iteration of `dataset` is exactly one epoch.
    dataset = dataset.shuffle(10000).batch(32)

    model = tf.keras.Sequential(
        [
            tf.keras.layers.Conv2D(16, [3, 3], activation='relu', input_shape=(28, 28, 1)),
            tf.keras.layers.MaxPool2D(),
            tf.keras.layers.Conv2D(16, [3, 3], activation='relu'),
            tf.keras.layers.GlobalMaxPooling2D(),
            tf.keras.layers.Dense(10)  # raw logits; no softmax activation
        ]
    )
    # Custom training setup. from_logits=True because the final Dense layer
    # outputs unnormalized logits.
    optimizer = tf.keras.optimizers.Adam()
    loss_func = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    features, labels = next(iter(dataset))  # shapes: (32, 28, 28, 1) and (32,)

    def loss(model, x, y):
        """Return the mean sparse cross-entropy of model(x) against labels y."""
        y_ = model(x)  # predicted logits
        return loss_func(y, y_)

    def train_one_step(model, images, labels):
        """Apply one optimizer step for a single batch."""
        with tf.GradientTape() as t:
            loss_step = loss(model, images, labels)
        grads = t.gradient(loss_step, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

    def train():
        """Train the model for 5 full passes over the dataset."""
        for epoch in range(5):
            for batch, (images, labels) in enumerate(dataset):
                train_one_step(model, images, labels)
            print('Epoch{} is finished'.format(epoch))

    train()

# tf.keras.metrics -- streaming aggregation / metrics module

    # --- tf.keras.metrics: streaming aggregation helpers ---
    m = tf.keras.metrics.Mean()
    m([10, 20, 30, 40])          # accumulate values into the running mean
    print(m.result().numpy())    # 25.0
    m.reset_states()             # clear the accumulated state

    # BUG FIX: metric objects must be constructed with a call, not by
    # subscripting the module (tf.keras.metrics['train_loss'] raises
    # TypeError), and accuracy needs SparseCategoricalAccuracy — the
    # original used SparseCategoricalCrossentropy, which is a loss.
    # The string argument is the metric's display name.
    train_loss = tf.keras.metrics.Mean('train_loss')
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')

    def train_one_sample(model, images, labels):
        """One optimization step on a batch; also updates the epoch metrics."""
        with tf.GradientTape() as t:
            pred = model(images)  # logits, kept for the accuracy metric
            loss_step = loss(model, images, labels)
        grads = t.gradient(loss_step, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_loss(loss_step)         # fold batch loss into the running mean
        train_accuracy(labels, pred)  # fold batch accuracy into the running mean

    def train():
        """Train 5 epochs, printing per-epoch mean loss and accuracy."""
        for epoch in range(5):
            for batch, (images, labels) in enumerate(dataset):
                train_one_sample(model, images, labels)
            print('Epoch{} loss is {} accuracy is {}'.format(epoch,
                                                             train_loss.result(),
                                                             train_accuracy.result()))
            # BUG FIX: reset the metrics so each epoch reports its own
            # statistics instead of a cumulative average over all epochs.
            train_loss.reset_states()
            train_accuracy.reset_states()
# (removed: blog-platform voting/payment UI text accidentally captured
#  during page extraction — not part of the tutorial)