TensorFlow (9): Custom Gradients and Gradient Descent, Applied to a Regression Problem

import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras

# Custom differentiation, mainly useful for research and experimentation
def g(x1, x2):
    return (x1 + 5) * (x2 ** 2)
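
# For reference: dg/dx1 = x2 ** 2 and dg/dx2 = 2 * x2 * (x1 + 5), so at
# (x1, x2) = (2, 3) the expected gradients are 9.0 and 42.0; the outputs
# below can be checked against these values.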


# ----------------
# Differentiation with tf.GradientTape
# ----------------
x1 = tf.Variable(2.0)
x2 = tf.Variable(3.0)

# By default a tape can back only one gradient() call; it is released afterwards
with tf.GradientTape() as tape:
    z = g(x1, x2)

dz_x1 = tape.gradient(z, x1)
print(dz_x1)

# The tape has already been released, so a second call raises a RuntimeError
try:
    dz_x2 = tape.gradient(z, x2)
except RuntimeError as err:
    print(err)

# Pass persistent=True to query the tape multiple times,
# at the cost of releasing its resources yourself with del
with tf.GradientTape(persistent=True) as tape:
    z = g(x1, x2)

dz_x1 = tape.gradient(z, x1)
dz_x2 = tape.gradient(z, x2)
print(dz_x1, dz_x2)
del tape


# ----------------
# A second way to get several derivatives at once: pass a list of variables
# ----------------
x1 = tf.Variable(2.0)
x2 = tf.Variable(3.0)
with tf.GradientTape() as tape:
    z = g(x1, x2)

# Pass the variables as a list
dz_x1x2 = tape.gradient(z, [x1, x2])
print(dz_x1x2)  # expect 9.0 and 42.0, matching the single-variable calls above

# ----------------
# Gradients with respect to constants
# ----------------
x1 = tf.constant(2.0)
x2 = tf.constant(3.0)

with tf.GradientTape() as tape:
    tape.watch(x1)
    tape.watch(x2)
    z = g(x1, x2)

dz_x1x2 = tape.gradient(z, [x1, x2])
print(dz_x1x2)
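
# Without watch(), nothing is recorded for a constant and gradient() returns None:
with tf.GradientTape() as tape:
    z = g(x1, x2)

print(tape.gradient(z, [x1, x2]))  # [None, None]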

# ----------------
# Two objective functions differentiated w.r.t. one variable
# ----------------
x = tf.Variable(5.0)
with tf.GradientTape() as tape:
    z1 = 3 * x
    z2 = x ** 2
    
# With multiple targets, gradient() returns the sum of the individual
# derivatives: d(z1)/dx + d(z2)/dx = 3 + 2x = 13.0 at x = 5
print(tape.gradient([z1, z2], x))

# ----------------
# Second- and higher-order derivatives
# ----------------
x1 = tf.Variable(2.0)
x2 = tf.Variable(3.0)

with tf.GradientTape(persistent=True) as outer_tape:
    with tf.GradientTape(persistent=True) as inner_tape:
        z = g(x1, x2)
    inner_grads = inner_tape.gradient(z, [x1, x2])
outer_grads = [outer_tape.gradient(inner_grad, [x1, x2])
               for inner_grad in inner_grads]

print(outer_grads)
del inner_tape
del outer_tape
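
# The result is the Hessian of g: d2g/dx1dx1 is returned as None (dg/dx1 = x2 ** 2
# does not depend on x1), d2g/dx1dx2 = d2g/dx2dx1 = 2 * x2 = 6.0, and
# d2g/dx2dx2 = 2 * (x1 + 5) = 14.0 at (x1, x2) = (2, 3).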


# ----------------
# Simulating gradient descent
# ----------------
def f(x):
    return 3. * x ** 2 + 2. * x - 1

learning_rate = 0.1
x = tf.Variable(0.0)

optimizer = keras.optimizers.SGD(learning_rate=learning_rate)
for _ in range(100):
    with tf.GradientTape() as tape:
        z = f(x)
    dz_dx = tape.gradient(z, x)
    
    # Used with an optimizer: the argument is a list of (gradient, variable) pairs
    optimizer.apply_gradients([(dz_dx, x)])

    # Equivalent manual update:
    # x.assign_sub(learning_rate * dz_dx)
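
# Sanity check: f'(x) = 6x + 2 vanishes at x = -1/3, so x should converge there
print(x)  # expect roughly -0.3333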
    

Implementing gradient descent by hand to solve a regression problem

import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras

from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing()

from sklearn.model_selection import train_test_split

x_train_all, x_test, y_train_all, y_test = train_test_split(
    housing.data, housing.target, random_state=7)

x_train, x_val, y_train, y_val = train_test_split(
    x_train_all, y_train_all, random_state=11, test_size=0.25)

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()

x_train_scaled = scaler.fit_transform(x_train)
x_val_scaled = scaler.transform(x_val)
x_test_scaled = scaler.transform(x_test)
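
# Note: the scaler is fit on the training split only and then merely applied to
# the validation and test splits, so no statistics leak from held-out data.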



# Using metrics

metric = keras.metrics.MeanSquaredError()
print(metric([5.], [2.]))  # (5 - 2)^2 = 9.0
print(metric([0.], [1.]))  # Keras metrics accumulate: running mean (9 + 1) / 2 = 5.0

# Cancel the accumulation manually
metric.reset_states()
metric([1.], [3.])
print(metric.result())  # 4.0: only the call after the reset counts

# What the fit() function does:
# 1. per batch: iterate over the training set and track statistics with the metric
#    1.1 (here we do this manually, driving the automatic differentiation ourselves)
# 2. at the end of each epoch: track the metric on the validation set

epochs = 100
batch_size = 32
steps_per_epoch = len(x_train_scaled) // batch_size
optimizer = keras.optimizers.SGD()
metric = keras.metrics.MeanSquaredError()


# Simulate fit()'s batching by sampling random indices
def random_batch(x, y, batch_size=32):
    idx = np.random.randint(0, len(x), size=batch_size)
    return x[idx], y[idx]
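
# Note: this samples with replacement; fit() itself walks a shuffled copy of the
# whole training set each epoch. A tf.data sketch of that behaviour (not used below):
# dataset = (tf.data.Dataset.from_tensor_slices((x_train_scaled, y_train))
#            .shuffle(buffer_size=len(x_train_scaled))
#            .batch(batch_size))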
    
model = keras.models.Sequential([
    keras.layers.Dense(30, activation='relu', input_shape=x_train.shape[1:]),
    keras.layers.Dense(1)
])


for epoch in range(epochs):
    metric.reset_states()
    for step in range(steps_per_epoch):
        # Draw a batch
        x_batch, y_batch = random_batch(x_train_scaled, y_train, batch_size)
        # Forward pass; the model can be called like a function
        with tf.GradientTape() as tape:
            y_pred = model(x_batch)
            y_pred = tf.squeeze(y_pred)  # (batch, 1) -> (batch,) to match y_batch
            # The objective function
            loss = tf.reduce_mean(keras.losses.mean_squared_error(y_batch, y_pred))
            # Track the training metric
            metric(y_batch, y_pred)
        # Compute the gradients
        grads = tape.gradient(loss, model.variables)
        # Pair each gradient with its variable
        grads_and_vars = zip(grads, model.variables)
        # Apply the update
        optimizer.apply_gradients(grads_and_vars)
        # \r moves the cursor back to the start of the current line
        print("\rEpoch", epoch, "train mse:",
              metric.result().numpy(), end="")
    # End of epoch: evaluate on the validation set
    y_val_pred = tf.squeeze(model(x_val_scaled))
    val_loss = tf.reduce_mean(
        keras.losses.mean_squared_error(y_val, y_val_pred))
    print('\t', 'valid mse:', val_loss.numpy())
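
# After training, a final check on the held-out test set (a minimal sketch
# mirroring the per-epoch validation step above):
y_test_pred = tf.squeeze(model(x_test_scaled))
test_loss = tf.reduce_mean(keras.losses.mean_squared_error(y_test, y_test_pred))
print('test mse:', test_loss.numpy())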
    

 
