# TensorFlow basics (4): manual differentiation for regression (tf.GradientTape with Keras)


import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf

from tensorflow import keras

# Report the runtime environment: TF build, Python interpreter, and the
# version of every third-party library this script depends on.
print(tf.__version__)
print(sys.version_info)
for lib in (mpl, np, pd, sklearn, tf, keras):
    print(lib.__name__, lib.__version__)



# Solving a regression problem with manually computed gradients
#####################################################################################

from sklearn.datasets import fetch_california_housing

# Fetch the California-housing regression dataset (cached after the first
# download) and show its description plus the feature/target array shapes.
housing = fetch_california_housing()
print(housing.DESCR)
for arr in (housing.data, housing.target):
    print(arr.shape)

#####################################################################################


from sklearn.model_selection import train_test_split

# Two-stage split: first carve out the test set, then split the remainder
# into train and validation.  Fixed random_state values keep both splits
# reproducible across runs.
x_train_all, x_test, y_train_all, y_test = train_test_split(
    housing.data, housing.target, random_state = 7)
x_train, x_valid, y_train, y_valid = train_test_split(
    x_train_all, y_train_all, random_state = 11)
for features, targets in ((x_train, y_train),
                          (x_valid, y_valid),
                          (x_test, y_test)):
    print(features.shape, targets.shape)

#####################################################################################


from sklearn.preprocessing import StandardScaler

# Standardize features to zero mean / unit variance.  The scaler is fit on
# the training set only; validation and test reuse the training statistics
# so no information leaks from those sets into preprocessing.
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_valid_scaled = scaler.transform(x_valid)
x_test_scaled = scaler.transform(x_test)

#####################################################################################

# Demo of a stateful Keras metric.

# MeanSquaredError is cumulative: each call folds the new batch into a
# running mean rather than returning an independent value.
metric = keras.metrics.MeanSquaredError()
print(metric([5.], [2.]))  # (5-2)^2 = 9
print(metric([0.], [1.]))  # running mean: ((5-2)^2 + (1-0)^2) / 2 = 5
print(metric.result())

metric.reset_states()  # clear accumulated state when a fresh mean is wanted
metric([1.], [3.])
print(metric.result())

#####################################################################################

# Custom training loop outline:
# 1. per epoch, iterate over training batches, updating the train metric
#    1.1 gradients come from tf.GradientTape (manual differentiation)
# 2. at the end of each epoch, evaluate on the validation set

epochs = 100
batch_size = 32
steps_per_epoch = len(x_train_scaled) // batch_size
optimizer = keras.optimizers.SGD()
metric = keras.metrics.MeanSquaredError()

# model.fit would shuffle the data each epoch; here we emulate that by
# drawing a random batch on every step instead.
def random_batch(x, y, batch_size=32):
    """Sample a random batch (with replacement) from parallel arrays x, y."""
    picks = np.random.randint(0, len(x), size=batch_size)
    # x and y are numpy arrays, so fancy indexing with an index array works
    return x[picks], y[picks]


# Single-hidden-layer regression network: 8 features -> 30 ReLU units -> 1 output.
model = keras.models.Sequential()
model.add(keras.layers.Dense(30, activation='relu',
                             input_shape=x_train.shape[1:]))
model.add(keras.layers.Dense(1))


for epoch in range(epochs):
    metric.reset_states()  # restart the running train-MSE for this epoch
    for step in range(steps_per_epoch):
        x_batch, y_batch = random_batch(x_train_scaled, y_train,
                                        batch_size)
        with tf.GradientTape() as tape:
            # The model is callable: a (batch, 8) feature matrix maps to a
            # (batch, 1) prediction.  Squeeze to rank 1 so it aligns with
            # y_batch — otherwise (batch,) vs (batch, 1) broadcasts to a
            # (batch, batch) matrix and silently corrupts loss and metric.
            y_pred = tf.squeeze(model(x_batch), axis=1)
            loss = tf.reduce_mean(
                keras.losses.mean_squared_error(y_batch, y_pred))
            metric(y_batch, y_pred)  # accumulate train MSE for reporting
        # Differentiate the loss w.r.t. every trainable weight, pair each
        # gradient with its variable, and apply one SGD update.
        grads = tape.gradient(loss, model.trainable_variables)
        grads_and_vars = zip(grads, model.trainable_variables)
        optimizer.apply_gradients(grads_and_vars)
        print("\rEpoch", epoch, " train mse:",
              metric.result().numpy(), end="")
    # End-of-epoch validation pass; same squeeze for shape alignment, and
    # mean_squared_error takes (y_true, y_pred) in that order.
    y_valid_pred = tf.squeeze(model(x_valid_scaled), axis=1)
    valid_loss = tf.reduce_mean(
        keras.losses.mean_squared_error(y_valid, y_valid_pred))
    print("\t", "valid mse: ", valid_loss.numpy())
        


# (end of article — scraped page footer removed)