# 梯度下降法 (Gradient descent)
# 梯度下降法基本原理
# 梯度下降法实现线性回归
# TensorFlow可训练变量和自动求导机制
# TensorFlow实现梯度下降
# 模型评估
# 波士顿房价预测 (Boston housing price prediction)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import tensorflow as tf
# Load the Boston housing dataset bundled with Keras.
boston_housing = tf.keras.datasets.boston_housing
(train_x, train_y), (test_x, test_y) = boston_housing.load_data()

# Single-feature regression: column 5 is the average number of rooms,
# the target is the median house price.
x_train, x_test = train_x[:, 5], test_x[:, 5]
y_train, y_test = train_y, test_y

learn_rate = 0.04    # gradient-descent step size
iter = 2000          # number of training iterations
display_step = 200   # print progress every this many steps

# Reproducible random initialisation of slope w and intercept b.
np.random.seed(612)
w = tf.Variable(np.random.randn())
b = tf.Variable(np.random.randn())

# Per-iteration loss history, filled in by the training loop below.
mse_train = []
mse_test = []
# Plain gradient descent on the linear model pred = w*x + b.
# NOTE: the original paste lost all block indentation (the `for` and `with`
# bodies were flat at column 0); the structure below restores the intended
# loop — forward pass under the tape, gradient step, periodic logging.
for i in range(iter):
    # Only the training forward pass needs to be recorded by the tape,
    # since gradients are taken w.r.t. loss_train alone.
    with tf.GradientTape() as tape:
        pred_train = w * x_train + b
        loss_train = 0.5 * tf.reduce_mean(tf.square(y_train - pred_train))
    # Test-set evaluation is monitoring only — computed outside the tape
    # so it is not tracked for differentiation.
    pred_test = w * x_test + b
    loss_test = 0.5 * tf.reduce_mean(tf.square(y_test - pred_test))

    mse_train.append(loss_train)
    mse_test.append(loss_test)

    # d(loss_train)/dw and d(loss_train)/db, then a vanilla SGD update.
    dl_dw, dl_db = tape.gradient(loss_train, [w, b])
    w.assign_sub(learn_rate * dl_dw)
    b.assign_sub(learn_rate * dl_db)

    if i % display_step == 0:
        print("i: %i, Train loss: %f, test loss:%f" % (i, loss_train, loss_test))
# Summary figure: fit, loss curves, and true-vs-predicted prices.
fig = plt.figure(figsize=(15, 10))

# (1) Training data with the fitted regression line.
ax = fig.add_subplot(2, 2, 1)
ax.scatter(x_train, y_train, color="blue", label="data")
ax.plot(x_train, pred_train, color="red", label="model")
ax.legend(loc="upper left")

# (2) Train/test loss over the iterations.
ax = fig.add_subplot(2, 2, 2)
ax.plot(mse_train, color="blue", linewidth=3, label="train loss")
ax.plot(mse_test, color="red", linewidth=1.5, label="test loss")
ax.legend(loc="upper right")

# (3) True vs. predicted prices on the training set.
ax = fig.add_subplot(2, 2, 3)
ax.plot(y_train, color="blue", marker="o", label="true_price")
ax.plot(pred_train, color="red", marker=".", label="predict")
ax.legend()

# (4) True vs. predicted prices on the test set.
ax = fig.add_subplot(2, 2, 4)
ax.plot(y_test, color="blue", marker="o", label="true_price")
ax.plot(pred_test, color="red", marker=".", label="predict")
ax.legend()

plt.show()