参考链接
https://blog.csdn.net/lyp_1020k/article/details/109539095 (1)
https://blog.csdn.net/Cowry5/article/details/80174130 (2)
修改
在(1)的基础上,修改了循环语句,使 $J(\theta)$ 在最后可以可视化
迭代次数2000以上效果就已经较好
源码
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the training set (one row per city: population, profit) and
# scatter-plot it for a first look at the data.
path = 'ex1data1.txt'
data = pd.read_csv(path, names=['Population', 'Profit'])
m = len(data)  # number of training examples; read as a global by the functions below
data.plot(kind='scatter', x='Population', y='Profit', figsize=(12,8))
# Compute the cost function J(theta)
def cost_function(X, y, theta):
    """Return the mean-squared-error cost J(theta) = sum((X.theta - y)^2) / (2m).

    X     : (m, n) design matrix (first column of ones for the intercept)
    y     : (m,) target vector
    theta : (n,) parameter vector
    """
    # Derive the sample count from the input instead of relying on the
    # module-level global `m`, so the function is self-contained.
    diff = X.dot(theta.T) - y
    return np.sum(np.power(diff, 2)) / (2 * len(y))
# Partial derivatives of J(theta)
def gradient_function(X, y, theta):
    """Return the gradient of the cost, dJ/dtheta = X^T (X.theta - y) / m.

    X     : (m, n) design matrix
    y     : (m,) target vector
    theta : (n,) parameter vector
    Returns an (n,) gradient vector.
    """
    # Use len(y) rather than the module-level global `m` so the function
    # works for any input size.
    diff = X.dot(theta.T) - y
    return diff.dot(X) / len(y)
# Gradient descent
def gradient_descent(X, y, alpha, epoch):
    """Fit theta by batch gradient descent.

    X     : (m, n) design matrix
    y     : (m,) target vector
    alpha : learning rate
    epoch : number of iterations
    Returns (theta, cost): the fitted (n,) parameter vector and an
    (epoch,) array of J(theta) after each update, for convergence plots.
    """
    # BUG FIX: the original `theta = np.array((m, 1))` built the literal
    # vector [m, 1] (the *values* m and 1), not an initial parameter
    # vector — the conventional zero start was intended.
    theta = np.zeros(X.shape[1])
    cost = np.zeros(epoch)
    # for-loop (instead of a convergence `while`) so J(theta) is recorded
    # at every iteration and can be visualized afterwards
    for i in range(epoch):
        gradient = gradient_function(X, y, theta)
        theta = theta - alpha * gradient
        cost[i] = cost_function(X, y, theta)
    return theta, cost
# Build the design matrix: prepend a column of ones (intercept term) to the
# population feature, then transpose so X has shape (m, 2).
X = data['Population']
y = data['Profit']
X = np.vstack((pd.Series(np.ones(m)), X)).T  # stack two 1-D arrays vertically, then transpose
alpha = 0.01  # learning rate
epoch=2000  # iteration count; per the note above, >= 2000 already converges well
optimal_theta,cost = gradient_descent(X, y, alpha,epoch)
print('optimal_theta:', optimal_theta)
# Plot the fitted regression line over the training data.
population = np.linspace(data.Population.min(), data.Population.max())  # x-axis; two points determine a line
profit = optimal_theta[0] + (optimal_theta[1] * population)  # y-axis: predicted profit
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(population, profit, 'r', label='Prediction')
ax.scatter(data['Population'], data['Profit'], label='Training data')
ax.legend(loc=4)  # loc=4 puts the legend in the lower-right corner
ax.set_xlabel('Population')
ax.set_ylabel('Profit')
ax.set_title('Prediction Profit by. Population Size')
plt.show()
# Plot J(theta) per iteration to visualize convergence.
fig, ax = plt.subplots(figsize=(8,4))
ax.plot(np.arange(epoch), cost, 'r')  # np.arange() returns an evenly spaced integer array
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.set_title('Error vs. Training Epoch')
plt.show()