Multivariate Linear Regression (Size / Bedrooms / Price Data)
Development Environment
Anaconda 4.9.2 / Python 3.6.12
Task
Given a housing dataset with two features (the size of the house and the number of bedrooms) and one target (the price of the house), analyze the dataset.
Program Breakdown
Importing the Raw Data
Code
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

path = r'E:\Ana3\linear_2\ex1data2.txt'  # raw string so the backslashes are not treated as escape sequences
data2 = pd.read_csv(path, header=None, names=['Size', 'Bedrooms', 'Price'])
print(data2.head())  # show the first five rows
Result
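Beyond head(), it is worth checking the summary statistics before normalizing, since house sizes are in the thousands while bedroom counts are single digits, so the features differ widely in scale. A minimal inspection sketch (not part of the original listing):

print(data2.describe())  # count / mean / std / min / quartiles / max per column
print(data2.shape)       # (number of samples, number of columns)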
Preprocessing: Feature Normalization
Code
data2 = (data2 - data2.mean()) / data2.std()  # z-score normalization: subtract each column's mean, divide by its standard deviation
print(data2.head())  # first five rows after feature normalization
Result
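Since this is a z-score transform, every column of data2 should now have mean ≈ 0 and standard deviation ≈ 1. A quick sanity check (not in the original code):

print(data2.mean())  # approximately 0 for every column
print(data2.std())   # approximately 1 for every column (this is what we divided by)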
Computing the Cost Function
Code
def computeCost(X, y, theta):
    inner = np.power(((X * theta.T) - y), 2)
    return np.sum(inner) / (2 * len(X))  # squared-error cost J

data2.insert(0, 'Ones', 1)  # add a column of ones so the cost and gradient can be computed in vectorized form
cols = data2.shape[1]
X2 = data2.iloc[:, 0:cols-1]  # X2: all rows, every column except the last
y2 = data2.iloc[:, cols-1:cols]  # y2: all rows, last column only
print(X2.head())
print(y2.head())  # head() shows the first five rows
X2 = np.matrix(X2.values)
y2 = np.matrix(y2.values)
theta2 = np.matrix(np.array([0, 0, 0]))
print(computeCost(X2, y2, theta2))  # cost at the initial theta (all zeros)
Result
① The first five rows of X2 and y2.
② The initial value of the cost function (with theta initialized to zero): 0.489 (on the normalized data).
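For reference, computeCost implements the standard squared-error cost for linear regression,

J(\theta) = \frac{1}{2m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right)^2, \qquad h_\theta(x) = \theta^{T} x,

where m = len(X) is the number of training samples, and the column of ones inserted above lets theta_0 act as the intercept term.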
Batch Gradient Descent
Code
def gradientDescent(X, y, theta, alpha, iters):
    temp = np.matrix(np.zeros(theta.shape))  # temporary matrix for the updated theta, same shape as theta (here (1, 3))
    parameters = int(theta.ravel().shape[1])  # number of parameters to fit
    cost = np.zeros(iters)  # cost recorded at every iteration
    for i in range(iters):
        error = (X * theta.T) - y
        for j in range(parameters):
            term = np.multiply(error, X[:, j])
            temp[0, j] = theta[0, j] - ((alpha / len(X)) * np.sum(term))  # gradient-descent update rule for theta_j
        theta = temp
        cost[i] = computeCost(X, y, theta)  # cost after updating theta
    return theta, cost

alpha = 0.01
iters = 1000
g2, cost2 = gradientDescent(X2, y2, theta2, alpha, iters)
print(computeCost(X2, y2, g2))  # cost after training
Result
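The inner loop over j applies the update rule theta_j := theta_j - (alpha/m) * sum_i (h_theta(x^(i)) - y^(i)) * x_j^(i) one parameter at a time. Since X, y, and theta are already np.matrix objects, the same update can be written without the inner loop. A minimal sketch of this equivalent, fully vectorized variant (an alternative to, not a replacement for, the listing above):

def gradientDescentVectorized(X, y, theta, alpha, iters):
    cost = np.zeros(iters)
    for i in range(iters):
        error = (X * theta.T) - y                         # (m, 1) residual vector
        theta = theta - (alpha / len(X)) * (error.T * X)  # update all parameters at once: (1, m) x (m, n) -> (1, n)
        cost[i] = computeCost(X, y, theta)
    return theta, cost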
Plotting the Cost vs. Iterations Curve
Code
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(np.arange(iters), cost2, 'r')
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.set_title('Error vs. Training Epoch')
plt.show()
Result
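The shape of this curve depends heavily on the learning rate: too small and convergence is slow, too large and the cost can oscillate or diverge. A sketch for comparing a few candidate rates on the same axes (the alpha values here are illustrative, not from the original):

fig, ax = plt.subplots(figsize=(12, 8))
for lr in [0.001, 0.01, 0.1]:  # illustrative candidates around the alpha used above
    _, c = gradientDescent(X2, y2, theta2, lr, iters)
    ax.plot(np.arange(iters), c, label='alpha = {}'.format(lr))
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.legend()
plt.show()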
Complete Code
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

path = r'E:\Ana3\linear_2\ex1data2.txt'  # raw string so the backslashes are not treated as escape sequences
data2 = pd.read_csv(path, header=None, names=['Size', 'Bedrooms', 'Price'])
print(data2.head())  # show the first five rows

data2 = (data2 - data2.mean()) / data2.std()  # z-score feature normalization
print(data2.head())  # first five rows after feature normalization

def computeCost(X, y, theta):
    inner = np.power(((X * theta.T) - y), 2)
    return np.sum(inner) / (2 * len(X))  # squared-error cost J

data2.insert(0, 'Ones', 1)  # add a column of ones for the vectorized cost/gradient computation
cols = data2.shape[1]
X2 = data2.iloc[:, 0:cols-1]  # X2: all rows, every column except the last
y2 = data2.iloc[:, cols-1:cols]  # y2: all rows, last column only
print(X2.head())
print(y2.head())  # head() shows the first five rows
X2 = np.matrix(X2.values)
y2 = np.matrix(y2.values)
theta2 = np.matrix(np.array([0, 0, 0]))
print(computeCost(X2, y2, theta2))  # cost at the initial theta (all zeros)

def gradientDescent(X, y, theta, alpha, iters):
    temp = np.matrix(np.zeros(theta.shape))  # temporary matrix for the updated theta, same shape as theta (here (1, 3))
    parameters = int(theta.ravel().shape[1])  # number of parameters to fit
    cost = np.zeros(iters)  # cost recorded at every iteration
    for i in range(iters):
        error = (X * theta.T) - y
        for j in range(parameters):
            term = np.multiply(error, X[:, j])
            temp[0, j] = theta[0, j] - ((alpha / len(X)) * np.sum(term))  # gradient-descent update rule for theta_j
        theta = temp
        cost[i] = computeCost(X, y, theta)  # cost after updating theta
    return theta, cost

alpha = 0.01
iters = 1000
g2, cost2 = gradientDescent(X2, y2, theta2, alpha, iters)
print(computeCost(X2, y2, g2))  # cost after training

fig, ax = plt.subplots(figsize=(12, 8))
ax.plot(np.arange(iters), cost2, 'r')
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.set_title('Error vs. Training Epoch')
plt.show()
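On a dataset this small, the closed-form normal equation gives the exact least-squares solution in one step and is a useful cross-check on the gradient-descent result. A minimal sketch (not part of the original exercise code; assumes X2, y2, and g2 from above):

theta_ne = np.linalg.inv(X2.T * X2) * X2.T * y2  # normal equation: theta = (X^T X)^{-1} X^T y
print(theta_ne.T)  # exact least-squares theta
print(g2)          # theta found by gradient descent; should be close after enough iterations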