Univariate Linear Regression
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# load the data and take a first look at it
path = 'ex1data1.txt'
data = pd.read_csv(path, header=None, names=['Population', 'Profit'])
data.head()
data.describe()

data.plot(kind='scatter', x='Population', y='Profit', figsize=(8, 6))
plt.show()
def computeCost(X, y, theta):
    # squared-error cost of the hypothesis X * theta.T against the targets y
    inner = np.power(((X * theta.T) - y), 2)
    return np.sum(inner) / (2 * len(X))
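For reference, computeCost is the squared-error cost minimized throughout this exercise (m is the number of training examples):

$$J(\theta) = \frac{1}{2m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right)^2, \qquad h_\theta(x) = \theta^{T}x$$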
# add a column of ones so the intercept can be treated as theta_0
data.insert(0, 'Ones', 1)

# split into features X (all columns but the last) and target y (last column)
cols = data.shape[1]
X = data.iloc[:, 0:cols-1]
y = data.iloc[:, cols-1:cols]
X.head()
y.head()
# convert to numpy matrices and initialize theta to zeros
X = np.matrix(X.values)
y = np.matrix(y.values)
theta = np.matrix(np.array([0, 0]))
theta
X.shape, theta.shape, y.shape
computeCost(X, y, theta)
Batch Gradient Descent
def gradientDescent(X, y, theta, alpha, iters):
    temp = np.matrix(np.zeros(theta.shape))   # holds the simultaneously updated theta
    parameters = int(theta.ravel().shape[1])  # number of parameters to fit
    cost = np.zeros(iters)                    # cost history, one entry per iteration

    for i in range(iters):
        error = (X * theta.T) - y             # residuals for the current theta

        # update every theta_j using the error computed from the old theta
        for j in range(parameters):
            term = np.multiply(error, X[:, j])
            temp[0, j] = theta[0, j] - ((alpha / len(X)) * np.sum(term))

        theta = temp
        cost[i] = computeCost(X, y, theta)

    return theta, cost
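Each outer-loop pass is one batch gradient descent step over the whole training set:

$$\theta_j := \theta_j - \frac{\alpha}{m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right)x_j^{(i)}$$

All parameters are written into temp first and copied back together, so the update is simultaneous and every theta_j uses the error from the previous iterate.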
# learning rate and number of gradient descent iterations
alpha = 0.01
iters = 1000
g, cost = gradientDescent(X, y, theta, alpha, iters)
computeCost(X, y, g)
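As an optional sanity check on g, the same line can be fit with scikit-learn; this sketch assumes scikit-learn is installed and is not part of the original exercise. LinearRegression solves the least-squares problem exactly, so its coefficients are what gradient descent converges toward as iters grows.

from sklearn.linear_model import LinearRegression

# fit on the raw Population column; sklearn adds its own intercept term
model = LinearRegression()
model.fit(data[['Population']].values, data['Profit'].values)
model.intercept_, model.coef_   # exact least-squares counterpart of g[0,0], g[0,1]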
# evaluate the fitted hypothesis h(x) = theta_0 + theta_1 * x over the data range
x = np.linspace(data.Population.min(), data.Population.max(), 100)
f = g[0, 0] + (g[0, 1] * x)
fig , ax1 = plt.subplots(figsize=(8,6))
ax1.plot(x, f, 'r', label='Prediction')
ax1.scatter(data.Population, data.Profit, label='Training Data')
ax1.legend(loc=4)
ax1.set_xlabel('Population')
ax1.set_ylabel('Profit')
ax1.set_title('Predicted Profit vs. Population Size')
plt.show()
fig, ax2 = plt.subplots(figsize=(8,6))
ax2.plot(np.arange(iters), cost, 'r')
ax2.set_xlabel('Iterations')
ax2.set_ylabel('Cost')
ax2.set_title('Error vs. Training Epoch')
plt.show()
Multivariate Linear Regression
path = 'ex1data2.txt'
data2 = pd.read_csv(path, header=None, names=['Size', 'Bedrooms', 'Price'])
data2.head()
# feature normalization: rescale every column to zero mean and unit variance
data2 = (data2 - data2.mean()) / data2.std()
data2.head()
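Gradient descent converges much faster when the features are on comparable scales, so every column of data2 (including Price) is mean-normalized:

$$x_j := \frac{x_j - \mu_j}{\sigma_j}$$

where μ_j and σ_j are the column mean and standard deviation.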
# repeat the same preprocessing: intercept column, X/y split, matrix conversion
data2.insert(0, 'Ones', 1)
cols = data2.shape[1]
X2 = data2.iloc[:, 0:cols-1]
y2 = data2.iloc[:, cols-1:cols]
X2 = np.matrix(X2.values)
y2 = np.matrix(y2.values)
theta2 = np.matrix(np.array([0, 0, 0]))
# rerun gradient descent on the normalized multivariate data
g2, cost2 = gradientDescent(X2, y2, theta2, alpha, iters)
computeCost(X2, y2, g2)
fig,ax3 = plt.subplots(figsize=(8,6))
ax3.plot(np.arange(iters), cost2, 'r')
ax3.set_xlabel('Iterations')
ax3.set_ylabel('Cost')
ax3.set_title('Error vs. Training Epoch')
plt.show()
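gradientDescent works unchanged for any number of features because it loops over the parameters; as a side note, the inner loop can be replaced by a single matrix operation. The name gradientDescentVec below is hypothetical and only a sketch, not part of the original exercise; it should return the same theta and cost history.

def gradientDescentVec(X, y, theta, alpha, iters):
    # hypothetical vectorized variant of gradientDescent
    theta = theta.copy()
    cost = np.zeros(iters)
    for i in range(iters):
        error = (X * theta.T) - y                          # m x 1 residuals
        theta = theta - (alpha / len(X)) * (error.T * X)   # update all theta_j at once
        cost[i] = computeCost(X, y, theta)
    return theta, cost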
Normal Equation
def normalEqn(X, y):
    # closed-form least-squares solution: theta = (X^T X)^{-1} X^T y
    theta = np.linalg.inv(X.T @ X) @ X.T @ y
    return theta
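normalEqn is the closed-form least-squares solution

$$\theta = (X^{T}X)^{-1}X^{T}y$$

It needs no learning rate or iterations, but inverting X^T X gets expensive with many features and fails when X^T X is singular (e.g. redundant features). In that case np.linalg.pinv can replace np.linalg.inv; a minimal sketch (the name normalEqnPinv is not from the original exercise):

def normalEqnPinv(X, y):
    # hypothetical variant using the pseudo-inverse, robust to a singular X.T @ X
    return np.linalg.pinv(X.T @ X) @ X.T @ y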
# closed-form theta for the univariate data; compare with g from gradient descent
final_theta2 = normalEqn(X, y)
final_theta2