# Gradient debugging: verify the analytic gradient against a numerical approximation
import numpy as np

np.random.seed(666)
X = np.random.random(size=(1000, 10))              # 1000 samples, 10 features
true_theta = np.arange(1, 12, dtype=float)         # intercept plus 10 true weights
X_b = np.hstack([np.ones((len(X), 1)), X])         # prepend a bias column of ones
y = X_b.dot(true_theta) + np.random.normal(size=1000)
print(true_theta)
# [ 1.  2.  3.  4.  5.  6.  7.  8.  9. 10. 11.]
def J(theta, X_b, y):
    # MSE loss; return inf if theta has diverged and the arithmetic overflows.
    try:
        return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)
    except Exception:
        return float('inf')
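# Quick sanity check on the loss (my addition, not part of the original run):
# at the true parameters the MSE should be close to 1, the variance of the
# standard-normal noise added to y.
print(J(true_theta, X_b, y))  # expect a value near 1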
def dJ_math(theta, X_b, y):
    # Analytic gradient of the MSE: (2/m) * X_b^T (X_b.theta - y)
    return X_b.T.dot(X_b.dot(theta) - y) * 2. / len(y)
def dJ_debug(theta, X_b, y, epsilon=0.01):
    # Numerical gradient: approximate each partial derivative with a central
    # difference, (J(theta + eps*e_i) - J(theta - eps*e_i)) / (2 * eps).
    res = np.empty(len(theta))
    for i in range(len(theta)):
        theta_1 = theta.copy()
        theta_1[i] += epsilon
        theta_2 = theta.copy()
        theta_2[i] -= epsilon
        res[i] = (J(theta_1, X_b, y) - J(theta_2, X_b, y)) / (2 * epsilon)
    return res
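# Cross-check (my addition): since J is quadratic in theta, the central
# difference is essentially exact, so both gradients should agree at any
# test point. theta_test is a name introduced here for illustration.
theta_test = np.random.random(X_b.shape[1])
print(np.allclose(dJ_debug(theta_test, X_b, y), dJ_math(theta_test, X_b, y)))  # expect True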
def gradient_descent(dJ, X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-4):
    # Batch gradient descent; dJ is pluggable, so dJ_math and dJ_debug
    # can be swapped in without touching the loop. Stops when the loss
    # change falls below epsilon or after n_iters iterations.
    theta = initial_theta
    cur_iter = 0
    while cur_iter < n_iters:
        gradient = dJ(theta, X_b, y)
        last_theta = theta
        theta = theta - eta * gradient
        if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
            break
        cur_iter += 1
    return theta
initial_theta = np.zeros(X_b.shape[1])
eta = 0.1
theta = gradient_descent(dJ_debug, X_b, y, initial_theta, eta)
print(theta)
# [ 1.60733388  1.98991873  2.80925629  4.00526045  4.9565647   5.82284661
#   6.86831506  7.92353791  8.75087035  9.900878   10.80269625]
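# For comparison (a sketch; this run is not part of the original output):
# the analytic gradient should converge to essentially the same theta, and
# much faster, since dJ_debug evaluates J twice per parameter per iteration.
import time

t0 = time.time()
gradient_descent(dJ_debug, X_b, y, initial_theta, eta)
t1 = time.time()
theta_math = gradient_descent(dJ_math, X_b, y, initial_theta, eta)
t2 = time.time()
print(np.allclose(theta, theta_math))  # expect True, up to the stopping tolerance
print(t1 - t0, t2 - t1)                # dJ_debug should be noticeably slower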