Hand-Written Gradient Descent (A First Implementation)
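For reference, the code below implements the standard univariate linear-regression cost and its batch gradient-descent update:

$$J(\theta_0,\theta_1)=\frac{1}{2m}\sum_{i=1}^{m}\bigl(h_\theta(x^{(i)})-y^{(i)}\bigr)^2,\qquad h_\theta(x)=\theta_0+\theta_1 x$$

$$\theta_j:=\theta_j-\alpha\,\frac{1}{m}\sum_{i=1}^{m}\bigl(h_\theta(x^{(i)})-y^{(i)}\bigr)x_j^{(i)},\qquad j\in\{0,1\},\ x_0^{(i)}=1$$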
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection on older matplotlib


def j(theta_tmp):
    # cost J(theta) = 1/(2m) * sum((h(theta) - y)^2)
    diff = h(theta_tmp) - np.hstack(y)
    return 1/(2*len(x))*np.dot(diff, diff.T)


def h(theta_tmp):
    # hypothesis h(x) = theta0 + theta1*x, via the design matrix [1, x]
    return np.dot(theta_tmp, np.hstack((tmp, x)).T)


alpha = float(input())  # learning rate, e.g. 0.01
theta0 = 0.0
theta1 = 0.0
theta = np.matrix([theta0, theta1])

data = np.loadtxt(r'D:\data\machine-learning-ex1\machine-learning-ex1\ex1\ex1data1.txt', delimiter=',')
x = data[:, [0]]        # feature column, shape (m, 1)
y = data[:, [1]]        # target column, shape (m, 1)
tmp = np.ones(x.shape)  # bias column of ones for theta0

print(j(theta))  # cost before training

for i in range(1500):
    # compute both new values from the OLD theta, then assign,
    # so the two parameters are updated simultaneously
    tmp_theta0 = theta0 - alpha*(1/len(x))*np.dot(h(theta) - np.hstack(y), tmp)
    tmp_theta1 = theta1 - alpha*(1/len(x))*np.dot(h(theta) - np.hstack(y), x)
    theta0, theta1 = tmp_theta0, tmp_theta1
    theta = np.hstack((theta0, theta1))

print(j(theta))  # cost after 1500 iterations
print(theta)

# plot the cost surface J(theta0, theta1) over a grid
x_draw = np.arange(-10, 5, 0.1)
y_draw = np.arange(-10, 5, 0.1)
X, Y = np.meshgrid(x_draw, y_draw)
Z = list()
for i in range(-100, 50, 1):      # theta0 from -10.0 to 4.9
    tmp_save = list()
    for k in range(-100, 50, 1):  # theta1 from -10.0 to 4.9
        tmp_save.append(j([i/10, k/10]))
    Z.append(tmp_save)
Z = np.array(Z)  # rows vary theta0, so theta0 maps to the Y axis below

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')  # Axes3D(fig) no longer attaches axes on newer matplotlib
ax.plot_surface(X, Y, Z, cmap='rainbow')
ax.set_xlabel('theta1')
ax.set_ylabel('theta0')
ax.set_zlabel('J')
plt.show()
Console output:
C:\Users\G3\Anaconda3\envs\py35\python.exe D:/test/tensorflow_test/test.py
0.01
[[ 32.07273388]]
[[ 4.48338826]]
[[-3.63029144 1.16636235]]
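As an aside, the two per-parameter updates in the loop above can be collapsed into a single vectorized step. A minimal sketch of the idea, reusing tmp, x, y, and alpha from the script; the names X_design, y_flat, and theta_vec are my own, not from the original code:

# design matrix [1, x] of shape (m, 2) and flat targets of shape (m,)
X_design = np.hstack((tmp, x))
y_flat = np.hstack(y)

theta_vec = np.zeros(2)
for _ in range(1500):
    grad = X_design.T @ (X_design @ theta_vec - y_flat) / len(x)  # gradient of J
    theta_vec = theta_vec - alpha * grad  # updates theta0 and theta1 at once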
With a learning rate of 0.01, gradient descent brought the cost from about 32 down to 4.5, which I'd call a success.
When the learning rate exceeds roughly 0.02, the cost shoots off toward infinity and the algorithm fails to converge.
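To make that divergence concrete, here is a minimal, self-contained sketch that sweeps a few learning rates and reports the final cost. The data is synthetic (a hypothetical stand-in for ex1data1.txt, so the exact divergence threshold will not be exactly 0.02):

import numpy as np

# hypothetical data, roughly linear with noise
np.random.seed(0)
m = 97
x = np.random.uniform(4, 13, m)
y = 1.2*x - 4 + np.random.normal(0, 3, m)
X = np.column_stack((np.ones(m), x))  # design matrix [1, x]

def cost(theta):
    r = X @ theta - y
    return r @ r / (2*m)

for alpha in (0.001, 0.01, 0.02, 0.03):
    theta = np.zeros(2)
    for _ in range(1500):
        theta = theta - alpha/m * (X.T @ (X @ theta - y))
        if not np.isfinite(cost(theta)):  # cost blew up: divergence
            break
    print('alpha=%.3f -> final cost %s' % (alpha, cost(theta)))

With this data the small learning rates converge, while the largest one overflows to inf within the 1500 iterations, which is the same failure mode I saw above 0.02 on the real data.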
Like a greedy algorithm, gradient descent can easily get stuck in a local optimum (although the squared-error cost of linear regression is convex, so the minimum found here is in fact the global one).
I'm still a novice at this, so corrections from more experienced readers are very welcome.
PS: matrix and array arithmetic behaves as if the types were immutable: an expression like theta0 - alpha*(...) returns a new object rather than modifying its operands in place.
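A quick sanity check of that point (plain numpy, independent of the script above): arithmetic rebinds names to new arrays, yet ndarray objects themselves are mutable:

import numpy as np

a = np.array([1.0, 2.0])
b = a            # b refers to the same array object as a
a = a - 1.0      # arithmetic builds a NEW array and rebinds the name a
print(b)         # [1. 2.] -- the shared object was not modified
b[0] = 9.0       # ...but ndarrays are mutable: in-place writes do work
b -= 1.0         # augmented assignment also modifies in place
print(b)         # [8. 1.]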