Implementation approach
- Import the numpy package
- Build the loss function for the linear regression model
- Compute the gradient of the loss function and update the parameters by gradient descent (minimizing the loss); the exact formulas are given after this list
- Set the initial parameters and repeatedly update them with the rule from the previous step
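For reference, these are the quantities the code below implements: the mean squared error loss over the N data points, its gradients with respect to w and b, and the gradient-descent update with learning rate \eta:

L(w, b) = \frac{1}{N} \sum_{i=1}^{N} \left( (w x_i + b) - y_i \right)^2

\frac{\partial L}{\partial w} = \frac{2}{N} \sum_{i=1}^{N} x_i \left( (w x_i + b) - y_i \right), \qquad \frac{\partial L}{\partial b} = \frac{2}{N} \sum_{i=1}^{N} \left( (w x_i + b) - y_i \right)

w \leftarrow w - \eta \frac{\partial L}{\partial w}, \qquad b \leftarrow b - \eta \frac{\partial L}{\partial b}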
Code
import numpy as np
def compute_loss(w, b, points):
    # Mean squared error of the line y = w*x + b over all points
    loss = 0
    for i in range(len(points)):
        x = points[i, 0]
        y = points[i, 1]
        loss += ((w * x + b) - y) ** 2
    return loss / len(points)
def compute_gradient(w_current, b_current, points, learningrate):
    # Accumulate the gradient of the loss over all points, then take
    # one gradient-descent step with the given learning rate
    w_gradient = 0
    b_gradient = 0
    N = float(len(points))
    for i in range(len(points)):
        x = points[i, 0]
        y = points[i, 1]
        # += (not =): sum each point's contribution to the gradient
        w_gradient += x * ((w_current * x + b_current) - y) * 2 / N
        b_gradient += ((w_current * x + b_current) - y) * 2 / N
    w_new = w_current - (learningrate * w_gradient)
    b_new = b_current - (learningrate * b_gradient)
    return [w_new, b_new]
def gradient_descent(points, w_init, b_init, learningrate, num_train):
    # Run num_train update steps starting from the initial parameters
    w = w_init
    b = b_init
    for i in range(num_train):
        # compute_gradient returns [w_new, b_new], so unpack in that order
        w, b = compute_gradient(w, b, points, learningrate)
    return [w, b]
def main():
    # data.csv is expected to hold one "x,y" pair per line
    points = np.genfromtxt("data.csv", delimiter=",")
    learningrate = 0.0001
    num_train = 1000
    w_initial = 0
    b_initial = 0
    print("starting... w={0} b={1} loss={2}".format(w_initial, b_initial, compute_loss(w_initial, b_initial, points)))
    print("training...")
    [w, b] = gradient_descent(points, w_initial, b_initial, learningrate, num_train)
    print("after {0} iterations, w = {1}, b = {2}, loss = {3}".format(num_train, w, b, compute_loss(w, b, points)))

if __name__ == "__main__":
    main()
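To try the script without a pre-existing dataset, here is a minimal sketch that writes a synthetic data.csv in the two-column x,y layout the code assumes. The true line (w = 1.5, b = 2.0), the noise level, and the sample count are arbitrary illustrative choices, not values from the original:

import numpy as np

# Hypothetical test data: noisy samples of y = 1.5*x + 2.0, saved as
# "data.csv" with comma-separated x,y columns for np.genfromtxt above
rng = np.random.default_rng(seed=0)
x = rng.uniform(0, 100, size=100)
y = 1.5 * x + 2.0 + rng.normal(0, 5, size=100)
np.savetxt("data.csv", np.column_stack([x, y]), delimiter=",")

Note that the gradient in w scales with x, so with inputs of this magnitude the small learning rate (0.0001) is what keeps the updates from diverging.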