# NOTE: the learning rate (step size) must be kept small, otherwise the numerical error grows too large.
# -*- coding:utf-8 -*-
#import tensorflow as tf
import numpy
from matplotlib import pyplot
#loss
def compute_error(b, w, points):
    """Return the mean squared error of the line y = w*x + b over `points`.

    Args:
        b: intercept of the candidate line.
        w: slope of the candidate line.
        points: array-like of shape (N, 2); column 0 is x, column 1 is y.

    Returns:
        float: mean of (w*x + b - y)**2 over all N points.
    """
    pts = numpy.asarray(points, dtype=float)
    x = pts[:, 0]
    y = pts[:, 1]
    # Vectorized MSE: equivalent to summing per-point squared errors / N,
    # but runs at C speed instead of a Python loop.
    return float(numpy.mean((w * x + b - y) ** 2))
# loss = (w*x0 + b - y0)**2 +.... + (w*xN + b - yN)**2
#
# w' = w - learningrate*w_gradient
#b' = b - learningrate* b_gradient
def step_gradient(b_current, w_current, points, learningRate):
    """Perform one gradient-descent update for the line y = w*x + b.

    The loss is the mean squared error; its gradients are
        dL/db = (2/N) * sum(w*x + b - y)
        dL/dw = (2/N) * sum(x * (w*x + b - y))

    Args:
        b_current: current intercept.
        w_current: current slope.
        points: array-like of shape (N, 2); column 0 is x, column 1 is y.
        learningRate: step size; must be small enough for stability.

    Returns:
        [new_b, new_w]: parameters after one descent step.
    """
    pts = numpy.asarray(points, dtype=float)
    x = pts[:, 0]
    y = pts[:, 1]
    N = float(len(pts))
    # Vectorized gradient accumulation, replacing the per-point Python loop.
    residual = w_current * x + b_current - y
    b_gradient = (2.0 / N) * numpy.sum(residual)
    w_gradient = (2.0 / N) * numpy.sum(x * residual)
    new_b = b_current - learningRate * b_gradient
    new_w = w_current - learningRate * w_gradient
    return [new_b, new_w]
def gradient_desent_runner(points, starting_b, starting_w, learningRate, num_iteration):
    """Run `num_iteration` gradient-descent steps from the given start point.

    Args:
        points: array-like of shape (N, 2) passed through to step_gradient.
        starting_b: initial intercept.
        starting_w: initial slope.
        learningRate: step size for each update.
        num_iteration: number of descent steps to perform.

    Returns:
        [b, w]: the parameters after the final iteration.
    """
    b, w = starting_b, starting_w
    for _ in range(num_iteration):
        b, w = step_gradient(b, w, points, learningRate)
    return [b, w]
# Synthetic dataset: points along y = 3*x + 1, perturbed by uniform noise.
points_x = numpy.arange(50)
noise = numpy.random.uniform(low=-10, high=10, size=50)
points_y = 3 * points_x + 1 + noise
points = numpy.stack((points_x, points_y), axis=1)
def run(points):
    """Fit a line to `points` by gradient descent, print the result and plot it.

    Args:
        points: array of shape (N, 2); column 0 is x, column 1 is y.
    """
    initial_w = 2.5          # initial guess for the slope
    initial_b = 1.5          # initial guess for the intercept
    learningRate = 0.00001   # step size; must stay small or the loss diverges
    num_iteration = 10000    # number of gradient-descent iterations
    print('Begin running .....')
    new_b, new_w = gradient_desent_runner(points, initial_b, initial_w, learningRate, num_iteration)
    error = compute_error(new_b, new_w, points)
    print('the result .....')
    print('b = {}, w = {}, loss = {}'.format(new_b, new_w, error))
    # Bug fix: plot the `points` argument, not the module-level globals
    # points_x/points_y, so run() works for any dataset passed in.
    xs = points[:, 0]
    ys = points[:, 1]
    pyplot.scatter(xs, ys)
    pyplot.plot(xs, xs * new_w + new_b, 'r')
    pyplot.show()
# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    run(points)
# Finally, the fitted line is plotted against the data for comparison.