python利用梯度下降法做线性回归

(此处原文有插图)
需要注意的是学习率(步长)一定要取得很小,否则梯度下降会发散,计算误差太大

# -*- coding:utf-8 -*-
#import tensorflow as tf
import numpy
from matplotlib import pyplot
#loss
def compute_error(b, w, points):
    """Mean squared error of the line y = w*x + b over the data set.

    points is an (N, 2) array whose rows are (x, y) samples.
    Returns sum((w*x + b - y)^2) / N.
    """
    squared_residuals = [(w * px + b - py) ** 2 for px, py in points]
    return sum(squared_residuals) / float(len(points))
# loss = (w*x0 + b - y0)**2 +.... + (w*xN + b - yN)**2
#
# w' = w - learningrate*w_gradient
#b' = b - learningrate* b_gradient
def step_gradient(b_current, w_current, points, learningRate):
    """Perform one gradient-descent update on the MSE loss.

    Gradients of loss = mean((w*x + b - y)^2):
        dL/db = (2/N) * sum(w*x + b - y)
        dL/dw = (2/N) * sum(x * (w*x + b - y))
    Returns [new_b, new_w] after moving against the gradient by learningRate.
    """
    n = float(len(points))
    grad_b = 0
    grad_w = 0
    for px, py in points:
        residual = (w_current * px + b_current) - py
        grad_b += (2 / n) * residual
        grad_w += (2 / n) * px * residual
    new_b = b_current - learningRate * grad_b
    new_w = w_current - learningRate * grad_w
    return [new_b, new_w]

def gradient_desent_runner(points, starting_b, starting_w, learningRate, num_iteration):
    """Iterate step_gradient num_iteration times from the starting parameters.

    Returns [b, w] after the final iteration.
    """
    b, w = starting_b, starting_w
    for _ in range(num_iteration):
        b, w = step_gradient(b, w, points, learningRate)
    return [b, w]
# Synthetic data set: x = 0..49, y = 3x + 1 plus uniform noise in [-10, 10).
points_x = numpy.arange(0, 50)
noise = numpy.random.uniform(-10, 10, points_x.size)
points_y = 3 * points_x + 1 + noise
points = numpy.column_stack((points_x, points_y))
def run(points):
    """Fit a line to points by gradient descent, report the fit, and plot it."""
    initial_b = 1.5         # starting intercept
    initial_w = 2.5         # starting slope
    learningRate = 0.00001  # step size: must be small, or descent diverges
    num_iteration = 10000   # number of descent steps
    print('Begin running .....')
    new_b, new_w = gradient_desent_runner(points, initial_b, initial_w, learningRate, num_iteration)
    error = compute_error(new_b, new_w, points)
    print('the result .....')
    print('b = {}, w = {}, loss = {}'.format(new_b, new_w, error))
    # Scatter the raw samples, then overlay the fitted line in red.
    pyplot.scatter(points_x, points_y)
    pyplot.plot(points_x, points_x * new_w + new_b, 'r')
    pyplot.show()
# Guard the entry point so importing this module does not trigger the
# (blocking) training-and-plot run as a side effect.
if __name__ == '__main__':
    run(points)

最后画图作对比
(此处原文有结果对比图:散点数据与红色拟合直线)

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值