# BGD / SGD / MBGD: batch, stochastic and mini-batch gradient descent in Python

# Batch gradient descent (BGD): every parameter update uses the gradient
# averaged over the *entire* training set.
#
# Model: linear hypothesis h(x) = theta . x, where x[0] == 1 is the bias term.
# Cost:  J(theta) = 1/(2m) * sum_i (h(x_i) - y_i)^2

def batch_gradient_descent(x, y, theta=None, step=0.01, precision=0.0001,
                           max_iters=100000):
    """Fit a linear model to (x, y) with full-batch gradient descent.

    Parameters:
        x: list of feature rows; each row's first entry is the constant-1
           bias feature.
        y: list of targets, same length as x.
        theta: optional initial weights (defaults to all ones).
        step: learning rate.
        precision: stop once the mean squared cost J(theta) falls below this.
        max_iters: hard cap on the number of iterations.

    Returns:
        (theta, loss, iterations) after convergence or hitting the cap.
    """
    m = len(x)
    n = len(x[0])
    theta = [1.0] * n if theta is None else list(theta)
    loss = float("inf")
    iters = 0
    while iters < max_iters and loss > precision:
        # Accumulate the gradient over every sample -- the defining trait
        # of *batch* gradient descent.
        grad = [0.0] * n
        for xi, yi in zip(x, y):
            err = sum(t * v for t, v in zip(theta, xi)) - yi
            for j in range(n):
                grad[j] += err * xi[j]
        theta = [t - step * g / m for t, g in zip(theta, grad)]
        # Mean squared cost over the whole set.  (The original script
        # accumulated this in `loss` but then printed only the final
        # sample's contribution, `error` -- that was a bug.)
        loss = sum(
            (sum(t * v for t, v in zip(theta, xi)) - yi) ** 2
            for xi, yi in zip(x, y)
        ) / (2 * m)
        iters += 1
    return theta, loss, iters


if __name__ == "__main__":
    x = [[1, 1, 4], [1, 2, 5], [1, 5, 1], [1, 4, 2]]
    y = [19, 26, 19, 20]
    theta, loss, iters = batch_gradient_descent(x, y)
    print("BGD: iter", iters, "loss", loss, "theta", theta)

# Stochastic gradient descent (SGD): every parameter update uses the
# gradient of one randomly chosen training sample.
import random


def stochastic_gradient_descent(x, y, theta=None, step=0.01, precision=0.0001,
                                max_iters=100000, rng=None):
    """Fit a linear model to (x, y) with stochastic gradient descent.

    Parameters:
        x: list of feature rows; each row's first entry is the constant-1
           bias feature.
        y: list of targets, same length as x.
        theta: optional initial weights (defaults to all ones).
        step: learning rate.
        precision: stop once the mean squared cost falls below this.
        max_iters: hard cap on the number of iterations.
        rng: optional random.Random instance for reproducible sampling;
             defaults to the module-level `random` generator.

    Returns:
        (theta, loss, iterations) after convergence or hitting the cap.
    """
    picker = rng if rng is not None else random
    m = len(x)
    n = len(x[0])
    theta = [1.0] * n if theta is None else list(theta)
    loss = float("inf")
    iters = 0
    while iters < max_iters and loss > precision:
        # Pick one sample at random and step along its gradient only.
        i = picker.randrange(m)
        err = sum(t * v for t, v in zip(theta, x[i])) - y[i]
        theta = [t - step * err * v for t, v in zip(theta, x[i])]
        # Convergence is still judged on the mean cost over the full set,
        # 1/(2m) * sum of squared errors, for consistency with the batch
        # variant.  (The original printed only the last sample's `error`.)
        loss = sum(
            (sum(t * v for t, v in zip(theta, xi)) - yi) ** 2
            for xi, yi in zip(x, y)
        ) / (2 * m)
        iters += 1
    return theta, loss, iters


if __name__ == "__main__":
    x = [[1, 1, 4], [1, 2, 5], [1, 5, 1], [1, 4, 2]]
    y = [19, 26, 19, 20]
    theta, loss, iters = stochastic_gradient_descent(x, y)
    print("SGD: iter", iters, "loss", loss, "theta", theta)

# Mini-batch gradient descent (MBGD): each epoch sweeps the training set in
# small consecutive batches, updating theta after every batch.

def mini_batch_gradient_descent(x, y, theta=None, step=0.01, precision=0.0001,
                                max_iters=100000, batch_size=2):
    """Fit a linear model to (x, y) with mini-batch gradient descent.

    Fixes relative to the original script:
      * the original always used samples 0 and 1 as "the batch"
        (`for index in range(2)`), so the remaining samples never
        influenced the fit and the full-set loss could not reach
        `precision` -- the loop always ran all 100000 iterations.
        Batches now cycle over the whole training set each epoch.
      * the gradient is averaged over the actual batch size instead of
        the full set size (the original divided by 4 while summing only
        2 samples).
      * the default learning rate is 0.01; the original 0.1 is too
        aggressive once the gradient is averaged over a 2-sample batch
        and can diverge on this data.

    Parameters:
        x: list of feature rows; each row's first entry is the constant-1
           bias feature.
        y: list of targets, same length as x.
        theta: optional initial weights (defaults to all ones).
        step: learning rate.
        precision: stop once the mean squared cost falls below this.
        max_iters: hard cap on the number of epochs.
        batch_size: number of samples per mini-batch.

    Returns:
        (theta, loss, epochs) after convergence or hitting the cap.
    """
    m = len(x)
    n = len(x[0])
    theta = [1.0] * n if theta is None else list(theta)
    loss = float("inf")
    epochs = 0
    while epochs < max_iters and loss > precision:
        # One epoch: walk the data in consecutive batches, updating theta
        # after each batch's averaged gradient.
        for start in range(0, m, batch_size):
            stop = min(start + batch_size, m)
            grad = [0.0] * n
            for i in range(start, stop):
                err = sum(t * v for t, v in zip(theta, x[i])) - y[i]
                for j in range(n):
                    grad[j] += err * x[i][j]
            k = stop - start
            theta = [t - step * g / k for t, g in zip(theta, grad)]
        # Mean squared cost over the whole set decides convergence.
        loss = sum(
            (sum(t * v for t, v in zip(theta, xi)) - yi) ** 2
            for xi, yi in zip(x, y)
        ) / (2 * m)
        epochs += 1
    return theta, loss, epochs


if __name__ == "__main__":
    x = [[1, 1, 4], [1, 2, 5], [1, 5, 1], [1, 4, 2]]
    y = [19, 26, 19, 20]
    theta, loss, epochs = mini_batch_gradient_descent(x, y)
    print("MBGD: epoch", epochs, "loss", loss, "theta", theta)

  • 0
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值