Stochastic Gradient Descent
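The script below fits a two-parameter linear model to four hand-written samples and compares three update schemes: stochastic, full-pass, and mini-batch gradient descent. All three apply the same per-sample rule, written here in the usual notation with learning rate α:

$$
h_\theta(x) = \theta_0 x_0 + \theta_1 x_1, \qquad
\theta_j \leftarrow \theta_j + \alpha\left(y^{(i)} - h_\theta(x^{(i)})\right)x_j^{(i)}
$$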

import random

# Training data: each row of matrix_A is one sample (x0, x1); matrix_y holds the targets.
matrix_A = [[1, 4], [2, 5], [5, 1], [4, 2]]
matrix_y = [19, 26, 19, 20]

theta = [2, 5]           # initial parameters
learning_rate = 0.005    # learning rate
loss = 50                # start above Eps so the first loop runs
iters = 1
Eps = 0.0001

# Stochastic gradient descent: update theta with one randomly chosen sample per iteration.
while loss > Eps and iters < 1000:
    loss = 0
    i = random.randint(0, 3)
    h = theta[0]*matrix_A[i][0] + theta[1]*matrix_A[i][1]   # prediction for sample i
    theta[0] = theta[0] + learning_rate*(matrix_y[i] - h)*matrix_A[i][0]
    theta[1] = theta[1] + learning_rate*(matrix_y[i] - h)*matrix_A[i][1]
    # The stopping loss is the squared error on that same sample (a noisy estimate).
    Error = theta[0]*matrix_A[i][0] + theta[1]*matrix_A[i][1] - matrix_y[i]
    loss = loss + Error*Error
    iters = iters + 1
print('theta=', theta)
print('iters=', iters)
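For comparison, the same stochastic update can be written with NumPy arrays. This is only a sketch under the assumption that NumPy is available; the variable names here are illustrative and not part of the original script.

import numpy as np

rng = np.random.default_rng(0)
A = np.array([[1, 4], [2, 5], [5, 1], [4, 2]], dtype=float)
y = np.array([19, 26, 19, 20], dtype=float)
theta = np.array([2.0, 5.0])
lr = 0.005
for _ in range(1000):
    i = rng.integers(0, len(A))     # one random sample per step
    err = y[i] - A[i] @ theta       # residual on that sample
    theta += lr * err * A[i]        # the same per-sample update, vectorized
print('numpy sgd theta=', theta)    # drifts toward the exact solution [3, 4]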
# Gradient descent over the full data set: reset the state so each method starts from
# the same initial theta, then sweep through all four samples in every iteration
# (theta is updated sample by sample within the sweep).
theta = [2, 5]
loss = 50
iters = 1
while loss > Eps and iters < 1000:
    loss = 0
    for i in range(4):
        h = theta[0]*matrix_A[i][0] + theta[1]*matrix_A[i][1]
        theta[0] = theta[0] + learning_rate*(matrix_y[i] - h)*matrix_A[i][0]
        theta[1] = theta[1] + learning_rate*(matrix_y[i] - h)*matrix_A[i][1]
    # The stopping loss is the sum of squared errors over all samples.
    for i in range(4):
        Error = theta[0]*matrix_A[i][0] + theta[1]*matrix_A[i][1] - matrix_y[i]
        loss = loss + Error*Error
    iters = iters + 1
print('theta=', theta)
print('iters=', iters)
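Note that the loop above updates theta immediately after each sample within the pass. A textbook batch update instead accumulates the gradient over all samples and applies it once per iteration; a minimal sketch of that variant, reusing the data and learning rate defined above:

# Sketch: one accumulated update per full pass over the data.
theta = [2, 5]
for _ in range(1000):
    grad0, grad1 = 0.0, 0.0
    for i in range(4):
        h = theta[0]*matrix_A[i][0] + theta[1]*matrix_A[i][1]
        grad0 += (matrix_y[i] - h)*matrix_A[i][0]
        grad1 += (matrix_y[i] - h)*matrix_A[i][1]
    theta[0] = theta[0] + learning_rate*grad0
    theta[1] = theta[1] + learning_rate*grad1
print('batch theta=', theta)   # converges to [3, 4] for this data and step size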
# Mini-batch gradient descent: reset the state, then update theta with a random
# batch of 2 of the 4 samples in each iteration.
theta = [2, 5]
loss = 50
iters = 1
while loss > Eps and iters < 1000:
    loss = 0
    sample_index = random.sample([0, 1, 2, 3], 2)   # 2 distinct indices per batch
    for i in sample_index:
        h = theta[0]*matrix_A[i][0] + theta[1]*matrix_A[i][1]
        theta[0] = theta[0] + learning_rate*(matrix_y[i] - h)*matrix_A[i][0]
        theta[1] = theta[1] + learning_rate*(matrix_y[i] - h)*matrix_A[i][1]
    # The stopping loss is the sum of squared errors over the current batch only.
    for i in sample_index:
        Error = theta[0]*matrix_A[i][0] + theta[1]*matrix_A[i][1] - matrix_y[i]
        loss = loss + Error*Error
    iters = iters + 1
print('theta=', theta)
print('iters=', iters)
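As a sanity check, the least-squares parameters for this tiny system can also be obtained in closed form. Below is a sketch assuming NumPy is available; for this data the four equations are consistent and the exact solution is theta = [3, 4], which all of the loops above should approach.

import numpy as np

A = np.array([[1, 4], [2, 5], [5, 1], [4, 2]], dtype=float)
y = np.array([19, 26, 19, 20], dtype=float)
theta_exact, *_ = np.linalg.lstsq(A, y, rcond=None)
print('closed-form theta=', theta_exact)   # approximately [3. 4.]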
