#批量梯度下降法
x = [[1,1,4],[1,2,5],[1,5,1],[1,4,2]]
y = [19,26,19,20]
loss = 10
Iter = 0
theta = [1,1,1]
step = 0.01
precision = 0.0001
MAX_Iters =100000
err0=[0,0,0,0]
err1=[0,0,0,0]
err2=[0,0,0,0]
errSum =[0,0,0]
while(Iter < MAX_Iters and loss > precision):
loss = 0
errSum =[0,0,0]
for index in range(len(x)):
prediction = theta[0]*x[index][0]+theta[1]*x[index][1]+theta[2]*x[index][2]
err0[index] = (prediction - y[index])*x[index][0]
errSum[0] += err0[index]
err1[index] = (prediction - y[index])*x[index][1]
errSum[1] += err1[index]
err2[index] = (prediction - y[index])*x[index][2]
errSum[2] += err2[index]
for index in range(3):
theta[index] = theta[index] -step*errSum[index]/4
for index in range(4):
prediction = theta[0]*x[index][0]+theta[1]*x[index][1]+theta[2]*x[index][2]
error = (1/(2*4))*(prediction - y[index])**2
loss += error
Iter = Iter+1
print("Iter",Iter)
print(error)
print ("theta", theta)
# Stochastic gradient descent (SGD): each parameter update uses the
# gradient of a SINGLE randomly chosen training sample.
import random

x = [[1, 1, 4], [1, 2, 5], [1, 5, 1], [1, 4, 2]]
y = [19, 26, 19, 20]

# Initial values.
theta = [1, 1, 1]    # initial parameters
step = 0.01          # learning rate
precision = 0.0001   # stop once the full loss falls below this
MAX_Inter = 100000   # hard iteration cap (original spelling kept)
loss = 10            # seeded above `precision` so the loop runs at least once
Iter = 0

while Iter < MAX_Inter and loss > precision:
    # Pick one sample at random and step along its gradient.
    num = random.randint(0, len(x) - 1)
    prediction = theta[0] * x[num][0] + theta[1] * x[num][1] + theta[2] * x[num][2]
    residual = prediction - y[num]
    for j in range(3):
        theta[j] = theta[j] - step * residual * x[num][j]

    # Evaluate the FULL loss over all samples: (1/2) * sum of squared residuals.
    loss = 0
    for i in range(len(x)):
        prediction = theta[0] * x[i][0] + theta[1] * x[i][1] + theta[2] * x[i][2]
        loss += (1 / 2) * (prediction - y[i]) ** 2

    Iter = Iter + 1

# Report the final state.  BUG FIX: the original printed `error` (the LAST
# sample's per-sample loss) instead of the total `loss`.
print("Iter", Iter)
print(loss)
print("theta", theta)
# Mini-batch gradient descent (MBGD): each parameter update uses the
# gradient accumulated over a small RANDOM subset (batch) of the data.
# (`random` is already imported at module level by the SGD section above.)
x = [[1, 1, 4], [1, 2, 5], [1, 5, 1], [1, 4, 2]]
y = [19, 26, 19, 20]

theta = [1, 1, 1]    # initial parameters
step = 0.1           # learning rate
precision = 0.0001   # stop once the full loss falls below this
MAX_Iters = 100000   # hard iteration cap
BATCH_SIZE = 2       # mini-batch size
loss = 10            # seeded above `precision` so the loop runs at least once
Iter = 0

while Iter < MAX_Iters and loss > precision:
    # BUG FIX: the original always used samples 0 and 1 as the "batch"
    # (`for index in range(2)`), so samples 2-3 never influenced the
    # gradient and the loop could not converge.  Sample a random batch.
    batch = random.sample(range(len(x)), BATCH_SIZE)

    errSum = [0, 0, 0]
    for i in batch:
        prediction = theta[0] * x[i][0] + theta[1] * x[i][1] + theta[2] * x[i][2]
        residual = prediction - y[i]
        for j in range(3):
            errSum[j] += residual * x[i][j]

    # NOTE: normalization by the full dataset size (4) rather than the batch
    # size is kept from the original — with step = 0.1, dividing by the batch
    # size (2) would make the update exceed the stability bound for some
    # batches and diverge.
    for j in range(3):
        theta[j] = theta[j] - step * errSum[j] / len(x)

    # Full loss after the update: (1/(2m)) * sum of squared residuals.
    loss = 0
    for i in range(len(x)):
        prediction = theta[0] * x[i][0] + theta[1] * x[i][1] + theta[2] * x[i][2]
        loss += (1 / (2 * len(x))) * (prediction - y[i]) ** 2

    Iter = Iter + 1

# Report the final state.  BUG FIX: the original printed `error` (the LAST
# sample's per-sample loss) instead of the total `loss`.
print("Iter", Iter)
print(loss)
print("theta", theta)
# GD / BGD / MBGD algorithms implemented in Python
# (scraped article footer: "latest recommended article published 2024-06-07 08:27:00")