from matplotlib import pyplot as plt
import random

# Generate the data: y = 2x + 4 plus uniform noise in [-4, 4]
def data():
    x = range(10)
    y = [(2*i+4) for i in x]
    for i in range(10):
        y[i] = y[i] + random.randint(0, 8) - 4
    return x, y

# Train with stochastic gradient descent: each step uses one randomly chosen sample
def SGD(x, y):
    error0 = 0
    step_size = 0.001
    esp = 1e-6
    #a = random.randint(0,4)
    #b = random.randint(0,8)
    a = 1.2  # fixed initial values for a and b (the commented lines above would pick random ones)
    b = 3.5
    m = len(x)
    n = 0
    while True:
        i = random.randint(0, m-1)  # pick one sample at random
        print(i)
        sum0 = a * x[i] + b - y[i]           # partial derivative w.r.t. b for this sample
        sum1 = (a * x[i] + b - y[i]) * x[i]  # partial derivative w.r.t. a for this sample
        error1 = (a * x[i] + b - y[i])**2    # squared error of the model on this sample
        a = a - sum1 * step_size / m
        b = b - sum0 * step_size / m
        print('a=%f,b=%f,error=%f' % (a, b, error1))
        if abs(error1 - error0) < esp:  # error barely changes, stop iterating
            break
        error0 = error1
        n = n + 1
        if n % 20 == 0:
            print('iteration %d' % n)
        if n > 500:
            break
    return a, b

if __name__ == '__main__':
    x, y = data()
    a, b = SGD(x, y)
    X = range(10)
    Y = [(a*i+b) for i in X]
    plt.scatter(x, y, color='red')  # noisy training points
    plt.plot(X, Y)                  # fitted line
    plt.show()
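For reference, the script above fits y ≈ a*x + b by minimizing the squared error. For a single sample (x_i, y_i) the error and its partial derivatives are

    e_i(a, b) = (1/2) * (a*x_i + b - y_i)^2
    ∂e_i/∂a   = (a*x_i + b - y_i) * x_i
    ∂e_i/∂b   = a*x_i + b - y_i

which is exactly what sum1 and sum0 compute (the printed error1 omits the 1/2, which does not change the gradient step). Accumulating these terms over all m samples instead of picking one at random gives batch gradient descent, which the next script implements.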
import matplotlib.pyplot as plt
import random

# Generate the data: y = 2x + 4 plus uniform noise in [-4, 4]
def data():
    x = range(10)
    y = [(2*s+4) for s in x]
    for i in range(10):
        y[i] = y[i] + random.randint(0, 8) - 4
    return x, y

# Train with (batch) gradient descent: every update uses all of the samples
def diedai(x, y):
    flag = True
    a = random.randint(0, 5)
    b = random.randint(0, 10)
    m = len(x)
    arf = 0.005  # learning rate
    n = 0
    exp = 0.000001
    error0 = 0
    error1 = 0
    while flag:
        sum1 = 0
        sum2 = 0
        error1 = 0
        for i in range(m):  # accumulate the partial derivatives over all samples
            sum1 += a * x[i] + b - y[i]
            sum2 += (a * x[i] + b - y[i]) * x[i]
            error1 += (a * x[i] + b - y[i])**2
        a = a - sum2 * arf / m  # update a and b with the averaged gradients
        b = b - sum1 * arf / m
        # Convergence check (mirrors the SGD script): stop when the total error barely changes
        if abs(error1 - error0) < exp:
            flag = False
        error0 = error1
        n = n + 1
        if n % 20 == 0:
            print('iteration %d' % n)
    return a, b
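A minimal driver for this script, mirroring the plotting code of the first one (it assumes the data() and diedai() functions defined above), could look like this:

if __name__ == '__main__':
    x, y = data()
    a, b = diedai(x, y)
    X = range(10)
    Y = [(a*i+b) for i in X]        # fitted line
    plt.scatter(x, y, color='red')  # noisy training points
    plt.plot(X, Y)
    plt.show()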