import math
import matplotlib.pyplot as plt
import numpy as np
import time
start = time.perf_counter()  # wall-clock start; elapsed time is reported at the end
yita = 0.1  # learning rate (eta)
e = 1e-10  # epsilon added to the denominator to avoid division by zero
iters = 5000  # number of iterations
a = 1  # initial slope of the linear model
b = 1  # initial intercept of the linear model
a2 = 0.19711522595118727  # reference slope — presumably a known-good solution; unused in this chunk
b2 = -0.170457672878070  # reference intercept — unused in this chunk
def function_2(a, b, x):
    """Evaluate the linear model with slope *a* and intercept *b* at *x*.

    Works elementwise when *x* is a NumPy array.
    """
    return a * x + b
L1 = 0  # running sum of squared gradients w.r.t. a (AdaGrad-style accumulator)
L2 = 0  # running sum of squared gradients w.r.t. b
k = 1  # iteration counter recorded as the loss plot's x-axis
L3 = []  # x-axis values (iteration numbers) for the loss plot
L4 = []  # loss value recorded at each iteration
# Sample data for the linear fit: y1 are the targets, x1 the inputs (23 points each).
y1 = np.array([0.5153, 0.5638, 0.6697, 0.7716, 0.8428, 0.8980, 0.9338, 0.9979, 1.0738, 1.2110, 1.3146, 1.6068, 1.9252,
2.4108, 3.1976, 3.7771, 4.6893, 5.6561, 6.0460, 6.9405, 8.0277, 8.8620, 9.5578])
x1 = np.array([1.8668, 2.1781, 2.6923, 3.5334, 4.8198, 6.0794, 7.1177, 7.8973, 8.4402, 8.9677, 9.9215, 10.9655, 12.0333,
13.5823, 15.9878, 18.4937, 21.6314, 26.5810, 31.4045, 34.0903, 40.1513, 47.3104, 51.8942])
# AdaGrad-style gradient descent on the MSE loss of the linear fit y ≈ a*x + b.
# Each parameter gets its own adaptive step size: eta / (sqrt(sum of squared
# past gradients) + epsilon).
for i in range(iters):
    e1 = function_2(a, b, x1)              # current predictions on all samples
    diff_b = 2 * (e1 - y1).mean()          # dLoss/db for the mean-squared error
    diff_a = 2 * ((e1 - y1) * x1).mean()   # dLoss/da
    L1 = L1 + diff_a ** 2                  # accumulate squared gradient for a
    L2 = L2 + diff_b ** 2                  # accumulate squared gradient for b
    j11 = math.sqrt(L1)
    ja_alpha = yita / (j11 + e)            # adaptive learning rate for a
    j22 = math.sqrt(L2)                    # (original comment said j11 — copy-paste slip)
    jb_alpha = yita / (j22 + e)            # adaptive learning rate for b
    a = a - ja_alpha * diff_a
    b = b - jb_alpha * diff_b
    Loss = ((a * x1 + b - y1) ** 2).mean() # loss after this update, for the plot
    L4.append(Loss)
    L3.append(k)
    # Bug fix: k was never incremented, so every x-axis entry was 1 and the
    # loss plot collapsed onto a single vertical line.
    k += 1
print(a, b)
end = time.perf_counter()
# Report total runtime so this variant can be compared against other
# gradient-descent flavours (str() wrapper removed: print applies it anyway).
print(end - start)
# Plot the recorded loss history. The x data is the iteration counter, not
# the input array x1, so label the axis accordingly (old label 'x1' was wrong).
plt.plot(L3, L4)
plt.xlabel('iteration')
plt.ylabel('loss')
plt.title('Change of loss')
plt.show()
大家可以看看最后的损失曲线:它收敛在某个接近于 0 的值附近,而且几乎是直线下降,说明收敛速度很快。这里加入运行时间的统计,主要是为了与梯度下降算法的其他变体进行对比。至于理论部分,大家可以参考李宏毅老师的讲解。