In this lecture the instructor, classifying the methods by their optimization behavior, covered three gradient descent algorithms: (batch) gradient descent, stochastic gradient descent, and mini-batch gradient descent. This post uses the second of these, stochastic gradient descent (SGD), to train a linear model that includes a constant (bias) term.
Video: 《PyTorch深度学习实践》完结合集_哔哩哔哩_bilibili
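Written out, the code below implements a linear model with a bias term, a single-sample squared-error loss, and its gradients:

\hat{y} = wx + b
L(w, b) = (\hat{y} - y)^2 = (wx + b - y)^2
\partial L / \partial w = 2x(wx + b - y)
\partial L / \partial b = 2(wx + b - y)

Each SGD step then updates the parameters with learning rate \alpha = 0.01:

w \leftarrow w - \alpha \, \partial L / \partial w
b \leftarrow b - \alpha \, \partial L / \partial b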
The code is as follows:
import numpy as np
import matplotlib.pyplot as plt

# Build the dataset: five points sampled from y = 2.643x - 1.186
def getDataset():
    val = [[] for i in range(5)]
    for i in range(5):
        val[i].append(i)                    # x value
        val[i].append(2.643 * i - 1.186)    # ground-truth y value
    val = np.array(val)
    return val

# Linear model with a bias term: y_hat = w * x + b
def getPre(x, w, b):
    return w * x + b

# Squared-error loss for a single sample (val = [x, y])
def getLoss(val, w, b):
    y_hat = getPre(val[0], w, b)
    return (y_hat - val[1]) ** 2

# Gradient of the single-sample loss with respect to w and b:
# dL/dw = 2x(wx + b - y), dL/db = 2(wx + b - y)
def gradient(val, w, b):
    w_grad = 2 * val[0] * (w * val[0] + b - val[1])
    b_grad = 2 * (w * val[0] + b - val[1])
    return [w_grad, b_grad]

# Plot the epoch-loss curve
def draw_image_epoch_loss(epoch, loss):
    plt.plot(epoch, loss)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.show()

def run():
    val = getDataset()
    w = 0.0
    b = 0.0
    losses = []
    print("Starting gradient descent at w={0}, b={1}".format(w, b))
    for epoch in range(100):
        # SGD: one parameter update per training sample
        for sample in val:
            w_grad, b_grad = gradient(sample, w, b)
            w -= 0.01 * w_grad   # learning rate = 0.01
            b -= 0.01 * b_grad
            print('\tw : ', w, '\tb : ', b)
        # Record the mean loss over the whole dataset at the end of each epoch
        loss = np.mean([getLoss(sample, w, b) for sample in val])
        losses.append(loss)
        print('Epoch = ', epoch, 'w = ', w, 'b = ', b, 'loss = ', loss)
    draw_image_epoch_loss(np.arange(1, 101), losses)
    print("after {0} epochs w={1}, b={2}".format(epoch + 1, w, b))

if __name__ == '__main__':
    run()
The results are as follows:
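Since the dataset is generated from y = 2.643x - 1.186, w and b should converge toward 2.643 and -1.186 respectively, and the epoch-loss curve should decay toward zero.

As a follow-up to the lecture's third method, here is a minimal mini-batch sketch. It reuses the getDataset and gradient functions defined above; run_minibatch, batch_size, and lr are names introduced here for illustration, not part of the original code. Each update averages the per-sample gradients over a small batch:

import numpy as np

def run_minibatch(batch_size=2, lr=0.01, epochs=100):
    val = getDataset()
    w, b = 0.0, 0.0
    for epoch in range(epochs):
        np.random.shuffle(val)  # visit the samples in a fresh random order each epoch
        for start in range(0, len(val), batch_size):
            batch = val[start:start + batch_size]
            # Average the per-sample gradients over the batch, then take one step
            grads = [gradient(sample, w, b) for sample in batch]
            w -= lr * np.mean([g[0] for g in grads])
            b -= lr * np.mean([g[1] for g in grads])
    print("mini-batch result: w={0}, b={1}".format(w, b))

With batch_size equal to the dataset size every update uses all five samples, which reduces to plain batch gradient descent; with batch_size=1 it recovers the per-sample SGD loop above.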