Gradient descent:
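The model is a one-parameter linear function ŷ = w·x. The listing below fits w by full-batch gradient descent: each epoch it computes the mean squared error over the whole training set and moves w against the analytic gradient (these formulas are a restatement of the cost() and gradient() functions in the code; the learning rate 0.1 is hard-coded in the update):

    cost(w) = \frac{1}{N} \sum_{n=1}^{N} (w x_n - y_n)^2
    \frac{\partial\, cost}{\partial w} = \frac{1}{N} \sum_{n=1}^{N} 2 x_n (w x_n - y_n)
    w \leftarrow w - 0.1 \cdot \frac{\partial\, cost}{\partial w}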
import matplotlib.pyplot as plt

x_data = [1.0, 2.0, 3.0, 4.0]
y_data = [2.0, 4.0, 6.0, 8.0]
loss_list = []
w = 1.0  # initial guess for the weight

def forward(x):
    return w * x

def cost(xs, ys):
    # mean squared error over the whole training set
    total = 0
    for x, y in zip(xs, ys):
        pred_y = forward(x)
        total += (y - pred_y) ** 2
    return total / len(xs)

def gradient(xs, ys):
    # analytic d(cost)/dw, averaged over the whole training set
    grad = 0
    for x, y in zip(xs, ys):
        grad += 2 * x * (x * w - y)
    return grad / len(xs)

for epoch in range(100):
    loss = cost(x_data, y_data)
    loss_list.append(loss)
    w = w - 0.1 * gradient(x_data, y_data)  # one update per epoch, using the full-batch gradient
    print(epoch, 'w=', w, 'loss=', loss)

plt.plot(loss_list)
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
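For reference, the same full-batch update can be written without explicit loops. This is a minimal sketch assuming NumPy is available; the names X, Y and the final print are introduced here for illustration and are not part of the original listing:

import numpy as np

X = np.array([1.0, 2.0, 3.0, 4.0])
Y = np.array([2.0, 4.0, 6.0, 8.0])
w = 1.0

for epoch in range(100):
    pred = w * X                          # forward pass on the whole batch
    cost = ((pred - Y) ** 2).mean()       # mean squared error
    grad = (2 * X * (pred - Y)).mean()    # analytic d(cost)/dw
    w -= 0.1 * grad                       # same update rule as the loop version above

print('w=', w, 'cost=', cost)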
Experimental results:
Stochastic gradient descent:
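Stochastic gradient descent differs only in the update granularity: instead of averaging the gradient over the whole data set once per epoch, it updates w after every single sample, using the per-sample gradient (a restatement of the loss() and gradient() functions below, with learning rate a):

    \frac{\partial\, loss}{\partial w} = 2 x (w x - y)
    w \leftarrow w - a \cdot 2 x (w x - y)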
import matplotlib.pyplot as plt

x_data = [1.0, 2.0, 3.0, 4.0]
y_data = [2.0, 4.0, 6.0, 8.0]
loss_list = []
w = 1.0  # initial guess for the weight
a = 0.1  # learning rate

def forward(x):
    return w * x

def loss(x, y):
    # squared error for a single sample
    pred_y = forward(x)
    return (y - pred_y) ** 2

def gradient(x, y):
    # analytic d(loss)/dw for a single sample
    return 2 * x * (x * w - y)

for epoch in range(100):
    for x, y in zip(x_data, y_data):
        los = loss(x, y)
        loss_list.append(los)
        w = w - a * gradient(x, y)  # one update per sample
        print(epoch, 'w=', w, 'loss=', los)

plt.plot(loss_list)
plt.ylabel('loss')
plt.xlabel('update step')
plt.show()
Experimental results:
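Between the two extremes, a common compromise is mini-batch gradient descent: average the loss and gradient over a small batch and update w once per batch. The sketch below illustrates the idea under assumed choices (batch_size = 2 and the slicing loop are illustrative, not part of the original exercise):

import matplotlib.pyplot as plt

x_data = [1.0, 2.0, 3.0, 4.0]
y_data = [2.0, 4.0, 6.0, 8.0]
loss_list = []
w = 1.0
a = 0.1
batch_size = 2  # illustrative choice

for epoch in range(100):
    for i in range(0, len(x_data), batch_size):
        xs = x_data[i:i + batch_size]
        ys = y_data[i:i + batch_size]
        # mean loss and mean gradient over the mini-batch
        batch_loss = sum((y - w * x) ** 2 for x, y in zip(xs, ys)) / len(xs)
        batch_grad = sum(2 * x * (w * x - y) for x, y in zip(xs, ys)) / len(xs)
        w = w - a * batch_grad  # one update per mini-batch
        loss_list.append(batch_loss)

plt.plot(loss_list)
plt.ylabel('loss')
plt.xlabel('update step')
plt.show()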