# Course code from Bilibili, Prof. Liu ("刘二大人")
# PyTorch Deep Learning Practice
# Lecture: Gradient Descent
import torch
import numpy as np
import matplotlib.pyplot as plt
# Sample data for the linear model y = w * x
x_data=[1.,2.,3.] # input samples
y_data=[4.,5.,6.] # target outputs
w=1  # initial weight guess; updated in the training loop below
# Define the model: a single-weight linear map with no bias.
def forward(x):
    """Return the model prediction w * x (w is the module-level weight)."""
    return w * x
# Define the cost function: mean squared error over the whole dataset.
def cost(xs, ys):
    """Return the mean squared error of forward() predictions over (xs, ys).

    Fix: the original accumulated into a local named `cost`, shadowing the
    function itself; replaced with sum() over a generator expression.
    """
    total = sum((forward(x) - y) ** 2 for x, y in zip(xs, ys))
    return total / len(xs)
# Define the gradient of the MSE cost: d(cost)/dw = mean(2*x*(x*w - y)).
def grident(xs, ys):  # NOTE(review): name looks like a typo for "gradient"; kept because the training loop calls it by this name
    """Return the batch-averaged gradient of the cost w.r.t. the weight w."""
    total = sum(2 * x * (x * w - y) for x, y in zip(xs, ys))
    return total / len(xs)
print('Predict (before training)', 4, forward(4))

epoch_list = []  # epoch indices, for plotting
cost_list = []   # cost value recorded at each epoch

# Batch gradient descent: one weight update per full pass over the dataset.
for epoch in range(100):
    cost_val = cost(x_data, y_data)
    grad_val = grident(x_data, y_data)
    w -= 0.01 * grad_val  # learning rate = 0.01
    print('epoch:', epoch, 'w=', w, 'loss=', cost_val)
    # Fix: append scalars, not one-element lists, so the plotted data
    # is a plain sequence of numbers rather than an (N, 1) nesting.
    epoch_list.append(epoch)
    cost_list.append(cost_val)

print('Predict (after training)', 4, forward(4))

plt.plot(epoch_list, cost_list)
plt.xlabel('epoch')
plt.ylabel('cost')
plt.show()
# Lecture: Stochastic Gradient Descent (SGD)
# The weight is updated from the gradient of each individual sample.
import torch
import numpy as np
import matplotlib.pyplot as plt
# -*- coding: utf-8 -*-  (NOTE: a coding declaration only takes effect on the first or second line of the file)
# Sample data for the linear model y = w * x
x_data = [1., 2., 3.]  # input samples
y_data = [4., 5., 6.]  # target outputs
w = 1  # initial weight guess; updated sample-by-sample in the SGD loop below
# Define the model: single-weight linear map, no bias term.
def forward(x):
    """Predict the output for input x using the module-level weight w."""
    return w * x
# Define the per-sample loss: squared error of one prediction.
def loss(x, y):
    """Return the squared error between the prediction for x and target y."""
    residual = forward(x) - y
    return residual ** 2
# Analytic gradient of the per-sample loss: d/dw (x*w - y)^2 = 2*x*(x*w - y).
def gradient(x, y):
    """Return the gradient of the single-sample squared-error loss w.r.t. w."""
    error = x * w - y
    return 2 * x * error
print('Predict (before training)', 4, forward(4))

epoch_list = []  # epoch indices, for plotting
loss_list = []   # loss of the last sample processed in each epoch

# Stochastic gradient descent: update w after EVERY sample.
for epoch in range(100):
    # Fix: the original epoch body had no pass over the samples, so `x` and
    # `y` were stale (or undefined) and `grad` raised NameError. Iterate the
    # dataset sample by sample and use the computed grad_val in the print.
    for x, y in zip(x_data, y_data):
        grad_val = gradient(x, y)
        w -= 0.01 * grad_val  # learning rate = 0.01
        print('\tgrad:', x, y, grad_val)
        l = loss(x, y)
    print('progress:', epoch, 'w=', w, 'loss=', l)
    # Fix: append scalars, not one-element lists, for clean plotting data.
    epoch_list.append(epoch)
    loss_list.append(l)

print('Predict (after training)', 4, forward(4))

plt.plot(epoch_list, loss_list)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()