Code from 刘二大人's course on Bilibili
PyTorch Deep Learning Practice
Backpropagation
Gradient propagation can be carried out on the computational graph.
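For this script the model is $\hat{y} = xw$ and the loss is $(\hat{y} - y)^2$, so the gradient that backward() writes into w.grad follows from the chain rule:

$\frac{\partial\,\mathrm{loss}}{\partial w} = 2(\hat{y} - y)\,x = 2x(xw - y)$

For example, at the first sample $x = 1$, $y = 2$ with the initial $w = 1$, this gives $2 \cdot 1 \cdot (1 - 2) = -2$, which is the first grad value the loop below prints.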
import torch

# Sample data
x_data = [1.0, 2.0, 3.0]  # input samples
y_data = [2.0, 4.0, 6.0]  # target outputs

w = torch.tensor([1.0])  # initial weight value
w.requires_grad = True   # enable gradient computation (off by default)

def forward(x):
    return x * w

def loss(x, y):  # builds the computational graph
    y_pred = forward(x)
    return (y_pred - y) ** 2

print("predict (before training)", 4, forward(4).item())

for epoch in range(100):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)  # l is a tensor; the forward pass builds the graph and computes the loss
        l.backward()
        print('\tgrad:', x, y, w.grad.item())
        w.data = w.data - 0.01 * w.grad.data  # when updating the weight, note that grad is also a tensor
        w.grad.data.zero_()  # after the update, remember to set the grad to zero
    print('progress:', epoch, l.item())  # read the loss with l.item(); using l directly would build up the graph

print("predict (after training)", 4, forward(4).item())
Exercise
$\hat{y} = w_1 x^2 + w_2 x + b$
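Applying the same chain rule to this model, the gradients that backward() computes are:

$\frac{\partial\,\mathrm{loss}}{\partial w_1} = 2(\hat{y} - y)\,x^2, \qquad \frac{\partial\,\mathrm{loss}}{\partial w_2} = 2(\hat{y} - y)\,x, \qquad \frac{\partial\,\mathrm{loss}}{\partial b} = 2(\hat{y} - y)$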
import torch

# Sample data
x_data = [1.0, 2.0, 3.0]  # input samples
y_data = [2.0, 4.0, 6.0]  # target outputs

w1 = torch.tensor([1.0])  # initial weight values
w1.requires_grad = True   # enable gradient computation (off by default)
w2 = torch.tensor([1.0])
w2.requires_grad = True
b = torch.tensor([1.0])
b.requires_grad = True

def forward(x):
    return w1 * x ** 2 + w2 * x + b

def loss(x, y):  # builds the computational graph
    y_pred = forward(x)
    return (y_pred - y) ** 2

print("predict (before training)", 4, forward(4).item())

for epoch in range(100):
    l = loss(1, 2)  # defines l before the inner loop so the print below always works; no other purpose
    for x, y in zip(x_data, y_data):
        l = loss(x, y)  # l is a tensor; the forward pass builds the graph and computes the loss
        l.backward()
        print('\tgrad:', x, y, w1.grad.item(), w2.grad.item(), b.grad.item())
        w1.data = w1.data - 0.01 * w1.grad.data  # when updating the weights, note that grad is also a tensor
        w2.data = w2.data - 0.01 * w2.grad.data
        b.data = b.data - 0.01 * b.grad.data
        w1.grad.data.zero_()  # after the update, remember to set the grads to zero
        w2.grad.data.zero_()
        b.grad.data.zero_()
    print('progress:', epoch, l.item())  # read the loss with l.item(); using l directly would build up the graph

print("predict (after training)", 4, forward(4).item())