使用torch.nn实现网络的反向传播
# Fit y = sin(x) on [-pi, pi] with a 3rd-degree polynomial, using torch.nn
# modules for the model/loss and a hand-written SGD update for the backward pass.
import torch
import math

x = torch.linspace(-math.pi, math.pi, 2000)
y = torch.sin(x)

# Build the feature matrix [x, x^2, x^3] of shape (2000, 3):
# unsqueeze(-1) turns x into a (2000, 1) column, then pow broadcasts
# against the exponent vector p = (1, 2, 3).
p = torch.tensor([1, 2, 3])
xx = x.unsqueeze(-1).pow(p)

# Model: Linear maps the 3 polynomial features to 1 output per sample;
# Flatten(0, 1) collapses the (2000, 1) result to shape (2000,) to match y.
model = torch.nn.Sequential(
    torch.nn.Linear(3, 1),
    torch.nn.Flatten(0, 1),
)

# Sum-reduced mean squared error as the loss function.
loss_fn = torch.nn.MSELoss(reduction='sum')

learning_rate = 1e-6
for t in range(2000):
    y_pred = model(xx)
    loss = loss_fn(y_pred, y)
    if t % 100 == 99:
        print(t, loss.item())

    # Zero accumulated gradients, backpropagate, then apply a manual
    # SGD step inside no_grad() so the update itself is not tracked.
    model.zero_grad()
    loss.backward()
    with torch.no_grad():
        for param in model.parameters():
            param -= learning_rate * param.grad

# Read the fitted coefficients back out of the linear layer.
linear_layer = model[0]
print(f'结果为 y={linear_layer.bias.item()}+{linear_layer.weight[:,0].item()}x+'
      f'{linear_layer.weight[:,1].item()}x^2 +{linear_layer.weight[:,2].item()}*x^3')
补充知识:
pytorch中squeeze()和unsqueeze():
unsqueeze()用来增加维度
举个例子:
import torch
# A 1-D tensor of shape (3,); unsqueeze(0) prepends a new axis, giving (1, 3).
a = torch.tensor([1, 2, 3])
print(a, a.shape)
row = a.unsqueeze(0)
print(row, row.shape)
tensor([1, 2, 3]) torch.Size([3])
tensor([[1, 2, 3]]) torch.Size([1, 3])
可以看到,使用unsqueeze函数后a的形状的变化,这里的例子是在第1维上增加维度,那么试一下在第二维上改变是什么效果
import torch
# unsqueeze(1) inserts the new axis after the first one: (3,) becomes (3, 1).
a = torch.tensor([1, 2, 3])
print(a, a.shape)
col = a.unsqueeze(1)
print(col, col.shape)
# NOTE(review): this snippet is a byte-for-byte duplicate of the one above;
# the article probably intended to show the printed output here
# (tensor([[1], [2], [3]]) torch.Size([3, 1])) — confirm against the original post.
import torch
a = torch.tensor([1,2,3])
print(a,a.shape)
print(a.unsqueeze(1),a.unsqueeze(1).shape)
squeeze()用来减少维度
下面的例子中,首先创建一个两行三列的张量b,使用unsqueeze进行对第一维度升维,随后使用squeeze进行降维度
import torch
# Build a 2x3 tensor, grow a leading length-1 axis with unsqueeze(0),
# then remove that axis again with squeeze(0) — a shape round trip.
b = torch.arange(0, 6).reshape(2, 3)
print(b, b.shape)
c = b.unsqueeze(0)
print(c, c.shape)
d = c.squeeze(0)
print(d, d.shape)
tensor([[0, 1, 2],
[3, 4, 5]]) torch.Size([2, 3])
tensor([[[0, 1, 2],
[3, 4, 5]]]) torch.Size([1, 2, 3])
tensor([[0, 1, 2],
[3, 4, 5]]) torch.Size([2, 3])