PyCharm的安装
安装PyTorch环境
# Create a new Python environment with conda
conda create -n ai-basic python=3.7
# Activate the newly created Python virtual environment
conda activate ai-basic
# Install PyTorch and its companion packages
pip install torch torchvision torchaudio
实现反向传播
import torch
from torch.utils.data import DataLoader, TensorDataset
import matplotlib.pyplot as plt
# Build a noisy quadratic dataset: 100 points of y = x^2 + uniform noise in [0, 0.2)
X = torch.linspace(-1, 1, 100).unsqueeze(1)
Y = X ** 2 + 0.2 * torch.rand(X.size())
# Wrap the tensors so they can be iterated as (x, y) samples
dataset = TensorDataset(X, Y)
# Mini-batch loader: shuffled batches of 32
loader = DataLoader(dataset, batch_size=32, shuffle=True)
class Net(torch.nn.Module):
    """A minimal two-layer MLP: Linear -> ReLU -> Linear.

    Args:
        n_feature: number of input features.
        n_hidden: number of hidden-layer neurons.
        n_output: number of output features.
    """

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        # Hidden layer
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        # Output layer (no activation: raw regression output)
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Forward pass: hidden layer + ReLU, then the linear output layer."""
        x = torch.relu(self.hidden(x))
        x = self.predict(x)
        return x
# Define the network: one hidden layer with 10 neurons
net = Net(1, 10, 1)
# Loss function (mean squared error) and SGD optimizer
loss_func = torch.nn.MSELoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
# Train for 100 epochs over shuffled mini-batches
for t in range(100):
    for x, y in loader:
        # Forward pass
        prediction = net(x)
        # Compute the loss
        loss = loss_func(prediction, y)
        # Zero the gradients first — PyTorch accumulates them by default
        optimizer.zero_grad()
        # Backpropagation
        loss.backward()
        # Update the parameters
        optimizer.step()
    # Log the last batch's loss every 5 epochs
    # NOTE(review): original indentation was lost; this print is assumed to be
    # at epoch level (once per 5 epochs), not per batch — confirm intent.
    if t % 5 == 0:
        print('t:', t, 'loss:', loss.item())
# Evaluate the trained network on the full input range
fitted = net(X)
# Scatter plot of the raw training data
plt.scatter(X.detach().numpy(), Y.detach().numpy())
# Overlay the network's fitted curve in red
plt.plot(X.detach().numpy(), fitted.detach().numpy(), 'r-', lw=5)
plt.show()