import torch
import torch.utils.data as Data
import matplotlib.pyplot as plt
torch.manual_seed(1)    # reproducible (seeds the CPU RNG)
BATCH_SIZE = 32         # mini-batch size

# fake dataset
x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)    # input data, shape (1000, 1)
y = x.pow(2) + 0.1 * torch.normal(torch.zeros(*x.size()))  # noisy targets: y = x^2 + noise

# plot the dataset
plt.scatter(x.numpy(), y.numpy())
plt.show()

# use the DataLoader from the previous section for shuffled mini-batches
torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True,)
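Before moving on, it can help to check what the loader yields. A minimal sketch (assuming the `loader` defined above) that prints the shape of the first mini-batch:

# Inspect a single mini-batch to confirm its shape (BATCH_SIZE rows, 1 feature column).
for step, (batch_x, batch_y) in enumerate(loader):
    print(step, batch_x.shape, batch_y.shape)
    break  # one batch is enough for a sanity check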
Create the neural network
import torch
import torch.nn.functional as F   # activation functions

LR = 0.01   # learning rate

# default network structure
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)    # hidden layer
        self.predict = torch.nn.Linear(20, 1)   # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))   # activation function for hidden layer
        x = self.predict(x)          # linear output
        return x
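As a quick sanity check (not part of the original code), the network can be instantiated and run on a dummy input to confirm the layer sizes:

# Hypothetical sanity check: build one Net and verify the input/output shapes.
net = Net()
print(net)                  # shows the hidden (1 -> 20) and predict (20 -> 1) layers
dummy = torch.zeros(4, 1)   # 4 samples with 1 feature each
print(net(dummy).shape)     # expected: torch.Size([4, 1])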
# The network definition ends here. To compare the optimizers below,
# create one net for each optimizer.
net_SGD = Net()
net_Momentum = Net()
net_RMSprop = Net()
net_Adam = Net()
nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]

# different optimizers, one per net
opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9,0.99))
optimizers =[opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]
loss_func = torch.nn.MSELoss()
losses_his = [[], [], [], []]   # record the training loss of each network
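The momentum=0.8 argument makes SGD keep a running velocity of past gradients instead of stepping along each raw gradient. A simplified, illustrative sketch of that update rule (assumed here for explanation only; it ignores dampening and weight decay):

# Per-parameter update that SGD with momentum roughly performs each step.
def sgd_momentum_step(param, grad, velocity, lr=0.01, momentum=0.8):
    velocity = momentum * velocity + grad   # accumulate a running velocity across steps
    param = param - lr * velocity           # move along the smoothed direction
    return param, velocity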
Training
from 数据集 import loader                                     # the DataLoader built in the dataset section
from 神经网络 import nets, optimizers, losses_his, loss_func  # the nets and optimizers built above
import matplotlib.pyplot as plt
# training loop
EPOCH = 12

for epoch in range(EPOCH):
    print('Epoch: ', epoch)
    for _, (b_x, b_y) in enumerate(loader):
        # train the network that belongs to each optimizer
        for net, opt, l_his in zip(nets, optimizers, losses_his):
            output = net(b_x)                # get output for every net
            loss = loss_func(output, b_y)    # compute loss for every net
            opt.zero_grad()                  # clear gradients for next train
            loss.backward()                  # backpropagation, compute gradients
            opt.step()                       # apply gradients
            l_his.append(loss.data.numpy())  # record the loss value, so the optimizers can be compared
labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
for i, l_his in enumerate(losses_his):
    plt.plot(l_his, label=labels[i])
plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.ylim((0,0.2))
plt.show()
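As an optional follow-up (a sketch, not in the original tutorial), the recorded histories can also be compared numerically, for example by averaging the last 50 loss values per optimizer:

import numpy as np
for label, l_his in zip(labels, losses_his):
    print(label, np.mean(l_his[-50:]))   # rough end-of-training loss per optimizer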