import torch
# import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
# Toy dataset: a noisy parabola y = x^2 + noise.
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # shape (100, 1); unsqueeze adds the feature dimension nn.Linear expects
y = x.pow(2) + 0.2 * torch.rand(x.size())  # uniform noise in [0, 0.2)
x, y = Variable(x), Variable(y)  # legacy wrapper: since PyTorch 0.4, plain tensors track gradients themselves
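# Quick sanity check (not in the original script): both tensors are column
# vectors, the layout torch.nn.Linear expects for a batch of 1-D samples.
assert x.size() == y.size() == torch.Size([100, 1])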

class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        # arguments: number of input features, number of hidden units
        self.predict = torch.nn.Linear(n_hidden, n_output)
        # arguments: number of hidden units, number of outputs

    def forward(self, x):
        x = F.relu(self.hidden(x))
        # pass x through the hidden layer, then through the ReLU activation
        x = self.predict(x)
        return x

net = Net(1, 10, 1)  # 1 input feature, 10 hidden units, 1 output
print(net)
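# A minimal alternative sketch (not in the original script): the same 1-10-1
# architecture can be expressed with torch.nn.Sequential, which is convenient
# when forward() is just a straight chain of layers.
net2 = torch.nn.Sequential(
    torch.nn.Linear(1, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 1),
)
print(net2)  # net2 is shown for comparison only and is not trained below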
plt.ion()
# interactive mode: lets the figure redraw in real time during training
plt.show()
# optimize the neural network
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
# torch.optim is torch's optimizer submodule; pass in net's parameters and the learning rate lr
loss_func = torch.nn.MSELoss()  # mean squared error, the standard loss for regression
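# (Aside, beyond the original script: torch.optim also offers adaptive optimizers,
# e.g. torch.optim.Adam(net.parameters(), lr=0.01), which usually needs less
# learning-rate tuning than plain SGD.)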
for t in range(100):
    # 100 training steps
    prediction = net(x)
    loss = loss_func(prediction, y)
    # measure the error between the prediction and the target
    optimizer.zero_grad()
    # clear the gradients computed in the previous iteration, so the optimizer
    # holds only this step's gradients when it updates the parameters
    loss.backward()
    optimizer.step()
    if t % 5 == 0:
        # redraw the fitted curve every 5 steps
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        plt.text(0.5, 0, 'Loss=%.4f' % loss.item(), fontdict={'size': 20, 'color': 'red'})  # loss.item() replaces the pre-0.4 loss.data[0]
        plt.pause(0.1)
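# Expected outcome (an estimate, not from the original script): the red curve
# should track the noisy parabola, with the MSE settling near the noise floor,
# i.e. the variance of the 0.2 * U(0, 1) noise, 0.04 / 12 ≈ 0.0033.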
plt.ioff()
# turn interactive mode off so the final show() blocks with the finished plot
plt.show()