# 1. Dynamically displaying results in PyCharm (plain-script version)
import matplotlib.pyplot as plt
import torch
import numpy as np
import torch.nn as nn
# x: 100 evenly spaced points on [-pi, pi], unsqueezed to shape (100, 1)
# because nn.Linear expects a 2-D (batch, features) input.
x = torch.unsqueeze(torch.linspace(-np.pi, np.pi, 100), dim=1)
# y: noisy sine target. Use torch.sin (not np.sin) so the whole pipeline
# stays in torch Tensors instead of mixing numpy and torch.
y = torch.sin(x) + 0.5 * torch.rand(x.size())
class Net(nn.Module):
    """Small MLP for 1-D regression: 1 -> 20 -> 20 -> 1 with ReLU activations."""

    def __init__(self):
        super(Net, self).__init__()
        # Three fully connected layers; hidden width 20 is enough to fit sin(x).
        self.predict = nn.Sequential(
            nn.Linear(1, 20),
            nn.ReLU(),
            nn.Linear(20, 20),
            nn.ReLU(),
            nn.Linear(20, 1),
        )

    def forward(self, x):
        """Forward pass: map input of shape (N, 1) to predictions of shape (N, 1)."""
        prediction = self.predict(x)
        return prediction
# --- Train and animate the fit (PyCharm / plain-script version) ---
net = Net()
optimizer = torch.optim.SGD(net.parameters(), lr=0.05)
loss_func = nn.MSELoss()

plt.ion()  # interactive mode: figures update without blocking the loop
for epoch in range(1000):
    out = net(x)                 # forward pass
    loss = loss_func(out, y)     # MSE between prediction and noisy target
    optimizer.zero_grad()        # clear gradients accumulated from last step
    loss.backward()              # backpropagate
    optimizer.step()             # apply the SGD update

    if epoch % 25 == 0:
        plt.cla()  # wipe the previous frame before redrawing
        plt.scatter(x.numpy(), y.numpy())
        # detach() instead of the legacy .data attribute before converting to numpy
        plt.plot(x.numpy(), out.detach().numpy(), "r", lw=5)
        # loss.item() prints a plain float, not the full tensor repr
        plt.text(0.5, 0, f"{loss.item():.4f}", fontdict={"size": 20, "color": "red"})
        # pause() both sleeps and flushes the redraw; a per-frame show() is redundant
        plt.pause(0.1)

plt.ioff()
plt.show()  # block at the end so the final figure stays on screen
# 2. Dynamically displaying results in a Jupyter notebook
# --- Train and animate the fit (Jupyter notebook version) ---
# NOTE(review): this branch calls display.clear_output but the file never
# imports it — add `from IPython import display` when running in a notebook.
net = Net()
optimizer = torch.optim.SGD(net.parameters(), lr=0.05)
loss_func = nn.MSELoss()

for epoch in range(1000):
    out = net(x)                 # forward pass
    loss = loss_func(out, y)     # MSE between prediction and noisy target
    optimizer.zero_grad()        # clear gradients accumulated from last step
    loss.backward()              # backpropagate
    optimizer.step()             # apply the SGD update

    if epoch % 25 == 0:
        # Replace the previous cell output in place so the plot "animates".
        display.clear_output(wait=True)
        plt.scatter(x.numpy(), y.numpy())
        # detach() instead of the legacy .data attribute before converting to numpy
        plt.plot(x.numpy(), out.detach().numpy(), "r", lw=5)
        # loss.item() prints a plain float, not the full tensor repr
        plt.text(0.5, 0, f"{loss.item():.4f}", fontdict={"size": 20, "color": "red"})
        plt.show()
        plt.pause(0.1)
# Summary: live visualization of a neural network solving a regression problem in Jupyter.