目的:利用简单神经网络逼近一个函数
第一步:逼近的函数
x=np.linspace(-4,4,100000)
y=3*np.sin(x)+2*np.cos(x)
第二步:神经网络
---------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
===========================================================================
Linear-1 [[100, 1]] [100, 120] 240
Sigmoid-1 [[100, 120]] [100, 120] 0
Linear-2 [[100, 120]] [100, 1] 121
===========================================================================
第三步:
设置优化函数,损失函数,训练
第四步:
结果
下面是完整代码:
import paddle
import numpy as np
import matplotlib.pyplot as plt
# Target function to approximate: y = 3*sin(x) + 2*cos(x), sampled densely on [-4, 4].
x = np.linspace(-4, 4, 100000)
y = 3 * np.sin(x) + 2 * np.cos(x)
class mynet(paddle.nn.Layer):
    """Small MLP (1 -> 120 -> 1) with a sigmoid hidden activation.

    Used as a universal function approximator for y = 3*sin(x) + 2*cos(x).
    """

    def __init__(self):
        super().__init__()
        # Attribute names (ln1/sig/ln2) are kept stable: they define the
        # parameter names in the state dict.
        self.ln1 = paddle.nn.Linear(1, 120)
        self.sig = paddle.nn.Sigmoid()
        self.ln2 = paddle.nn.Linear(120, 1)

    def forward(self, x):
        # Hidden layer with sigmoid activation, then a linear readout.
        return self.ln2(self.sig(self.ln1(x)))
class mydata(paddle.io.Dataset):
    """Dataset of (x, y) samples for the target function y = 3*sin(x) + 2*cos(x).

    Args:
        x: 1-D array of sample points.
    """

    def __init__(self, x):
        self.x = x
        self.y = 3 * np.sin(x) + 2 * np.cos(x)

    def __getitem__(self, item):
        # Wrap the scalar in a list so every sample has shape [1]; the
        # DataLoader then stacks batches to [batch, 1], matching the trailing
        # feature dim expected by Linear(1, 120) (and the label shape expected
        # by MSELoss). A bare scalar would stack to [batch] and fail.
        x = paddle.to_tensor([self.x[item]], dtype='float32')
        y = paddle.to_tensor([self.y[item]], dtype='float32')
        return x, y

    def __len__(self):
        return len(self.x)
# Training setup: minibatches of 100, MSE loss, Adam optimizer.
loader = paddle.io.DataLoader(mydata(x), batch_size=100, shuffle=True)
criterion = paddle.nn.MSELoss()
net = mynet()
optimizer = paddle.optimizer.Adam(learning_rate=0.1, parameters=net.parameters())

# Two passes over the data; report the mean batch loss per epoch.
for epoch in range(2):
    epoch_losses = []
    for batch_x, batch_y in loader:
        optimizer.clear_grad()
        loss = criterion(net(batch_x), batch_y)
        loss.backward()
        optimizer.step()
        epoch_losses.append(loss.item())
    print(epoch, np.mean(epoch_losses))
print("finish")
# Evaluation: plot the target function (left) next to the network's fit (right).
net.eval()
_, ax = plt.subplots(1, 2)
ax[0].plot(x, y, color='red')
# Predict all 100,000 points in ONE batched forward pass under no_grad,
# instead of a Python loop of single-sample calls that each build an
# autograd graph — identical outputs, far faster and far less memory.
with paddle.no_grad():
    inputs = paddle.to_tensor(x.reshape(-1, 1), dtype='float32')
    lss = net(inputs).numpy().flatten()
ax[1].plot(x, lss, color='blue')
plt.show()