I. Building a Three-Layer Neural Classification Network
The input dimension is 1000, the hidden layer has 100 units, and the output layer has 10 units, with ReLU as the activation function. The network can be written in two ways; both versions are given below so they can be studied side by side.
1. Building the network with Sequential
import torch

# N is the batch size
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)

loss_fn = torch.nn.MSELoss(reduction='sum')
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for t in range(500):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    # Clear gradients left over from the previous iteration
    model.zero_grad()
    loss.backward()
    print(t, loss.item())
    optimizer.step()
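Once the loop finishes, the trained Sequential model can be used for prediction. The following is a minimal inference sketch (the batch of 5 test samples is made up purely for illustration): the forward pass is wrapped in torch.no_grad() so no gradients are tracked, and model.eval() is called out of habit, although it changes nothing for this particular model since it has no dropout or batch-norm layers.

# Minimal inference sketch (hypothetical test data, for illustration only)
model.eval()                        # a no-op for this model, but good practice
with torch.no_grad():               # no gradients needed for prediction
    test_x = torch.randn(5, D_in)   # 5 made-up test samples
    predictions = model(test_x)
    print(predictions.shape)        # torch.Size([5, 10])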
2. Building the network by subclassing nn.Module
import torch

# N is the batch size
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

class Net(torch.nn.Module):
    def __init__(self, D_in, H, D_out):
        super(Net, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.relu = torch.nn.ReLU()
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        # Equivalent alternative: h_relu = self.linear1(x).clamp(min=0)
        h = self.linear1(x)
        h_relu = self.relu(h)
        y_pred = self.linear2(h_relu)
        return y_pred

model = Net(D_in, H, D_out)
loss_fn = torch.nn.MSELoss(reduction='sum')
learning_rate = 1e-4
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

for t in range(500):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    # Clear accumulated gradients
    model.zero_grad()
    # Backpropagate to compute gradients
    loss.backward()
    print(t, loss.item())
    # Take one optimization step
    optimizer.step()
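Apart from the class definition itself, the second version differs from the first only in the optimizer (plain SGD instead of Adam); the training loop is otherwise identical. One further note: although the section title says "classification", both loops above actually regress random real-valued targets with MSELoss. A true 10-class classifier would normally pair integer class labels with CrossEntropyLoss instead. The sketch below shows one possible variant under that assumption; the random labels are purely illustrative and not part of the original program.

# Classification variant (a sketch with assumed random labels, not the original setup)
import torch

N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randint(0, D_out, (N,))   # integer class labels in [0, 10)

model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
# CrossEntropyLoss applies log-softmax internally, so the model outputs raw logits
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

for t in range(500):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()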