import torch
import torch.nn as nn
# Toy regression: fit random targets with a 1000 -> 100 -> 10 two-layer MLP.
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

# Two linear layers (no bias) with a ReLU in between.
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H, bias=False),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out, bias=False),
)
# The final result depends on initialization; re-initialize both weight
# matrices from a standard normal distribution.
torch.nn.init.normal_(model[0].weight)
torch.nn.init.normal_(model[2].weight)

# Sum-reduced mean-squared error, optimized with Adam.
loss_fn = nn.MSELoss(reduction='sum')
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for it in range(500):
    y_pred = model(x)           # forward pass
    loss = loss_fn(y_pred, y)   # compute loss
    optimizer.zero_grad()       # clear gradients from the previous step
    print(it, loss.item())
    loss.backward()             # backward pass
    optimizer.step()            # update model parameters
Note: the training result depends on how the model's weights are initialized.
model[0].weight
torch.nn.init.normal_(model[0].weight)
torch.nn.init.normal_(model[2].weight)
model[0].bias
PyTorch optim (optimization methods):
optimizer =torch.optim.Adam(model.parameters(),lr=learning_rate)
Defining a custom model:
class TwoLayerNet(torch.nn.Module):
def __init__(self,D_in,H,D_out):
super(TwoLayerNet,self).__init__()
self.linear1=torch.nn.Linear(D_in,H,bias=False)
self.linear2=torch.nn.Linear(H,D_out,bias=False)
def forward(self,x):
y_pred=self.linear2(self.linear1(x).clamp(min=0))
return y_pred
model=TwoLayerNet(D_in,H,D_out)
Summary
- Define the input and output data
- Define the model
- Define the loss function
- Hand the model's parameters to an optimizer (Adam, SGD, ...)
- Run the training loop:
  Forward Pass -> Compute Loss -> Backward Pass -> Update Model Parameters.