使用 torch.nn 创建神经网络模型
PyTorch 的 autograd 用于构建计算图;调用 backward 之后,PyTorch 会自动计算各参数的梯度(gradient)
# --- Train a two-layer net with torch.nn, updating weights by hand ---
N = 64        # batch size
D_in = 1000   # input feature dimension
H = 10        # hidden layer width
D_out = 10    # output dimension

# Random training data (regression targets are random too — this is a demo).
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

# Model: Linear -> ReLU -> Linear.
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),   # computes x @ W1.T + b1
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
# model = model.cuda()  # move to GPU

loss_fn = torch.nn.MSELoss(reduction='sum')  # sum-of-squares loss
learning_rate = 1e-6
for t in range(100):
    y_pred = model(x)          # forward pass
    loss = loss_fn(y_pred, y)  # compute loss
    loss.backward()            # backward pass: populate param.grad
    print(t, loss.item())      # .item() prints the scalar, not the tensor repr
    # Update weights manually; no_grad() so the updates are not tracked by autograd.
    with torch.no_grad():
        for param in model.parameters():
            param -= learning_rate * param.grad
    model.zero_grad()          # clear gradients (they accumulate otherwise)
使用优化器更新参数
使用 optim 包来更新参数。optim 包提供多种常见的优化算法,例如 SGD+momentum、RMSProp、Adam 等
# --- Same training loop, but parameter updates done by torch.optim ---
# (changes relative to the manual-update loop above)
# model = model.cuda()  # move to GPU
# torch.nn.init.normal_(model[0].weight)
# torch.nn.init.normal_(model[2].weight)

# Loss function.
loss_fn = torch.nn.MSELoss(reduction='sum')
# NOTE(review): 1e-6 is unusually small for Adam (typical range 1e-4..1e-3);
# kept as-is to preserve the original hyperparameter.
learning_rate = 1e-6
# Optimizer over all model parameters.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
for t in range(100):
    y_pred = model(x)           # forward pass
    loss = loss_fn(y_pred, y)   # compute loss
    optimizer.zero_grad()       # clear gradients from the previous step
    loss.backward()             # backward pass
    optimizer.step()            # update parameters
    print(t, loss.item())       # .item() prints the scalar loss
将模型结构封装成类,使用时传入超参数即可完成模型初始化
# Encapsulate the model as a reusable nn.Module subclass.
class TwoLayerNet(torch.nn.Module):
    """Two-layer fully connected network: Linear -> ReLU -> Linear.

    Both layers are bias-free; layer sizes come from the constructor
    hyperparameters (D_in, H, D_out).
    """

    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        # Register the two learnable layers (bias disabled).
        self.linear1 = torch.nn.Linear(D_in, H, bias=False)
        self.linear2 = torch.nn.Linear(H, D_out, bias=False)

    def forward(self, x):
        """Forward pass: clamp at zero acts as the ReLU nonlinearity."""
        hidden = self.linear1(x)
        activated = hidden.clamp(min=0)
        return self.linear2(activated)
# --- Train the encapsulated model ---
# Instantiate the model from hyperparameters.
model = TwoLayerNet(D_in, H, D_out)
loss_fn = torch.nn.MSELoss(reduction='sum')  # sum-of-squares loss
learning_rate = 1e-6
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Training loop.
for t in range(100):
    y_pred = model(x)           # forward pass
    loss = loss_fn(y_pred, y)   # compute loss
    optimizer.zero_grad()       # clear gradients from the previous step
    loss.backward()             # backward pass
    optimizer.step()            # update parameters
    print(t, loss.item())       # .item() prints the scalar loss