PyTorch Learning Notes: Simple nn Examples

Method 1: nn.Sequential with manual gradient updates

import torch

N, D_in, H, D_out = 64, 1000, 100, 10  # batch size, input dim, hidden dim, output dim
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)  # targets carry no gradients

# define our model as a sequence of layers
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out))
# nn defines common loss functions; reduction='sum' replaces the
# deprecated size_average=False
loss_fn = torch.nn.MSELoss(reduction='sum')

learning_rate = 1e-4

for t in range(500):
    # forward pass: feed data to the model, and the prediction to the loss function
    y_pred = model(x)
    loss = loss_fn(y_pred, y)

    # backward pass: compute gradients for all parameters
    model.zero_grad()
    loss.backward()

    # take a gradient step on each model parameter,
    # without recording the update in the autograd graph
    with torch.no_grad():
        for param in model.parameters():
            param -= learning_rate * param.grad
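Since nn.Sequential registers each layer by position, you can check exactly which tensors the manual update loop touches. A quick sketch (expected output shown as comments; the ReLU at index 1 has no parameters, so only the two Linear layers appear):

# inspect the parameters that the manual update loop iterates over
for name, param in model.named_parameters():
    print(name, tuple(param.shape))
# 0.weight (100, 1000)
# 0.bias (100,)
# 2.weight (10, 100)
# 2.bias (10,)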

Method 2: nn.Sequential with a torch.optim optimizer

import torch

N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

# define our model as a sequence of layers
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out))
# nn defines common loss functions
loss_fn = torch.nn.MSELoss(reduction='sum')

learning_rate = 1e-4
# use an optimizer to handle the update rule
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for t in range(500):
    # forward pass: feed data to the model, and the prediction to the loss function
    y_pred = model(x)
    loss = loss_fn(y_pred, y)

    # backward pass: compute gradients for all parameters
    model.zero_grad()
    loss.backward()

    # update all parameters using the computed gradients
    optimizer.step()
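The three-call pattern (zero the gradients, backward, step) is the same for every optimizer in torch.optim, so swapping update rules is a one-line change. A minimal sketch using SGD with momentum instead of Adam; note that optimizer.zero_grad() is the more common spelling and is equivalent to model.zero_grad() here, since the optimizer was given all of the model's parameters:

optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)

for t in range(500):
    loss = loss_fn(model(x), y)
    optimizer.zero_grad()  # clears grads on the same tensors as model.zero_grad()
    loss.backward()
    optimizer.step()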

Defining New Modules with PyTorch nn

import torch
import torch.nn as nn

# define the whole model as a single Module
class TwoLayerNet(nn.Module):
    # the initializer sets up two children (Modules can contain Modules)
    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        self.linear1 = nn.Linear(D_in, H)
        self.linear2 = nn.Linear(H, D_out)

    # define the forward pass using child modules and autograd ops;
    # no need to define backward - autograd handles it
    def forward(self, x):
        h_relu = self.linear1(x).clamp(min=0)  # clamp(min=0) is ReLU
        y_pred = self.linear2(h_relu)
        return y_pred

N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

# construct and train an instance of our model
model = TwoLayerNet(D_in, H, D_out)

criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(500):
    y_pred = model(x)
    loss = criterion(y_pred, y)

    model.zero_grad()
    loss.backward()
    optimizer.step()
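A custom Module also works with PyTorch's standard checkpointing API. A minimal sketch (the file name is arbitrary): save only the learned weights with state_dict, then restore them into a fresh instance built with the same sizes.

# save the learned weights, not the class definition
torch.save(model.state_dict(), 'two_layer_net.pt')

# restore into a new instance constructed with the same dimensions
model2 = TwoLayerNet(D_in, H, D_out)
model2.load_state_dict(torch.load('two_layer_net.pt'))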

 

Reposted from: https://www.cnblogs.com/Joyce-song94/p/7220061.html
