(6) Implementing a custom layer in PyTorch:
To implement a custom layer in PyTorch, subclass torch.autograd.Function and implement its forward and backward methods, as shown in the code below:
# -*- coding:utf-8 -*-
import torch
from torch.autograd import Variable
class MyReLU(torch.autograd.Function):
    def forward(self, input):
        # Save the input so backward can use it to mask the gradient.
        self.save_for_backward(input)
        return input.clamp(min=0)

    def backward(self, grad_output):
        input, = self.saved_tensors
        # ReLU gradient: pass grad_output through where the input was
        # non-negative, and zero it out elsewhere.
        grad_input = grad_output.clone()
        grad_input[input < 0] = 0
        return grad_input
dtype = torch.FloatTensor
# dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold input and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in).type(dtype), requires_grad=False)
y = Variable(torch.randn(N, D_out).type(dtype), requires_grad=False)
# Create random Tensors for weights, and wrap them in Variables.
w1 = Variable(torch.randn(D_in, H).type(dtype), requires_grad=True)
w2 = Variable(torch.randn(H, D_out).type(dtype), requires_grad=True)
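The excerpt stops at the weight initialization. Below is a minimal training-loop sketch showing how MyReLU would be used in this legacy (pre-0.4) API, where the Function is instantiated and called directly; the learning_rate value and iteration count are illustrative assumptions, not from the original:

learning_rate = 1e-6  # hypothetical value chosen for illustration
for t in range(500):
    # Instantiate the custom Function and apply it in the forward pass
    # of a two-layer network (legacy pre-0.4 calling convention).
    relu = MyReLU()
    y_pred = relu(x.mm(w1)).mm(w2)

    # Sum-of-squared-errors loss on the output Variable.
    loss = (y_pred - y).pow(2).sum()

    # Backpropagate; this invokes MyReLU.backward for the custom layer.
    loss.backward()

    # Manual SGD update on the weights, then reset the accumulated gradients.
    w1.data -= learning_rate * w1.grad.data
    w2.data -= learning_rate * w2.grad.data
    w1.grad.data.zero_()
    w2.grad.data.zero_()

Note that from PyTorch 0.4 onward, custom Functions define forward and backward as @staticmethods taking a ctx argument (with ctx.save_for_backward / ctx.saved_tensors) and are invoked via MyReLU.apply(input); the instance-based style shown here is the legacy API.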