A two-layer fully-connected network, using the sigmoid function as the activation:
import torch as t
from torch import nn

# Custom fully-connected (linear) layer
class Linear(nn.Module):  # subclass nn.Module
    def __init__(self, in_features, out_features):
        print("constructor called")
        super(Linear, self).__init__()  # equivalent to nn.Module.__init__(self)
        self.w = nn.Parameter(t.randn(in_features, out_features))
        self.b = nn.Parameter(t.randn(out_features))

    def forward(self, x):
        print("forward called")
        x = x.mm(self.w)  # matrix multiplication: x @ w
        return x + self.b.expand_as(x)

class Perceptron(nn.Module):
    def __init__(self, in_features, hidden_features, out_features):
        nn.Module.__init__(self)  # call the parent class constructor
        self.layer1 = Linear(in_features, hidden_features)
        # Linear here is the custom fully-connected layer defined above
        self.layer2 = Linear(hidden_features, out_features)

    def forward(self, x):
        x = self.layer1(x)
        x = t.sigmoid(x)
        return self.layer2(x)

perceptron = Perceptron(3, 4, 1)
input = t.randn(2, 3)  # Variable is deprecated since PyTorch 0.4; a plain tensor carries autograd
output = perceptron(input)
print(output)

for name, param in perceptron.named_parameters():
    print(name, param.size())
Output:
constructor called
constructor called
forward called
forward called
tensor([[-0.4502],
        [-0.3405]], grad_fn=<AddBackward0>)
layer1.w torch.Size([3, 4])
layer1.b torch.Size([4])
layer2.w torch.Size([4, 1])
layer2.b torch.Size([1])
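Because w and b are registered through nn.Parameter, the weights of both layers appear in named_parameters() and receive gradients automatically. Below is a minimal training-step sketch that continues from the code above; the MSE loss, learning rate, and random targets are arbitrary choices for illustration, not part of the original example.

# Minimal training step for the perceptron above (illustrative assumptions:
# MSE loss, SGD with lr=0.1, and random regression targets).
criterion = nn.MSELoss()
optimizer = t.optim.SGD(perceptron.parameters(), lr=0.1)

target = t.randn(2, 1)       # made-up targets, same shape as the output
pred = perceptron(input)     # prints "forward called" twice, once per layer
loss = criterion(pred, target)

optimizer.zero_grad()        # clear any stale gradients
loss.backward()              # fills .grad for layer1.w/b and layer2.w/b
optimizer.step()             # in-place SGD update of every registered parameter

Note that the custom Linear plays the same role as PyTorch's built-in nn.Linear, with two differences: nn.Linear stores its weight transposed (shape out_features x in_features, computing x @ W.T + b) and uses a bounded Kaiming-style initialization instead of t.randn.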