Fully Connected Layers with nn.Module

import torch as t
from torch import nn

class Linear(nn.Module):   # subclass nn.Module
    def __init__(self, in_features, out_features):   

        super(Linear, self).__init__()   
        """
        super(),简单地认为成继承父类
        super(子类,self).父类方法(参数)等价于 父类.方法(self,参数)
        super(Linear, self).__init__()等价于nn.Module.__init__(self)
        """
        self.w = nn.Parameter(t.randn(in_features, out_features))
        self.b = nn.Parameter(t.randn(out_features))
        """
        Wrapping w and b in nn.Parameter registers them as learnable parameters;
        a Parameter requires gradients by default (requires_grad=True).
        """
    def forward(self, x):
        """
        forward函数实现向前传播过程
        无需写反向传播函数,nn.Module能够利用autograd自动实现反向传播
        """
        x = x.mm(self.w)
        return x + self.b.expand_as(x)
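
# A minimal sanity check of the autograd claim above (a sketch, kept
# commented out like the demo code further down):
# layer = Linear(4, 3)
# out = layer(t.randn(2, 4))       # batch of 2 samples, 4 features each
# out.sum().backward()             # scalar "loss" -> autograd runs backward
# print(layer.w.requires_grad)     # True: Parameters require grad by default
# print(layer.w.grad.shape)        # torch.Size([4, 3]), same shape as w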
    
# Perceptron: two of the Linear layers defined above, with a sigmoid in between
class Perceptron(nn.Module):
    def __init__(self, in_features, hidden_features, out_features):
        nn.Module.__init__(self)   # equivalent to super(Perceptron, self).__init__()
        self.layer1 = Linear(in_features, hidden_features)
        self.layer2 = Linear(hidden_features, out_features)
    def forward(self, x):
        x = self.layer1(x)
        x = t.sigmoid(x)
        return self.layer2(x)
    
perceptron = Perceptron(3, 4, 1)
for name, para in perceptron.named_parameters():
    print(name, para.size())
    print(name, para)
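
# A quick forward-pass sketch (shapes follow the (3, 4, 1) sizes above):
# input = t.randn(4, 3)            # 4 samples, 3 features each
# output = perceptron(input)
# print(output.shape)              # torch.Size([4, 1]): one value per sample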


# layer = Linear(4, 3)  # 4 input features, 3 output features
# input = t.randn(2, 4)
# output = layer(input)

# A Module's learnable parameters can be retrieved as an iterator via
# named_parameters() or parameters(). named_parameters() recurses into
# submodules and prefixes each name with its attribute path, which is why
# the output below shows layer1.w, layer2.b, and so on.
# for name, parameter in layer.named_parameters():
#     print(name, parameter)
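
# For comparison, PyTorch's built-in torch.nn.Linear does the same job but
# stores its weight transposed, with shape (out_features, in_features), and
# computes x @ weight.t() + bias:
# builtin = nn.Linear(4, 3)
# print(builtin.weight.shape)      # torch.Size([3, 4])
# print(builtin.bias.shape)        # torch.Size([3])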

Output:

layer1.w torch.Size([3, 4])
layer1.w Parameter containing:
tensor([[ 0.1174, -0.8916,  0.2460, -0.5851],
        [ 1.7242, -0.0349,  1.6736,  0.5366],
        [-1.0549,  1.5406,  0.9042,  0.6153]])
layer1.b torch.Size([4])
layer1.b Parameter containing:
tensor([-0.8263,  1.0523,  0.8689, -0.8091])
layer2.w torch.Size([4, 1])
layer2.w Parameter containing:
tensor([[ 1.0545],
        [-1.0831],
        [ 0.0553],
        [-0.3681]])
layer2.b torch.Size([1])
layer2.b Parameter containing:
tensor([-1.0901])