Modifying Existing Layers and Defining Custom Layers in PyTorch

1. Adding parameters to an existing layer. Taking Linear as the example, the version below adds a weight_c parameter (and a matching bias_c):

import math

import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from torch.nn import functional as F
from torch.nn import init

class Linear(Module):
    r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`

    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        bias: If set to ``False``, the layer will not learn an additive bias.
            Default: ``True``

    Shape:
        - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
          additional dimensions and :math:`H_{in} = \text{in\_features}`
        - Output: :math:`(N, *, H_{out})` where all but the last dimension
          are the same shape as the input and :math:`H_{out} = \text{out\_features}`.

    Attributes:
        weight: the learnable weights of the module of shape
            :math:`(\text{out\_features}, \text{in\_features})`. The values are
            initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
            :math:`k = \frac{1}{\text{in\_features}}`
        bias:   the learnable bias of the module of shape :math:`(\text{out\_features})`.
                If :attr:`bias` is ``True``, the values are initialized from
                :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                :math:`k = \frac{1}{\text{in\_features}}`

    Examples::

        >>> m = nn.Linear(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    """
    __constants__ = ['bias', 'in_features', 'out_features']

    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        self.weight_c = Parameter(torch.Tensor(out_features, in_features))  # added parameter
        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
            self.bias_c = Parameter(torch.Tensor(out_features))  # added parameter
        else:
            # register_parameter registers by string name; self.bias is then None
            self.register_parameter('bias', None)
            self.register_parameter('bias_c', None)
        self.reset_parameters()  # initialize the parameters, including the added ones

    def reset_parameters(self):
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        init.kaiming_uniform_(self.weight_c, a=math.sqrt(5))  # the added weight needs initialization too
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
            init.uniform_(self.bias_c, -bound, bound)  # likewise for the added bias

    def forward(self, input):
        # Guard against bias=False, in which case both bias tensors are None
        bias = self.bias + self.bias_c if self.bias is not None else None
        return F.linear(input, self.weight + self.weight_c, bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )
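
After this change the layer is used exactly like the stock nn.Linear. A minimal sanity check (a sketch; the shapes mirror the docstring example) confirms that the added tensors are ordinary Parameters, visible to named_parameters() and to autograd:

m = Linear(20, 30)
x = torch.randn(128, 20)
y = m(x)
print(y.size())  # torch.Size([128, 30])
print([name for name, _ in m.named_parameters()])  # ['weight', 'weight_c', 'bias', 'bias_c']
y.sum().backward()
print(m.weight_c.grad.shape)  # torch.Size([30, 20])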

2. Defining a custom layer

import torch
import torch.nn as nn
import math
from torch.nn.init import xavier_uniform_


class layers(nn.Module):
    def __init__(self, input_size, out_size, eps=1e-12, bias=True):
        """A fully connected layer with a tanh activation: y = tanh(x @ weight + bias)."""
        super(layers, self).__init__()
        self.weight = nn.Parameter(torch.Tensor(input_size, out_size))
        # self.weight = nn.Parameter(torch.ones(input_size, out_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_size))
        else:
            self.register_parameter('bias', None)
        self.activation = nn.Tanh()
        self.variance_epsilon = eps  # unused here; leftover from a layer-norm template
        self._reset_parameters()
        self._reset_parameters()

    # # Alternative parameter initialization:
    # def reset_parameters(self):
    #     stdv = 1. / math.sqrt(self.weight.size(0))
    #     self.weight.data.uniform_(-stdv, stdv)
    #     if self.bias is not None:
    #         self.bias.data.uniform_(-stdv, stdv)

    def _reset_parameters(self):
        r"""Initialize parameters: Xavier uniform for matrices, a small constant for vectors."""
        for p in self.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)
            else:
                nn.init.constant_(p, 0.1)  # constant_ is the in-place form; nn.init.constant is deprecated

    def forward(self, x):
        u = torch.mm(x, self.weight)
        if self.bias is not None:
            u = self.bias + u
        u = self.activation(u)
        return u


inputs = torch.rand(10, 32)
layer = layers(32, 40)
out = layer(inputs)
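
Because the layer registers its tensors through nn.Parameter, it trains like any built-in module. A minimal sketch of one optimization step, continuing from the lines above (the SGD optimizer and squared-error loss are placeholder choices):

import torch.optim as optim

optimizer = optim.SGD(layer.parameters(), lr=0.1)
target = torch.zeros(10, 40)  # placeholder target, purely for illustration

out = layer(inputs)
loss = ((out - target) ** 2).mean()  # simple squared-error loss
optimizer.zero_grad()
loss.backward()
optimizer.step()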

Further reading: a summary of parameter initialization methods in PyTorch.
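
For quick reference, a few of the initializers from torch.nn.init that appear above (a sketch, not an exhaustive list):

import math
import torch
import torch.nn as nn

w = torch.empty(3, 5)
nn.init.xavier_uniform_(w)                   # Glorot/Xavier uniform
nn.init.kaiming_uniform_(w, a=math.sqrt(5))  # He/Kaiming uniform, as nn.Linear uses
nn.init.uniform_(w, -0.1, 0.1)               # uniform in [-0.1, 0.1]
nn.init.constant_(w, 0.1)                    # fill with a constant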
