Time-Series Channel Attention Module


The inspiration comes from SENet.
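For reference, the original SENet block operates on 2D feature maps. Below is a minimal sketch of the standard squeeze-and-excitation module (the class name and default ratio are my own choices, not from any particular codebase); the time-series module that follows swaps AdaptiveAvgPool2d for AdaptiveAvgPool1d and pools over the length dimension instead of H x W.

import torch
import torch.nn as nn

class SEBlock2d(nn.Module):
    # Standard squeeze-and-excitation for (B, C, H, W) feature maps.
    def __init__(self, channel, ratio=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)   # squeeze: (B, C, H, W) -> (B, C, 1, 1)
        self.fc = nn.Sequential(                  # excitation: bottleneck MLP
            nn.Linear(channel, channel // ratio, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // ratio, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        return x * y                              # reweight each channel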

import torch
import torch.nn as nn
import math

class ts_channel_block(nn.Module):
    def __init__(self, channel, ratio=1):
        super(ts_channel_block, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool1d(1)  # squeeze: pool over the length dimension (innovation)
        self.fc = nn.Sequential(
                nn.Linear(channel, channel // ratio, bias=False),
                nn.ReLU(inplace=True),
                nn.Linear(channel // ratio, channel, bias=False),
                nn.Sigmoid()
        )

    def forward(self, x):
        b, c, l = x.size()                  # (B, C, L)
        # avg_pool: (B, C, L) -> (B, C, 1); view flattens it to (B, C) so it can be fed to the Linear layers
        y = self.avg_pool(x).view(b, c)
        print("y", y.shape)
        # excitation: (B, C) -> (B, C), then reshape to (B, C, 1) so it broadcasts over the length dimension
        y = self.fc(y).view(b, c, 1)
        print("y", y.shape)
        return x * y

tsam = ts_channel_block(7)
tensor = torch.randn(8,7,96)
print(tensor.shape)
output = tsam(tensor)
print(output.shape)

torch.Size([8, 7, 96])
y torch.Size([8, 7])
y torch.Size([8, 7, 1])
torch.Size([8, 7, 96])
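As a quick check of the broadcasting that makes the last step work: multiplying a (B, C, L) tensor by (B, C, 1) attention weights scales every time step of a channel by the same factor. A tiny sketch with hypothetical values:

import torch

x = torch.ones(1, 2, 4)               # (B=1, C=2, L=4)
w = torch.tensor([[[0.5], [2.0]]])    # (1, 2, 1) per-channel weights
print(x * w)
# tensor([[[0.5000, 0.5000, 0.5000, 0.5000],
#          [2.0000, 2.0000, 2.0000, 2.0000]]])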

Modifications to Linear.py in the DLinear codebase

class Model(nn.Module):  # state before the 2022.11.2 changes; this version of the Model runs end to end
    """
    Just one Linear layer
    """
    def __init__(self, configs, channel=7, ratio=1):
        super(Model, self).__init__()

        self.avg_pool = nn.AdaptiveAvgPool1d(1)  # squeeze over the length dimension (innovation)
        # Excitation MLP, hard-coded for 7 channels with a 2x expansion;
        # the channel/ratio arguments are not used here.
        self.fc = nn.Sequential(
                nn.Linear(7, 14, bias=False),
                nn.Dropout(p=0.1),
                nn.ReLU(inplace=True),
                nn.Linear(14, 7, bias=False),
                nn.Sigmoid()
        )
        self.seq_len = configs.seq_len
        self.pred_len = configs.pred_len

        # Prediction layers; defined here but not called in this version of forward.
        self.Linear_More_1 = nn.Linear(self.seq_len, self.pred_len * 2)
        self.Linear_More_2 = nn.Linear(self.pred_len * 2, self.pred_len)
        self.relu = nn.ReLU()
        self.gelu = nn.GELU()

        self.drop = nn.Dropout(p=0.1)
        # Use this line if you want to visualize the weights
        # self.Linear.weight = nn.Parameter((1/self.seq_len)*torch.ones([self.pred_len,self.seq_len]))

    def forward(self, x):
        # x: [Batch, Input length, Channel]
        x = x.permute(0, 2, 1)              # (B, L, C) -> (B, C, L)
        b, c, l = x.size()                  # (B, C, L)
        # avg_pool: (B, C, L) -> (B, C, 1); view flattens it to (B, C) for the Linear layers
        y = self.avg_pool(x).view(b, c)
        # excitation, then reshape to (B, C, 1) so it broadcasts over the length dimension
        y = self.fc(y).view(b, c, 1)
        # reweight the channels and permute back to (B, L, C)
        return (x * y).permute(0, 2, 1)
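A minimal smoke test for the modified Model, assuming a stand-in configs object with only seq_len and pred_len (in DLinear the real configs comes from the experiment argument parser):

from types import SimpleNamespace
import torch

configs = SimpleNamespace(seq_len=96, pred_len=96)  # hypothetical stand-in for DLinear's configs
model = Model(configs)
x = torch.randn(8, 96, 7)        # [Batch, Input length, Channel]
out = model(x)
print(out.shape)                 # torch.Size([8, 96, 7]); channel attention only, since the
                                 # prediction layers (Linear_More_1/2) are not called in this forward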
