Deep Learning: Transformer for Time-Series Forecasting, Model Optimization (Part 2)

 

This post builds on nn.Transformer and adds a time-feature encoding, a positional encoding, and a token embedding, which noticeably improves the forecasts over Part 1. The dataset is again the ETT dataset. The full code follows.

 

1. Import the required packages

import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import plotly.express as px
from sklearn import metrics
from torch.utils.data import Dataset, DataLoader

2. Define the time-feature encodings

# Time features: hour of day / day of week / day of month / day of year
def HourOfDay(date):
    """Hour of day encoded as value between [-0.5, 0.5]"""
    return date.hour / 23.0 - 0.5

def DayOfWeek(date):
    """Day of week encoded as value between [-0.5, 0.5]"""
    return date.dayofweek / 6.0 - 0.5

def DayOfMonth(date):
    """Day of month encoded as value between [-0.5, 0.5]"""
    return (date.day - 1) / 30.0 - 0.5

def DayOfYear(date):
    """Day of year encoded as value between [-0.5, 0.5]"""
    return (date.dayofyear - 1) / 365.0 - 0.5


3. Define the positional encoding

class PositionalEncoding(nn.Module):
    """Standard sinusoidal positional encoding."""
    def __init__(self, d_model, device, max_len=5000):
        super(PositionalEncoding, self).__init__()
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(np.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)   # even dimensions: sine
        pe[:, 1::2] = torch.cos(position * div_term)   # odd dimensions: cosine
        # shape (max_len, 1, d_model) so it broadcasts over the batch dimension
        self.pe = pe.unsqueeze(0).transpose(1, 0).to(device)

    def forward(self, x):
        # x: (seq_len, batch, d_model); return the first seq_len encodings
        return self.pe[:x.size(0), :]
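
A minimal shape check (the dimensions are example values), using the sequence-first (seq_len, batch, d_model) layout that nn.Transformer expects by default:

pe = PositionalEncoding(d_model=16, device='cpu')
x = torch.zeros(10, 4, 16)          # (seq_len, batch, d_model)
print(pe(x).shape)                  # torch.Size([10, 1, 16]); broadcasts over the batch dim when added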

4. Define the token embedding

class TokenEmbedding(nn.Module):
    def __init__(self, input_size, d_model, kernel_size, padding):
        super(TokenEmbedding, self).__init__()
        # 1D convolution over the time axis projects input_size features to d_model;
        # circular padding with kernel_size=3, padding=1 keeps the sequence length unchanged
        self.tokenConv = nn.Conv1d(input_size, d_model,
                                   kernel_size, stride=1, padding=padding,
                                   padding_mode='circular')
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_in',
                                        nonlinearity='leaky_relu')

    def forward(self, x):
        # x: (seq_len, batch, input_size) -> conv over time -> (seq_len, batch, d_model)
        x = self.tokenConv(x.permute(1, 2, 0)).permute(2, 0, 1)
        return x
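
A minimal shape check (example dimensions); with kernel_size=3 and padding=1 the sequence length is preserved:

emb = TokenEmbedding(input_size=1, d_model=16, kernel_size=3, padding=1)
x = torch.randn(10, 4, 1)           # (seq_len, batch, input_size)
print(emb(x).shape)                 # torch.Size([10, 4, 16])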

5. Define the improved Transformer network

class TransformerTimeSeriesModel(nn.Module):
    def __init__(self, input_size, d_model, device, pred_length, nhead,
                 num_encoder_layers, num_decoder_layers, dim_feedforward,
                 kernel_size, padding, output_size, time_stamp_length, dropout=0.1):
        super(TransformerTimeSeriesModel, self).__init__()

        self.pred_length = pred_length
        self.device = device

        self.value_encoding = TokenEmbedding(input_size, d_model, kernel_size, padding)

        self.positional_encoding = PositionalEncoding(d_model, device)
        self.timefeature_encoding = nn.Linear(time_stamp_length, d_model)

        self.transformer = nn.Transformer(d_model=d_model, nhead=nhead,
                                          num_encoder_layers=num_encoder_layers,
                                          num_decoder_layers=num_decoder_layers,
                                          dim_feedforward=dim_feedforward,
                                          dropout=dropout)
        self.fc_out = nn.Linear(d_model, output_size)

    def forward(self, src, src_date, tgt, tgt_date, tgt_mask=None):
        # Causal mask: each decoder position may only attend to itself and earlier positions
        if tgt_mask is None:
            tgt_mask = nn.Transformer.generate_square_subsequent_mask(tgt.size(0)).to(self.device)

        # Sum the three encodings: value (token), positional, and time features
        src = self.value_encoding(src) + self.positional_encoding(src) + self.timefeature_encoding(src_date)
        tgt = self.value_encoding(tgt) + self.positional_encoding(tgt) + self.timefeature_encoding(tgt_date)

        output = self.transformer(src, tgt, tgt_mask=tgt_mask)

        # Keep only the last pred_length steps as the forecast
        return self.fc_out(output)[-self.pred_length:, :, :]
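
For intuition, generate_square_subsequent_mask returns an upper-triangular matrix of -inf above the diagonal; for a target length of 4:

print(nn.Transformer.generate_square_subsequent_mask(4))
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])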

6. Define the Dataset and data preparation

# Dataset definition
class get_dataset(Dataset):
    def __init__(self, data_path, seq_length, label_length, pred_length,
                 time_stamp_length, features, train_split, mode):
        self.mode = mode
        self.data_path = data_path
        self.features = features
        self.seq_length = seq_length

        self.label_length = label_length
        self.pred_length = pred_length

        self.time_stamp_length = time_stamp_length

        self.data, self.date_stamp, self.data_max, self.data_min = self.get_data()
        train_num = int(train_split * len(self.data))
        if self.mode == 'train':
            self.data = self.data[:train_num, :, :]
            self.date_stamp = self.date_stamp[:train_num, :, :]
        else:
            self.data = self.data[train_num:, :, :]
            self.date_stamp = self.date_stamp[train_num:, :, :]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Returns: src, tgt, src date stamps, tgt date stamps, and the OT labels for the horizon
        return (self.data[index, :-self.pred_length, :],
                self.data[index, (self.seq_length - self.label_length):, :],
                self.date_stamp[index, :-self.pred_length, :],
                self.date_stamp[index, (self.seq_length - self.label_length):, :],
                self.data[index, self.seq_length:, -1].unsqueeze(1))

    def get_data(self):
        data = pd.read_csv(self.data_path)
        data.index = pd.to_datetime(data['date'])
        data = data.drop('date', axis=1)
        data_max = data.max()
        data_min = data.min()

        # Min-max normalize each column to [0, 1]
        data = (data - data_min) / (data_max - data_min)
        # Number of sliding windows of length seq_length + pred_length
        num_sample = len(data) - self.seq_length - self.pred_length + 1
        seq_data = torch.zeros(num_sample,
                               self.seq_length + self.pred_length,
                               len(self.features))

        date_stamp = torch.zeros(num_sample,
                                 self.seq_length + self.pred_length,
                                 self.time_stamp_length)

        for i in range(num_sample):
            seq_data[i] = torch.tensor(data.iloc[i:i + self.seq_length + self.pred_length,
                                       self.features].values)

            time_seq = data.index[i:i + self.seq_length + self.pred_length]

            hourofday = torch.tensor(list(map(HourOfDay, time_seq)))
            dayofweek = torch.tensor(list(map(DayOfWeek, time_seq)))
            dayofmonth = torch.tensor(list(map(DayOfMonth, time_seq)))
            dayofyear = torch.tensor(list(map(DayOfYear, time_seq)))

            date_stamp[i] = torch.stack([hourofday, dayofweek, dayofmonth, dayofyear]).transpose(1, 0)

        return seq_data, date_stamp, data_max, data_min
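
With the settings used in the main script below (seq_length=96, label_length=48, pred_length=24), each sample is a window of 120 consecutive rows, split as follows (a sketch, assuming features=[6] so there is a single feature column):

# Per-sample outputs of __getitem__:
#   src       : steps [0, 96)   -> encoder input,  shape (96, 1)
#   tgt       : steps [48, 120) -> decoder input,  shape (72, 1)   (48 known + 24 future)
#   src_date  : shape (96, 4);  tgt_date : shape (72, 4)
#   label     : steps [96, 120) -> OT targets,     shape (24, 1)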

7. Define the training loop

def train(model, dataset, epochs, optim, loss_function, device, batch_size, shuffle=True):
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)

    for epoch in range(epochs):
        train_loss = 0
        model.train()
        for x, y, x_date, y_date, label in data_loader:
            x, y, label = x.transpose(1, 0).to(device), y.transpose(1, 0).to(device), label.transpose(1, 0).to(device)
            x_date = x_date.transpose(1, 0).to(device)
            y_date = y_date.transpose(1, 0).to(device)
            pred = model(x, x_date, y, y_date)

            loss = loss_function(pred, label)

            optim.zero_grad()
            loss.backward()
            optim.step()
            train_loss += loss.item()
        train_loss /= len(data_loader)
        print('epoch / epochs : %d / %d, loss : %.6f' % (epoch + 1, epochs, train_loss))

8. Define the test loop

def test(model, dataset, device, batch_size, label_length, pred_length, root_path, shuffle=False):

    model.eval()

    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)

    preds, labels = np.zeros(len(dataset) * pred_length), \
                    np.zeros(len(dataset) * pred_length)
    left, right = 0, 0

    for x, y, x_date, y_date, label in data_loader:

        left = right

        right += len(label) * pred_length
        x, y = x.transpose(1, 0).to(device), y.transpose(1, 0).to(device)
        x_date = x_date.transpose(1, 0).to(device)
        y_date = y_date.transpose(1, 0).to(device)

        pred = model(x, x_date, y, y_date).detach().cpu().numpy().flatten()

        preds[left:right] = pred
        labels[left:right] = label.transpose(1, 0).detach().cpu().numpy().flatten()

    # Invert the min-max scaling to recover values on the original OT scale
    preds_ = preds * (dataset.data_max['OT'] - dataset.data_min['OT']) + dataset.data_min['OT']
    labels_ = labels * (dataset.data_max['OT'] - dataset.data_min['OT']) + dataset.data_min['OT']

    # Save the de-normalized series so the .npy files match what is plotted and evaluated
    np.save(root_path + '_preds.npy', preds_)
    np.save(root_path + '_labels.npy', labels_)
    return preds_, labels_
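
The de-normalization above simply inverts the min-max scaling from get_data; a minimal numeric check (with made-up values):

x, lo, hi = 25.0, 0.0, 50.0
x_norm = (x - lo) / (hi - lo)       # forward min-max scaling -> 0.5
print(x_norm * (hi - lo) + lo)      # inverse scaling recovers 25.0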

9. Define the evaluation metrics

def get_metric(pred, label):
    # Only use points with label > 0.01 for MAPE, to avoid division by (near) zero
    index = np.where(label > 0.01)

    mse = np.mean((label - pred) ** 2)

    r2 = 1 - np.sum((label - pred) ** 2) / np.sum((label - np.mean(label)) ** 2)
    mape = np.abs((pred[index] - label[index]) / label[index]).mean()
    mae = np.abs(label - pred).mean()
    return mse, r2, mape, mae
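
A quick sanity check (with made-up values): on a perfect prediction the error metrics are zero and R2 is 1:

y = np.array([1.0, 2.0, 3.0])
print(get_metric(y, y))             # (0.0, 1.0, 0.0, 0.0)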

def model_eva(pred, label):
    fig = px.line(title='Transformer model forecast')
    fig.add_scatter(y=label, name='label')
    fig.add_scatter(y=pred, name='pred')
    fig.show()

    mse, r2, mape, mae = get_metric(pred, label)

    print('MSE : %.6f' % (mse))
    print('R2 : %.6f' % (r2))
    print('MAPE : %.6f' % (mape))
    print('MAE : %.6f' % (mae))

10. Main script and results

# Fix random seeds for reproducibility
seed = 0
torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  
np.random.seed(seed)  
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True


device = 'cuda' if torch.cuda.is_available() else 'cpu'
seq_length = 96
label_length = 48
pred_length = 24
features = [6]                 # column indices into [HUFL, HULL, MUFL, MULL, LUFL, LULL, OT]; 6 = OT only
input_size = len(features)
output_size = 1
epochs = 100
lr = 0.005
batch_size = 32
train_split = 0.8
d_model = 128
nhead = 2
num_encoder_layers = 2
num_decoder_layers = 2
dim_feedforward = 128
dropout = 0.1
kernel_size = 3
padding = 1
time_stamp_length = 4
data_path = './ETTh1.csv'      # path to the ETT dataset CSV; adjust to your local copy
root_path = './' + 'seq_' + str(seq_length) + '_label_' + str(label_length) + '_pred_' + str(pred_length)
save_path = root_path + '_transformer.pth'


model = TransformerTimeSeriesModel(input_size, d_model, device, pred_length,
                                   nhead, num_encoder_layers, num_decoder_layers,
                                   dim_feedforward, kernel_size, padding, output_size,
                                   time_stamp_length, dropout=dropout).to(device)

optim = torch.optim.SGD(model.parameters(), lr=lr)
loss_function = nn.MSELoss()

dataset_train = get_dataset(data_path, seq_length, label_length, pred_length, time_stamp_length, features, train_split=train_split, mode='train')
dataset_test = get_dataset(data_path, seq_length, label_length, pred_length, time_stamp_length, features, train_split=train_split, mode='test')


train(model, dataset_train, epochs, optim, loss_function, device, batch_size, shuffle=True)

torch.save(model.state_dict(), save_path)

preds, labels = test(model, dataset_test, device, batch_size, label_length, pred_length, root_path, shuffle=False)

model_eva(preds, labels)
