LSTM Time Series Prediction

Steps
1. Generate the dataset
2. Split into training and test sets, and segment the data with a sliding time window
3. Build the sliding-window dataset
4. Define the LSTM model
5. Define the hyperparameters
6. Define the training loop

Points to note:
- Single-step prediction: only the last time step of the LSTM output is used (see the sketch after this list).
- During multi-step forecasting, each step's output is fed back as the next step's input.
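A minimal sketch of the first note (the names here are illustrative; shapes follow the nn.LSTM conventions shown in the next section):

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=1, hidden_size=8)   # defaults: num_layers=1, batch_first=False
x = torch.randn(60, 1, 1)                     # (seq_len, batch_size, input_size)
out, _ = lstm(x)                              # out: (60, 1, 8), one hidden vector per time step
last_step = out[-1]                           # (1, 8): only this feeds the prediction head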

About the nn.LSTM module

Shape conventions (with the default batch_first=False; D = 2 if bidirectional=True else 1):

rnn = nn.LSTM(10, 20, 2)                  # (input_size, hidden_size, num_layers)
input = torch.randn(5, 3, 10)             # (seq_len, batch_size, input_size)
h0 = torch.randn(2, 3, 20)                # (D*num_layers, batch_size, hidden_size)
c0 = torch.randn(2, 3, 20)                # (D*num_layers, batch_size, hidden_size)
output, (hn, cn) = rnn(input, (h0, c0))   # output: (5, 3, 20) = (seq_len, batch_size, D*hidden_size)
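If you prefer batch-first tensors, nn.LSTM also accepts batch_first=True, which swaps the first two dimensions of the input and output (the hidden states keep their layout). A minimal sketch:

import torch
import torch.nn as nn

rnn = nn.LSTM(input_size=10, hidden_size=20, num_layers=2, batch_first=True)
inp = torch.randn(3, 5, 10)               # (batch_size, seq_len, input_size)
output, (hn, cn) = rnn(inp)               # h0/c0 default to zeros when omitted
print(output.shape)                       # torch.Size([3, 5, 20])
print(hn.shape)                           # torch.Size([2, 3, 20]): still (num_layers, batch, hidden)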
import matplotlib.pyplot as plt

import torch
import torch.nn as nn


# 1. Generate the dataset
x = torch.linspace(0, 999, 1000)        # start, end, number of points
y = torch.sin(x * 2 * 3.1415926 / 70)   # sine wave with a period of 70 samples

plt.xlim(-5,1005)
plt.xlabel('x')
plt.ylabel('sin(x)')
plt.title("sin")
plt.plot(y.numpy(), color='#800080')
plt.show()

# 2. Split into training and test sets; hold out the last 70 points (one full period) for testing
train_y = y[:-70]
test_y = y[-70:]

# 3. Build the sliding-window dataset
def create_data_seq(seq, time_window):
    out = []
    l = len(seq)
    for i in range(l - time_window):
        x_tw = seq[i:i + time_window]                      # input window of length time_window
        y_tw = seq[i + time_window:i + time_window + 1]    # the single value right after the window
        out.append((x_tw, y_tw))
    return out

time_window = 60
train_data = create_data_seq(train_y,time_window)
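A quick sanity check of what the helper produces (assuming the code above has just run):

print(len(train_data))        # 870 pairs: len(train_y)=930 minus time_window=60
x0, y0 = train_data[0]
print(x0.shape, y0.shape)     # torch.Size([60]) torch.Size([1])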

# 4. Define the LSTM model
class MyLSTMModel(nn.Module):
    def __init__(self, input_size=1, hidden_size=128, out_size=1):
        super(MyLSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size, num_layers=1, bidirectional=False)
        self.linear = nn.Linear(in_features=self.hidden_size, out_features=out_size, bias=True)

        # (D*num_layers, batch_size, hidden_size)
        self.hidden_state = (torch.zeros(1, 1, self.hidden_size),
                             torch.zeros(1, 1, self.hidden_size))

    def forward(self, x):
        # x arrives as a 1-D window; reshape to (seq_len, batch_size=1, input_size=1)
        out, self.hidden_state = self.lstm(x.view(len(x), 1, -1), self.hidden_state)

        # out: (seq_len, 1, hidden_size) -> (seq_len, hidden_size)
        pred = self.linear(out.view(len(x), -1))

        # return only the prediction from the last time step (single-step prediction)
        return pred[-1]
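A minimal smoke test of the forward pass (run on the CPU, since the initial hidden state created in __init__ lives there):

m = MyLSTMModel()
window = torch.randn(60)      # one input window of length time_window
print(m(window).shape)        # torch.Size([1]): a single next-step prediction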

# 5. Define the hyperparameters
learning_rate = 0.00001
epoch = 10
multi_step = 70   # forecast horizon: one full period of the sine wave

model = MyLSTMModel()
mse_loss = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.5,0.999))

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model.to(device)
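One subtlety: model.to(device) moves parameters and registered buffers, but not plain tensor attributes such as self.hidden_state, which is why the loops below re-create the hidden state directly on the device. A quick check:

print(next(model.parameters()).device)   # cuda:0 when a GPU is available
print(model.hidden_state[0].device)      # cpu: plain attributes are not moved by .to()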

# 6. Define the training loop
for i in range(epoch):
    # 6.1 Train
    for x_seq, y_label in train_data:
        x_seq = x_seq.to(device)
        y_label = y_label.to(device)

        # reset the hidden state for each window: windows are treated as independent sequences
        model.hidden_state = (torch.zeros(1, 1, model.hidden_size).to(device),
                              torch.zeros(1, 1, model.hidden_size).to(device))

        pred = model(x_seq)
        loss = mse_loss(pred, y_label)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print(f"Epoch {i} Loss: {loss.item()}")
    # seed the forecast with the last time_window points of the training data
    preds = train_y[-time_window:].tolist()

    # 6.2 Test: iterative multi-step forecasting
    for j in range(multi_step):
        test_seq = torch.FloatTensor(preds[-time_window:]).to(device)
        with torch.no_grad():
            model.hidden_state = (torch.zeros(1, 1, model.hidden_size).to(device),
                                  torch.zeros(1, 1, model.hidden_size).to(device))
            y_pred = model(test_seq)
            # autoregressive: each output becomes part of the next input window
            preds.append(y_pred.item())
    loss = mse_loss(torch.tensor(preds[-multi_step:]), test_y)
    print(f'performance on test range: {loss}')

    # 6.3 Visualize: ground truth vs. the 70-step forecast
    plt.figure(figsize=(12, 4))
    plt.xlim(700, 999)
    plt.grid(True)
    plt.plot(y.numpy(), color='#8000ff')
    plt.plot(range(1000 - multi_step, 1000), preds[-multi_step:], color='#ff8000')  # align the forecast with the held-out indices 930-999
    plt.show()
