import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
# ---- Load data and scale to [0, 1] ----
# Single-column CSV of wind-speed readings; MinMax scaling keeps the
# scaler around so predictions can be mapped back to m/s later.
raw = pd.read_csv('wind_speed_1.csv', header=None)
scaler = MinMaxScaler()
data = scaler.fit_transform(raw.values)
# Convert the series into a supervised-learning problem.
def create_dataset(dataset, look_back=10):
    """Build sliding-window samples from a univariate series.

    Args:
        dataset: 2-D array of shape (num_steps, 1) — column 0 is the series.
        look_back: window length; each sample is `look_back` consecutive
            values and the target is the value immediately after the window.

    Returns:
        (X, y): torch tensors of shapes (num_samples, look_back) and
        (num_samples,), dtype matching the input array.
    """
    X, y = [], []
    # FIX: the original used range(len(dataset) - look_back - 1), whose
    # extra -1 silently dropped the last valid window: for
    # i = len - look_back, dataset[i + look_back] is already out of range,
    # so i = len - look_back - 1 is the last legal index and
    # range(len(dataset) - look_back) covers exactly the valid windows.
    for i in range(len(dataset) - look_back):
        X.append(dataset[i:(i + look_back), 0])
        y.append(dataset[i + look_back, 0])
    # Stack through numpy first: torch.tensor on a list of ndarrays is slow
    # and emits a UserWarning; dtype is preserved either way.
    return torch.tensor(np.asarray(X)), torch.tensor(np.asarray(y))
# Windowed samples, then a chronological 70/30 split — time series must
# not be shuffled, so the test set is simply the tail of the data.
look_back = 10
X, y = create_dataset(data, look_back=look_back)

train_size = int(0.7 * len(X))
test_size = len(X) - train_size
train_X, train_y = X[:train_size], y[:train_size]
test_X, test_y = X[train_size:], y[train_size:]
# ---- Model definition ----
class LSTM_Attention(nn.Module):
    """LSTM encoder with learned attention pooling for one-step forecasting.

    The LSTM encodes the window; a linear layer scores each time step,
    softmax over the sequence axis turns scores into weights, and the
    weighted sum of hidden states feeds a final linear head.
    """

    def __init__(self, input_size=1, hidden_size=64, num_layers=1, output_size=1):
        super(LSTM_Attention, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # NOTE: submodule creation order is kept stable so seeded
        # initialisation stays reproducible.
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.attention = nn.Linear(hidden_size, 1)   # per-step score
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Fresh zero hidden/cell states each call; device follows the input.
        state_shape = (self.num_layers, x.size(0), self.hidden_size)
        h0 = torch.zeros(*state_shape, device=x.device)
        c0 = torch.zeros(*state_shape, device=x.device)
        # batch_first=True -> seq_out is (batch, seq_len, hidden_size).
        seq_out, _ = self.lstm(x, (h0, c0))
        # Normalise the per-step scores across the sequence axis.
        weights = torch.softmax(self.attention(seq_out), dim=1)
        # Attention-weighted sum over time -> one context vector per sample.
        context = (weights * seq_out).sum(dim=1)
        return self.fc(context)
model = LSTM_Attention(input_size=1, hidden_size=64, output_size=1)

# ---- Loss and optimiser ----
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# ---- Training configuration and mini-batch loader ----
num_epochs = 120
batch_size = 64
train_loader = torch.utils.data.DataLoader(
    torch.utils.data.TensorDataset(train_X, train_y),
    batch_size=batch_size,
)
# ---- Training loop ----
for epoch in range(num_epochs):
    for inputs, labels in train_loader:
        optimizer.zero_grad()
        # Add a trailing feature dim: (batch, seq) -> (batch, seq, 1).
        preds = model(inputs.unsqueeze(-1).float())
        loss = criterion(preds.squeeze(), labels.float())
        loss.backward()
        optimizer.step()
    # Report the last mini-batch loss every 10 epochs.
    if (epoch + 1) % 10 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))
# ---- Predict and measure error ----
with torch.no_grad():
    train_predict = model(train_X.unsqueeze(-1).float()).squeeze().tolist()
    test_predict = model(test_X.unsqueeze(-1).float()).squeeze().tolist()

# Undo the MinMax scaling so errors are in the original units.
train_actual = scaler.inverse_transform(train_y.numpy().reshape(-1, 1)).flatten()
test_actual = scaler.inverse_transform(test_y.numpy().reshape(-1, 1)).flatten()
train_predict = scaler.inverse_transform(np.array(train_predict).reshape(-1, 1)).flatten()
test_predict = scaler.inverse_transform(np.array(test_predict).reshape(-1, 1)).flatten()

def _rmse(actual, predicted):
    # Root-mean-squared error over paired samples.
    return (sum((a - p) ** 2 for a, p in zip(actual, predicted)) / len(actual)) ** 0.5

train_rmse = _rmse(train_actual, train_predict)
test_rmse = _rmse(test_actual, test_predict)
print('Train RMSE: {:.4f}'.format(train_rmse))
print('Test RMSE: {:.4f}'.format(test_rmse))
# ---- Visualise results ----
def _plot_series(actual, predicted, title):
    """Plot actual vs. predicted wind speed for one data split."""
    plt.figure(figsize=(10, 6))
    plt.plot(actual, label='Actual')
    plt.plot(predicted, label='Predicted')
    plt.xlabel('Time (hours)')
    plt.ylabel('Wind Speed (m/s)')
    plt.title(title)
    plt.legend()
    plt.show()

_plot_series(train_actual, train_predict, 'Training Set')
_plot_series(test_actual, test_predict, 'Testing Set')
# Complete code: LSTM-attention univariate time-series forecasting
# (blog footer — originally published 2023-06-03 16:45:02)