# Example: training an LSTM network for time-series prediction with CUDA
import torch
import torch.nn as nn
class LSTMModel(nn.Module):
    """LSTM network for time-series prediction.

    Runs a multi-layer, batch-first LSTM over the input sequence, keeps the
    hidden output of the last time step, applies dropout to it, and maps it
    to ``output_size`` values with a linear layer.
    """

    def __init__(self, input_size, hidden_size, num_layers, dropout_prob, output_size):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # NOTE(review): dropout_prob is applied only to the final time-step
        # output below; nn.LSTM's own inter-layer `dropout` arg is not used.
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.dropout = nn.Dropout(dropout_prob)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Forward pass.

        Args:
            x: tensor of shape (batch, seq_len, input_size).

        Returns:
            Tensor of shape (batch, output_size).
        """
        # Zero initial hidden/cell states, allocated directly on x's device
        # (device= kwarg avoids a CPU allocation followed by a copy).
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        out, _ = self.lstm(x, (h0, c0))
        # Use only the last time step's output for the prediction head.
        out = self.dropout(out[:, -1, :])
        out = self.fc(out)
        return out
# Hyperparameters.
input_size = 10
hidden_size = 64
num_layers = 3
dropout_prob = 0.2
output_size = 1

model = LSTMModel(input_size, hidden_size, num_layers, dropout_prob, output_size)

# Train on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Synthetic data: 100 sequences of length 20 with `input_size` features each,
# and one regression target per sequence.
input_data = torch.randn(100, 20, input_size).to(device)
target_data = torch.randn(100, output_size).to(device)

criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Full-batch training loop (the whole dataset in every step).
for epoch in range(100):
    model.train()
    optimizer.zero_grad()
    output = model(input_data)
    loss = criterion(output, target_data)
    loss.backward()
    optimizer.step()
    # Report progress every 10 epochs.
    if (epoch + 1) % 10 == 0:
        print(f"Epoch: {epoch+1}, Loss: {loss.item()}")

# Inference: eval mode disables dropout; no_grad skips gradient tracking.
model.eval()
with torch.no_grad():
    test_input = torch.randn(10, 20, input_size).to(device)
    prediction = model(test_input)
    print("Prediction:", prediction)