A Complete LSTM Network Experiment in PyTorch (with a built-in dataset; runs end to end out of the box, and you can swap in your own dataset)

LSTM Code

import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset

from sklearn.model_selection import train_test_split


# Generate the dataset
def generate_data(num_samples, seq_length):
    # Generate sine-wave sequences (class 0)
    half_num_samples = num_samples // 2  # integer division
    x_sin = np.array([np.sin(0.06 * np.arange(seq_length) + np.random.rand()) for _ in range(half_num_samples)])
    y_sin = np.zeros(half_num_samples, dtype=np.int64)
    
    # Generate cosine-wave sequences (class 1), with a slightly different frequency
    x_cos = np.array([np.cos(0.05 * np.arange(seq_length) + np.random.rand()) for _ in range(half_num_samples)])
    y_cos = np.ones(half_num_samples, dtype=np.int64)
    
    # Merge the two classes
    x = np.concatenate((x_sin, x_cos), axis=0)
    y = np.concatenate((y_sin, y_cos), axis=0)
    
    # Shuffle the samples
    indices = np.arange(num_samples)
    np.random.shuffle(indices)
    x = x[indices]
    y = y[indices]
    
    # Convert to PyTorch tensors. The LSTM expects a 3D tensor [batch, seq_len, features],
    # so unsqueeze(2) adds a feature dimension of size 1 at index 2.
    x_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(2)  
    print('x_tensor.shape:', x_tensor.shape)  # x_tensor.shape: torch.Size([1000, 100, 1])
    y_tensor = torch.tensor(y, dtype=torch.int64)  # y_tensor.shape: torch.Size([1000])
    print('y_tensor.shape:', y_tensor.shape)
    
    return x_tensor, y_tensor

# LSTM classification model
class LSTMClassifier(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, n_layers):
        super(LSTMClassifier, self).__init__()
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        
        # LSTM layer
        self.lstm = nn.LSTM(input_dim, hidden_dim, n_layers, batch_first=True)
        
        # Fully connected layer
        self.fc = nn.Linear(hidden_dim, output_dim)
    
    # forward() is called automatically when the model is applied to a batch
    def forward(self, x):
        # Initialize the hidden state with zeros
        h0 = torch.zeros(self.n_layers, x.size(0), self.hidden_dim)
        
        # Initialize the cell state with zeros
        c0 = torch.zeros(self.n_layers, x.size(0), self.hidden_dim)
        
        out, (hn, cn) = self.lstm(x, (h0, c0))
        
        # Use only the output at the last time step for classification
        out = self.fc(out[:, -1, :])
        return out
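
# Quick shape sanity check (a minimal illustrative sketch, not part of the
# training pipeline below): a dummy batch of 4 sequences, each 100 steps long
# with 1 feature per step, should come out as 4 rows of 2 class logits.
def _shape_check():
    model = LSTMClassifier(input_dim=1, hidden_dim=50, output_dim=2, n_layers=1)
    logits = model(torch.randn(4, 100, 1))  # [batch, seq_len, features]
    print(logits.shape)  # torch.Size([4, 2])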

# Train the model
def train_model(model, train_loader, criterion, optimizer, num_epochs):
    for epoch in range(num_epochs):
        for i, (sequences, labels) in enumerate(train_loader):
            # Forward pass
            outputs = model(sequences)
            loss = criterion(outputs, labels)
            
            # Backward pass and parameter update
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        
        # There are only ~13 batches per epoch here (800 samples / batch_size 64),
        # so report the loss once per epoch
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

# Evaluate the model
def evaluate_model(model, test_loader):
    model.eval()  # Set model to evaluation mode
    with torch.no_grad():
        correct = 0
        total = 0
        for sequences, labels in test_loader:
            outputs = model(sequences)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            
        print(f'Accuracy of the model on the test sequences: {100 * correct / total} %')




if __name__ == '__main__':
    # ----------------- Generate sample data ----------------- 
    num_samples = 1000  # total number of training samples
    seq_length = 100    # sequence length of each sample (can be viewed as the feature length)
    x_data, y_data = generate_data(num_samples, seq_length)  # generate the full sample set
    x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2, random_state=2) 
    # ----------------- Data loaders ----------------- 
    batch_size = 64
    train_loader = DataLoader(TensorDataset(x_train, y_train), batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(TensorDataset(x_test, y_test), batch_size=batch_size, shuffle=False)

    # ----------------- Visualize the data ----------------- 
    plt.figure(figsize=(12, 6))
    for i in range(6):
        plt.subplot(2, 3, i+1)
        plt.plot(x_train[i].numpy().flatten(), label=f"Class {y_train[i].item()}")
        plt.legend()
    plt.tight_layout()
    plt.show()  # comment out this line if you don't want to inspect the data

    # ----------------- Hyperparameters ----------------- 
    input_dim = 1     # number of input features per time step (the data is [batch, 100, 1])
    hidden_dim = 50   # dimension of the LSTM hidden state
    output_dim = 2    # output dimension (number of classes)
    n_layers = 1      # number of stacked LSTM layers (default is 1)

    # ----------------- Create the model ----------------- 
    model = LSTMClassifier(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, n_layers=n_layers)
    criterion = nn.CrossEntropyLoss()  # loss function
    optimizer = optim.Adam(model.parameters(), lr=0.01)  # optimizer

    # ----------------- Train the model ----------------- 
    train_model(model, train_loader, criterion, optimizer, num_epochs=10)

    # ----------------- Evaluate the model ----------------- 
    evaluate_model(model, test_loader)
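
    # ----------------- Single-sequence inference (a minimal sketch) ----------------- 
    # An illustrative extra step, showing how to classify one new, unseen
    # sequence with the trained model. The input must be reshaped to
    # [1, seq_length, 1] to match the [batch, seq_len, features] layout.
    model.eval()
    with torch.no_grad():
        new_seq = torch.tensor(np.sin(0.06 * np.arange(seq_length)), dtype=torch.float32)
        new_seq = new_seq.unsqueeze(0).unsqueeze(2)  # shape: [1, 100, 1]
        pred = torch.argmax(model(new_seq), dim=1).item()
        print(f'Predicted class: {pred}')  # 0 = sine, 1 = cosine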
    

Which parameters need to change for a bidirectional LSTM?

You need to set bidirectional=True in the nn.LSTM constructor. In addition, since a bidirectional LSTM produces two hidden states at each time step (forward and backward), the number of input features of the fully connected layer must be changed to 2 * hidden_size.

Below are the modifications to the code, along with the points to note:

  1. Set bidirectional=True in nn.LSTM to enable the bidirectional mode.
  2. The sizes of h0 and c0 are both multiplied by 2, because each LSTM layer now keeps two hidden states (one for the forward pass and one for the backward pass).
  3. Change the number of input features of the fully connected layer from hidden_size to 2 * hidden_size to match the bidirectional output.

The modified code is as follows:

import torch
import torch.nn as nn

# Define the bidirectional LSTM network
class LSTM_Model(nn.Module):
    """
    input_size: number of input features per time step
    hidden_size: dimension of the LSTM hidden state
    num_layers: number of stacked LSTM layers
    class_num: number of classes
    batch_first: whether the input/output dimension order is (batch, seq, feature)
    """
    def __init__(self, input_size, hidden_size, num_layers, class_num):
        super(LSTM_Model, self).__init__()
        self.hidden_size = hidden_size 
        self.num_layers = num_layers
        # Changed to a bidirectional LSTM
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True)
        # Changed the fully connected layer to take 2 * hidden_size input features
        self.fc = nn.Linear(in_features=2 * hidden_size, out_features=class_num)

    def forward(self, x):
        # x is expected as [batch, seq_len, features]. Initialize the hidden and
        # cell states with zeros on the same device as the input; note the factor
        # of 2 in the first dimension, because the LSTM is bidirectional.
        h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size, device=x.device)
        out, (hn, cn) = self.lstm(x, (h0, c0))
        # Take the output at the last time step; it already concatenates the
        # forward and backward hidden states, so this line does not need to change
        out = self.fc(out[:, -1, :])
        return out
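
To drop the bidirectional model into the training pipeline from the first listing, only the model construction changes; the data loaders, loss, optimizer, and the train_model / evaluate_model functions can be reused as-is. A minimal sketch, assuming the same sine/cosine data as above:

# Swap the bidirectional model into the existing pipeline
model = LSTM_Model(input_size=1, hidden_size=50, num_layers=1, class_num=2)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
train_model(model, train_loader, criterion, optimizer, num_epochs=10)
evaluate_model(model, test_loader)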