Automatic AI Poetry Generation with an LSTM in PyTorch (Deep Learning Course Project Edition, Reproducible Resources Included)

1. Introduction

In natural language processing, generating poetry with deep learning is a task that is both challenging and fun. This article uses the PyTorch framework together with Word2Vec word vectors to build an LSTM model that generates seven-character classical poems and acrostic poems. Through detailed code walkthroughs and technical notes, it guides the reader through the whole pipeline, from data processing to model training to poem generation.

2. Project Tech Stack

Technology / Tool | Version | Purpose
PyTorch | 2.0+ | Deep learning framework
Gensim | 4.3+ | Training Word2Vec word vectors
Matplotlib | 3.7+ | Plotting training curves
Rouge | 1.0.1 | Text-generation evaluation (optional)

3. Data Preprocessing and Word-Vector Training

3.1 Data Format

  • Input file: poetry_7.txt, one poem per line, plain text without punctuation, e.g.:

    plaintext

    仓储十万发关中伟绩今时富郑公有米成珠资缓急此心如秤慎初终
    东华尘土应怜我南海烟霞尽属君惆怅暮春好时节白沙红树草连云
    
  • Core operation: convert each run of consecutive characters into space-separated single characters (e.g. "床前明月光" → "床 前 明 月 光"), which is the sentence format Word2Vec expects; a quick format check is sketched below.
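
A quick way to verify the file format before training (a minimal sketch; it assumes poetry_7.txt sits in the working directory):

python

with open("poetry_7.txt", encoding="utf-8") as f:
    lines = [line.strip() for line in f if line.strip()]
print(len(lines), "poems")
print({len(line) for line in lines[:100]})  # expected: {28} (four 7-character lines, no punctuation)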

3.2 Key Code

python

import os
import numpy as np
import pickle
from gensim.models.word2vec import Word2Vec

# 1. Text splitting: produce the Word2Vec training format
def split_text(file="poetry_7.txt", train_num=6000):
    all_data = open(file, "r", encoding="utf-8").read()
    with open("split_7.txt", "w", encoding="utf-8") as f:
        # Core step: separate every character with a space so each line becomes a trainable sentence
        split_data = " ".join(all_data)
        f.write(split_data)
    return split_data[:train_num * 64]  # roughly cap the amount of training text (each space-separated poem line is ~56 characters)

# 2. Train Word2Vec and save the word vectors
def train_vec(vector_size=128, split_file="split_7.txt"):
    param_file = "word_vec.pkl"
    if os.path.exists(param_file):
        return pickle.load(open(param_file, "rb"))
    
    # Load the preprocessed text (one poem per line) and split each line into character tokens
    lines = open(split_file, "r", encoding="utf-8").read().split("\n")
    sentences = [line.split() for line in lines if line.strip()]
    
    # Train the Word2Vec model (key parameters: vector size, minimum word frequency)
    model = Word2Vec(
        sentences=sentences,
        vector_size=vector_size,
        window=5,
        min_count=1,
        workers=8,
        sg=1  # use the Skip-Gram model
    )
    
    # Save the core parameters: embedding matrix, word-to-index and index-to-word mappings
    word2idx = model.wv.key_to_index
    idx2word = {v: k for k, v in word2idx.items()}
    word_vectors = model.wv.vectors
    
    pickle.dump([word_vectors, word2idx, idx2word], open(param_file, "wb"))
    return [word_vectors, word2idx, idx2word]
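
A quick sanity check of the saved vectors (a sketch; it assumes split_text() has already written split_7.txt):

python

word_vectors, word2idx, idx2word = train_vec(vector_size=128)
print(word_vectors.shape)        # (vocab_size, 128)
print(idx2word[word2idx["月"]])  # looks up the index of 月 and maps it back, printing 月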

4. Dataset Construction (PyTorch Dataset)

4.1 Data Conversion Logic

  • Input (X): the index sequence of the first n-1 characters of a poem
  • Target (Y): the index sequence of the last n-1 characters of a poem
  • Core operation: the dataset returns index sequences; the model's embedding layer, initialized from the pretrained Word2Vec matrix, converts them into word-vector sequences.

4.2 Code

python

import numpy as np
from torch.utils.data import Dataset

class PoetryDataset(Dataset):
    def __init__(self, word_vectors, word2idx, poems):
        self.word_vectors = word_vectors  # pretrained embedding matrix (kept for reference)
        self.word2idx = word2idx          # word-to-index mapping
        self.poems = poems                # raw poem list (one poem per line)

    def __len__(self):
        return len(self.poems)

    def __getitem__(self, idx):
        poem = self.poems[idx]
        # Convert the character sequence to an index sequence (e.g. "春" → 123, "江" → 456)
        poem_idx = [self.word2idx[char] for char in poem if char in self.word2idx]
        
        # Build the input-target pair (X: first n-1 characters, Y: last n-1 characters)
        x = np.array(poem_idx[:-1], dtype=np.int64)
        y = np.array(poem_idx[1:], dtype=np.int64)
        
        # The model's embedding layer (Section 5) maps these indices to (seq_len, embedding_dim) vectors
        return x, y
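
A minimal usage sketch (word_vectors and word2idx come from train_vec() in Section 3, poems is the list of lines read from poetry_7.txt, and it assumes every line has the same length, e.g. 28 characters, so the default collate function can stack samples into a batch):

python

from torch.utils.data import DataLoader

dataset = PoetryDataset(word_vectors, word2idx, poems)
x, y = dataset[0]
print(x.shape, y.shape)    # (27,) (27,) for 28-character poems

loader = DataLoader(dataset, batch_size=32, shuffle=True)
xb, yb = next(iter(loader))
print(xb.shape, yb.shape)  # torch.Size([32, 27]) torch.Size([32, 27])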

5. Building the LSTM Poetry Generation Model

5.1 Model Architecture

plaintext

Input character sequence → Embedding layer (Word2Vec) → LSTM (2 layers, hidden=128)
→ Dropout (p=0.3) → Fully connected layer (vocab-size logits) → Softmax predicts the next character

5.2 Core Code

python

import torch
import torch.nn as nn

class PoetryGenerator(nn.Module):
    def __init__(self, word_vectors, hidden_dim=128):
        super(PoetryGenerator, self).__init__()
        self.embedding_dim = word_vectors.shape[1]  # word-vector dimension (e.g. 128)
        self.hidden_dim = hidden_dim
        self.vocab_size = word_vectors.shape[0]     # vocabulary size (e.g. 4738)
        
        # Embedding layer initialized from the pretrained Word2Vec weights
        self.embedding = nn.Embedding.from_pretrained(
            torch.tensor(word_vectors, dtype=torch.float32)
        )
        
        # LSTM layer (key parameters: input size, hidden size, number of layers)
        self.lstm = nn.LSTM(
            input_size=self.embedding_dim,
            hidden_size=hidden_dim,
            num_layers=2,
            batch_first=True,
            dropout=0.3,
            bidirectional=False
        )
        
        # Fully connected layer: map the LSTM output to vocabulary-size logits
        self.fc = nn.Linear(hidden_dim, self.vocab_size)
        self.dropout = nn.Dropout(0.3)

    def forward(self, x, hidden=None):
        # x shape: (batch_size, seq_len), integer character indices
        x_emb = self.embedding(x)  # embedding output: (batch_size, seq_len, embedding_dim)
        
        # LSTM forward pass, returning the outputs and the (hidden, cell) state
        lstm_out, hidden = self.lstm(x_emb, hidden)
        lstm_out = self.dropout(lstm_out)  # apply dropout
        
        # Fully connected layer: (batch_size, seq_len, vocab_size)
        output = self.fc(lstm_out)
        return output, hidden

    def init_hidden(self, batch_size):
        # Zero-initialize the hidden and cell states on the same device as the model
        device = self.embedding.weight.device
        return (torch.zeros(2, batch_size, self.hidden_dim, device=device),
                torch.zeros(2, batch_size, self.hidden_dim, device=device))
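
A quick forward-pass shape check (a sketch; word_vectors comes from the train_vec() call in Section 3, and the numbers are illustrative):

python

model = PoetryGenerator(word_vectors, hidden_dim=128)
dummy = torch.randint(0, model.vocab_size, (4, 27))  # batch of 4 sequences, 27 steps each
logits, hidden = model(dummy)
print(logits.shape)     # torch.Size([4, 27, vocab_size])
print(hidden[0].shape)  # torch.Size([2, 4, 128]), i.e. (num_layers, batch, hidden_dim)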

6. Model Training and Optimization

6.1 Training Configuration

Parameter | Meaning | Recommended value
batch_size | Batch size | 32
epochs | Number of training epochs | 1000
learning_rate | Learning rate | 0.001
optimizer | Optimizer | AdamW
loss_function | Loss function | Cross-entropy

6.2 Training Loop

python

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

def train_model(model, dataset, params):
    dataloader = DataLoader(dataset, batch_size=params["batch_size"], shuffle=True)
    optimizer = params["optimizer"](model.parameters(), lr=params["learning_rate"])
    criterion = nn.CrossEntropyLoss()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    
    best_loss = float('inf')
    loss_list, perplexity_list = [], []  # collected for the plots in Section 8.2
    for epoch in range(params["epochs"]):
        model.train()
        total_loss = 0.0
        
        for batch_idx, (x, y) in enumerate(dataloader):
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()
            
            # Forward pass: input shape is (batch_size, seq_len)
            output, _ = model(x)
            # Reshape to fit CrossEntropyLoss, which expects (logits, targets)
            output = output.view(-1, model.vocab_size)
            y = y.view(-1)
            
            loss = criterion(output, y)
            loss.backward()
            optimizer.step()
            
            total_loss += loss.item()
            loss_list.append(loss.item())
            perplexity_list.append(np.exp(loss.item()))
            
            # Print progress every 100 steps (including perplexity)
            if batch_idx % 100 == 0:
                print(f"Epoch {epoch+1}/{params['epochs']} | "
                      f"Batch {batch_idx}/{len(dataloader)} | "
                      f"Loss: {loss.item():.4f} | Perplexity: {perplexity_list[-1]:.4f}")
        
        # Save the best model (by total epoch loss)
        if total_loss < best_loss:
            best_loss = total_loss
            torch.save(model.state_dict(), "best_model.pth")
    
    print(f"Training complete. Best Loss: {best_loss:.4f}")
    return loss_list, perplexity_list

7. Poem Generation

7.1 Random Generation

  • Core strategy: start from a random character and predict the next character one step at a time, until 28 characters (four seven-character lines; the training data contains no punctuation) have been generated.
  • Possible improvement: temperature sampling can increase generation diversity; see the sketch after the greedy implementation below.

python

def generate_random_poem(model, idx2word, max_len=28):
    model.eval()
    device = next(model.parameters()).device
    start_idx = np.random.randint(model.vocab_size)
    current_seq = [start_idx]
    
    with torch.no_grad():
        hidden = model.init_hidden(1)
        for _ in range(max_len - 1):
            x = torch.tensor([[current_seq[-1]]], dtype=torch.long, device=device)
            output, hidden = model(x, hidden)
            # Greedy choice: take the most probable character
            next_idx = torch.argmax(output[0, -1]).item()
            current_seq.append(next_idx)
    
    poem = "".join(idx2word[idx] for idx in current_seq)
    return poem
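
The greedy loop above always picks the single most probable character, which tends to produce repetitive text. Below is a minimal sketch of the temperature sampling mentioned in 7.1; the sample_next helper and the temperature value are illustrative additions, not part of the original code:

python

import torch.nn.functional as F

def sample_next(logits, temperature=0.8):
    # logits: a (vocab_size,) tensor of raw scores for the next character.
    # temperature < 1 sharpens the distribution (more conservative),
    # temperature > 1 flattens it (more diverse).
    probs = F.softmax(logits / temperature, dim=-1)
    return torch.multinomial(probs, num_samples=1).item()

Replacing the torch.argmax(...) line with next_idx = sample_next(output[0, -1]) usually yields noticeably more varied poems.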

7.2 Acrostic Generation

  • Key step: force the first character of each line to be one of the user-supplied characters, generate the remaining 6 characters of the line, then append a punctuation mark.

python

def generate_acrostic_poem(model, input_words, idx2word, word2idx):
    model.eval()
    device = next(model.parameters()).device
    poem = []
    punctuation = [",", "。", ",", "。"]  # alternating comma/period, matching the classical form
    
    with torch.no_grad():
        for i, word in enumerate(input_words):
            if word not in word2idx:
                raise ValueError(f"Acrostic character '{word}' is not in the vocabulary!")
            
            current_seq = [word2idx[word]]
            hidden = model.init_hidden(1)
            
            for _ in range(6):  # generate the remaining 6 characters (7 per line including the acrostic character)
                x = torch.tensor([[current_seq[-1]]], dtype=torch.long, device=device)
                output, hidden = model(x, hidden)
                next_idx = torch.argmax(output[0, -1]).item()
                current_seq.append(next_idx)
            
            sentence = "".join(idx2word[idx] for idx in current_seq)
            poem.append(sentence + punctuation[i])
    
    return "".join(poem)

8. Training Results and Visualization

8.1 Key Metrics

  • Final loss: 0.4947 (lower is better)
  • Perplexity: 1.6400, i.e. exp(loss) (lower means more accurate next-character prediction)
  • Training time: about 19.5 minutes (RTX 3060 GPU)

8.2 Visualization

python

import matplotlib.pyplot as plt

def plot_training_metrics(loss_list, perplexity_list):
    plt.figure(figsize=(15, 5))
    
    # Loss curve
    plt.subplot(1, 2, 1)
    plt.plot(loss_list, label="Training Loss")
    plt.xlabel("Batch")
    plt.ylabel("Loss")
    plt.title("Loss Curve")
    plt.legend()
    
    # Perplexity curve
    plt.subplot(1, 2, 2)
    plt.plot(perplexity_list, label="Perplexity")
    plt.xlabel("Batch")
    plt.ylabel("Perplexity")
    plt.title("Perplexity Curve")
    plt.legend()
    
    plt.show()
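
The two lists this function expects are exactly what train_model in Section 6.2 returns, so the curves can be drawn directly:

python

loss_list, perplexity_list = train_model(model, dataset, params)
plot_training_metrics(loss_list, perplexity_list)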

9. Running the Code

9.1 Installing Dependencies

bash

pip install torch gensim matplotlib

9.2 Data Preparation

  1. Download the poetry data from the chinese-poetry project
  2. Filter out the seven-character poems and save them as poetry_7.txt (one poem per line, no punctuation); a sketch of this step follows below
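
A minimal sketch of step 2 (it assumes the JSON layout of the chinese-poetry repository, i.e. files such as json/poet.tang.0.json whose entries contain a "paragraphs" list; adjust the path and field names to your copy, and note that the repository stores traditional characters):

python

import glob
import json
import re

poems = []
for path in glob.glob("chinese-poetry/json/poet.tang.*.json"):
    for entry in json.load(open(path, encoding="utf-8")):
        # Strip punctuation and keep only poems made of four 7-character lines
        text = re.sub(r"[,。?!、]", "", "".join(entry.get("paragraphs", [])))
        if len(text) == 28:
            poems.append(text)

with open("poetry_7.txt", "w", encoding="utf-8") as f:
    f.write("\n".join(poems))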

9.3 Start Training

python

if __name__ == "__main__":
    # Build split_7.txt and the word vectors on the first run, then load them
    if not os.path.exists("split_7.txt"):
        split_text()
    word_vectors, word2idx, idx2word = train_vec()
    # Keep only non-empty lines so every sample has the expected length
    poems = [line for line in open("poetry_7.txt", "r", encoding="utf-8").read().split("\n") if line.strip()]
    dataset = PoetryDataset(word_vectors, word2idx, poems)
    
    # Initialize the model and hyperparameters
    model = PoetryGenerator(word_vectors, hidden_dim=128)
    params = {
        "batch_size": 32,
        "epochs": 1000,
        "learning_rate": 0.001,
        "optimizer": torch.optim.AdamW
    }
    
    # Train, then generate poems
    loss_list, perplexity_list = train_model(model, dataset, params)
    print("Random poem:", generate_random_poem(model, idx2word))
    print("Acrostic poem:", generate_acrostic_poem(model, ["深", "度", "学", "习"], idx2word, word2idx))

10. Summary and Future Improvements

10.1 Highlights

  • End-to-end pipeline: covers data preprocessing, word-vector training, model building, and generation
  • Interpretability: visualizing the Word2Vec vectors helps explain semantic relations between characters
  • Interactivity: supports on-demand acrostic generation for a better user experience

10.2 Suggested Improvements

  1. Add an attention mechanism to better capture long-range dependencies
  2. Support more genres, such as five-character poems and Song ci
  3. Add a tonal-pattern (平仄) checker to improve metrical correctness

11. Complete Code

python

import os
import numpy as np
import pickle
import torch
import torch.nn as nn
from gensim.models.word2vec import Word2Vec
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
import math
import sys
import time
# from rouge import Rouge  # optional: only needed if you add text-generation evaluation

# Split the text file for Word2Vec training
def split_text(file="poetry_7.txt", train_num=6000):
    all_data = open(file, "r", encoding="utf-8").read()
    with open("split_7.txt", "w", encoding="utf-8") as f:
        split_data = " ".join(all_data)
        f.write(split_data)
    return split_data[:train_num * 64]

# Train the Word2Vec model and save its parameters
def train_vec(vector_size=128, split_file="split_7.txt", org_file="poetry_7.txt", train_num=6000):
    param_file = "word_vec.pkl"
    org_data = open(org_file, "r", encoding="utf-8").read().split("\n")[:train_num]
    if os.path.exists(split_file):
        all_data_split = open(split_file, "r", encoding="utf-8").read().split("\n")[:train_num]
    else:
        all_data_split = split_text().split("\n")[:train_num]

    if os.path.exists(param_file):
        return org_data, pickle.load(open(param_file, "rb"))

    # syn1neg (the negative-sampling output weights) is used as the embedding matrix here; model.wv.vectors would also work
    models = Word2Vec([line.split() for line in all_data_split], vector_size=vector_size, workers=7, min_count=1)
    pickle.dump([models.syn1neg, models.wv.key_to_index, models.wv.index_to_key], open(param_file, "wb"))
    return org_data, (models.syn1neg, models.wv.key_to_index, models.wv.index_to_key)

# Custom dataset class for loading the poems
class Poetry_Dataset(Dataset):
    def __init__(self, w1, word_2_index, all_data):
        self.w1 = w1
        self.word_2_index = word_2_index
        self.all_data = all_data

    def __getitem__(self, index):
        a_poetry = self.all_data[index]

        a_poetry_index = [self.word_2_index[i] for i in a_poetry]
        xs = a_poetry_index[:-1]
        ys = a_poetry_index[1:]
        xs_embedding = self.w1[xs]

        return xs_embedding, np.array(ys).astype(np.int64)

    def __len__(self):
        return len(self.all_data)

# LSTM-based poetry model
class Poetry_Model_lstm(nn.Module):
    def __init__(self, params):
        super().__init__()
        self.all_data, (self.w1, self.word_2_index, self.index_2_word) = train_vec(vector_size=params["embedding_num"],
                                                                                   train_num=params["train_num"])

        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.hidden_num = params["hidden_num"]
        self.batch_size = params["batch_size"]
        self.epochs = params["epochs"]
        self.lr = params["lr"]
        self.optimizer = params["optimizer"]
        self.word_size, self.embedding_num = self.w1.shape

        self.lstm = nn.LSTM(input_size=self.embedding_num, hidden_size=self.hidden_num, batch_first=True, num_layers=2,
                            bidirectional=False)
        self.dropout = nn.Dropout(0.3)  # a poem has no single "correct" continuation, so some randomness is acceptable
        self.flatten = nn.Flatten(0, 1)
        self.linear = nn.Linear(self.hidden_num, self.word_size)
        self.cross_entropy = nn.CrossEntropyLoss()

    def forward(self, xs_embedding, h_0=None, c_0=None):
        if h_0 is None or c_0 is None:
            h_0 = torch.zeros((2, xs_embedding.shape[0], self.hidden_num), dtype=torch.float32)
            c_0 = torch.zeros((2, xs_embedding.shape[0], self.hidden_num), dtype=torch.float32)
        h_0 = h_0.to(self.device)
        c_0 = c_0.to(self.device)
        xs_embedding = xs_embedding.to(self.device)
        hidden, (h_0, c_0) = self.lstm(xs_embedding, (h_0, c_0))
        hidden_drop = self.dropout(hidden)
        hidden_flatten = self.flatten(hidden_drop)
        pre = self.linear(hidden_flatten)

        return pre, (h_0, c_0)

    def to_train(self):
        model_result_file = "Poetry_Model_lstm_model.pkl"
        if os.path.exists(model_result_file):
            return pickle.load(open(model_result_file, "rb"))

        # initialize lists to store the loss and perplexity values
        self.loss_values = []
        self.perplexity_values = []

        dataset = Poetry_Dataset(self.w1, self.word_2_index, self.all_data)
        dataloader = DataLoader(dataset, self.batch_size)
        steps_per_epoch = len(dataloader)

        optimizer = self.optimizer(self.parameters(), self.lr)
        self = self.to(self.device)

        # record the training start time
        total_start_time = time.time()

        for epoch in range(1, self.epochs + 1):
            # print the epoch header
            print(f'Epoch {epoch}/{self.epochs}')

            for step, (batch_x_embedding, batch_y_index) in enumerate(dataloader, start=1):
                start_time = time.time()  # step start time

                self.train()
                batch_x_embedding = batch_x_embedding.to(self.device)
                batch_y_index = batch_y_index.to(self.device)

                pre, _ = self(batch_x_embedding)
                loss = self.cross_entropy(pre, batch_y_index.reshape(-1))

                loss.backward()  # back-propagate the gradients
                optimizer.step()  # update the parameters with the optimizer
                optimizer.zero_grad()  # clear the gradients

                # record the loss value
                self.loss_values.append(loss.item())
                # compute the perplexity
                perplexity = math.exp(loss.item())
                self.perplexity_values.append(perplexity)

                end_time = time.time()  # step end time
                time_elapsed = (end_time - start_time) * 1000  # elapsed time, converted to milliseconds

                # build the progress bar
                progress = (step / steps_per_epoch) * 100
                filled_length = int(progress / 100 * 20)  # filled length, out of a 20-character bar
                bar = '=' * filled_length + '-' * (20 - filled_length)  # total width of 20 characters
                arrow = '>'
                if step == steps_per_epoch:
                    arrow = '='

                # print the progress information
                sys.stdout.write(
                    f'\r{step}/{steps_per_epoch} [{bar}] {arrow} - {time_elapsed:.2f}ms/step - loss: {loss.item():.4f} - Perplexity: {perplexity:.4f}')
                sys.stdout.flush()

                if step % 100 == 0:
                    self.generate_poetry_auto()

            # move to a new line after each epoch
            print()



        # after all epochs have finished
        total_end_time = time.time()         # record the training end time
        total_training_time = (total_end_time - total_start_time) * 1000       # total training time, converted to milliseconds
        total_loss = sum(self.loss_values)  # total loss
        average_loss = total_loss / len(self.loss_values)  # average loss
        total_perplexity = math.exp(average_loss)  # perplexity over the whole run

        print(f'\nTotal training time for all epochs: {total_training_time / 1000:.2f}s')
        print(f'Average loss for all epochs: {average_loss:.4f}')
        print(f'Total perplexity for all epochs: {total_perplexity:.4f}')

        # save the model
        pickle.dump(self, open(model_result_file, "wb"))

        self.plot_metrics()

        return self

    def plot_loss(self):
        plt.figure(figsize=(10, 5))
        plt.plot(self.loss_values, label='Loss')
        plt.xlabel('Batch')
        plt.ylabel('Loss')
        plt.title('Training Loss Over Batches')
        plt.legend()
        plt.grid(True)
        plt.show()

    def plot_metrics(self):
        plt.figure(figsize=(15, 5))

        # plot the loss curve
        plt.subplot(1, 2, 1)
        plt.plot(self.loss_values, label='Loss')
        plt.xlabel('Batch')
        plt.ylabel('Loss')
        plt.title('Training Loss Over Batches')
        plt.legend()
        plt.grid(True)

        # plot the perplexity curve
        plt.subplot(1, 2, 2)
        plt.plot(self.perplexity_values, label='Perplexity')
        plt.xlabel('Batch')
        plt.ylabel('Perplexity')
        plt.title('Training Perplexity Over Batches')
        plt.legend()
        plt.grid(True)

        plt.tight_layout()
        plt.show()
    # Automatically generate a poem
    def generate_poetry_auto(self):
        # self.eval()
        result = ""
        word_index = np.random.randint(0, self.word_size, 1)[0]

        result += self.index_2_word[word_index]
        h_0 = torch.tensor(np.zeros((2, 1, self.hidden_num), dtype=np.float32))
        c_0 = torch.tensor(np.zeros((2, 1, self.hidden_num), dtype=np.float32))

        for i in range(27):  # 28 characters in total: four 7-character lines
            word_embedding = torch.tensor(self.w1[word_index][None][None])
            pre, (h_0, c_0) = self(word_embedding, h_0, c_0)
            word_index = int(torch.argmax(pre))
            result += self.index_2_word[word_index]

        print(result)

    # Generate an acrostic poem
    def generate_poetry_acrostic(self):

        while True:

            input_text = input("Enter four Chinese characters (or press Enter to generate a random poem): ")[:4]
            if input_text == "":
                self.generate_poetry_auto()
            else:

                result = ""
                punctuation_list = [",", "。", ",", "。"]
                for i in range(4):

                    h_0 = torch.tensor(np.zeros((2, 1, self.hidden_num), dtype=np.float32))
                    c_0 = torch.tensor(np.zeros((2, 1, self.hidden_num), dtype=np.float32))
                    word = input_text[i]
                    try:
                        word_index = self.word_2_index[word]
                    except KeyError:
                        # fall back to a random character if the input is out of vocabulary
                        word_index = np.random.randint(0, self.word_size, 1)[0]
                        word = self.index_2_word[word_index]
                    result += word

                    for j in range(6):
                        word_index = self.word_2_index[word]
                        word_embedding = torch.tensor(self.w1[word_index][None][None])
                        pre, (h_0, c_0) = self(word_embedding, h_0, c_0)
                        word = self.index_2_word[int(torch.argmax(pre))]
                        result += word
                    result += punctuation_list[i]
                print(result)

# Main entry point
if __name__ == "__main__":
    # ---------------------------------  hyperparameters  --------------------------------------
    params = {
        "batch_size": 32,  # batch size
        "epochs": 1000,  # number of epochs
        "lr": 0.003,  # learning rate
        "hidden_num": 128,  # hidden size
        "embedding_num": 256,  # word-vector dimension
        "train_num": 1000,  # number of poems used for training; 7-character poems: 0~6290, 5-character poems: 0~2929
        "optimizer": torch.optim.AdamW,  # optimizer (note: pass the class, not an instance)
        "batch_num_test": 100,  # print a sample poem every this many batches for a quick quality check
    }

    model = Poetry_Model_lstm(params)  # build the model
    model = model.to_train()  # train the model
    model.generate_poetry_acrostic()  # test acrostic generation


