Task 1: Study Notes 01 on the Terminology-Dictionary-Constrained Machine Translation Challenge (Datawhale AI Summer Camp)

Competition task overview:

The Terminology-Dictionary-Constrained Machine Translation Challenge uses English as the source language and Chinese as the target language. Besides the English-Chinese bilingual data, the competition also provides an English-Chinese terminology dictionary. Teams must build and train a machine translation model on the provided training data, then produce final translations for the test set with the help of the terminology dictionary. The data comprise:

  • Training set: roughly 140,000 English-Chinese bilingual sentence pairs
  • Development set: 1,000 English-Chinese sentence pairs
  • Test set: 1,000 English-Chinese sentence pairs
  • Terminology dictionary: 2,226 English-Chinese entries
Competition link:

https://challenge.xfyun.cn/topic/info?type=machine-translation-2024&option=tjjg&ch=dw24_AtTCK9

Baseline Code Walkthrough

Link to the tutorial document and baseline code:

从零入门NLP竞赛 - 飞书云文档 (feishu.cn) ("Getting Started with NLP Competitions from Zero", Feishu doc)

Note for running the baseline as-is: the code must sit in its own folder at the same level as the data folder.


01 Defining the Dataset Class

Define a dataset class that handles the following:

  1. Reading the dataset and terminology dictionary files
  2. Tokenizing the training text (English and Chinese)
  3. Building the vocabularies (vocab)
  4. Padding the English and Chinese sequences (open question: what do the padded sequences look like, and why pad this way? See the worked example after collate_fn below.)
# Define the dataset class
# TranslationDataset is extended here to handle the terminology dictionary
import random
import time
from collections import Counter

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, Subset
from torchtext.data.utils import get_tokenizer

class TranslationDataset(Dataset):
    def __init__(self, filename, terminology):
        self.data = []
        with open(filename, 'r', encoding='utf-8') as f:
            for line in f:
                en, zh = line.strip().split('\t')
                self.data.append((en, zh))
        
        self.terminology = terminology
        
        # Tokenization: a rule-based word tokenizer for English, and
        # character-level tokenization for Chinese (list() splits a string
        # into its characters)
        self.en_tokenizer = get_tokenizer('basic_english')
        self.zh_tokenizer = list  # character-level tokenization
        
        en_vocab = Counter(self.terminology.keys())  # make sure terms are counted
        zh_vocab = Counter()
        
        for en, zh in self.data:
            en_vocab.update(self.en_tokenizer(en))
            zh_vocab.update(self.zh_tokenizer(zh))
        
        # The vocab lists map index -> token and back the embedding layers,
        # i.e. they are what turns natural-language tokens into ids and then
        # vectors. Terminology entries are placed right after the special
        # tokens so every term is guaranteed a slot even if rare in the data.
        self.en_vocab = ['<pad>', '<sos>', '<eos>'] + list(self.terminology.keys()) + [word for word, _ in en_vocab.most_common(10000)]
        self.zh_vocab = ['<pad>', '<sos>', '<eos>'] + [word for word, _ in zh_vocab.most_common(10000)]
        
        self.en_word2idx = {word: idx for idx, word in enumerate(self.en_vocab)}
        self.zh_word2idx = {word: idx for idx, word in enumerate(self.zh_vocab)}

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        en, zh = self.data[idx]
        # Unknown tokens fall back to the '<sos>' id, since the baseline
        # vocab has no dedicated '<unk>' token; '<eos>' is appended at the end
        en_tensor = torch.tensor([self.en_word2idx.get(word, self.en_word2idx['<sos>']) for word in self.en_tokenizer(en)] + [self.en_word2idx['<eos>']])
        zh_tensor = torch.tensor([self.zh_word2idx.get(word, self.zh_word2idx['<sos>']) for word in self.zh_tokenizer(zh)] + [self.zh_word2idx['<eos>']])
        return en_tensor, zh_tensor

def collate_fn(batch):
    en_batch, zh_batch = [], []
    for en_item, zh_item in batch:
        en_batch.append(en_item)
        zh_batch.append(zh_item)
    
    # Pad the English and Chinese sequences separately; padding_value=0 is
    # the index of '<pad>', and batch_first gives [batch_size, max_len]
    en_batch = nn.utils.rnn.pad_sequence(en_batch, padding_value=0, batch_first=True)
    zh_batch = nn.utils.rnn.pad_sequence(zh_batch, padding_value=0, batch_first=True)
    
    return en_batch, zh_batch
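
To answer the open question above, a minimal sketch of what pad_sequence produces (the token ids are made up for illustration; 0 is the '<pad>' index, as in TranslationDataset):

import torch
import torch.nn as nn

# Two variable-length id sequences, as __getitem__ would return them
a = torch.tensor([5, 9, 2])        # a short sentence ending in '<eos>'
b = torch.tensor([7, 4, 8, 6, 2])  # a longer one
padded = nn.utils.rnn.pad_sequence([a, b], padding_value=0, batch_first=True)
print(padded)
# tensor([[5, 9, 2, 0, 0],
#         [7, 4, 8, 6, 2]])
# Every sequence is right-padded with the '<pad>' id up to the batch maximum,
# so the batch becomes one rectangular tensor; the loss later skips these
# positions via ignore_index=zh_word2idx['<pad>']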

02 Loading the Terminology Dictionary
# Newly added: load the terminology dictionary (one en<TAB>zh pair per line)
def load_terminology_dictionary(dict_file):
    terminology = {}
    with open(dict_file, 'r', encoding='utf-8') as f:
        for line in f:
            en_term, ch_term = line.strip().split('\t')
            terminology[en_term] = ch_term
    return terminology
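
A minimal usage sketch (the dictionary entry in the comment is hypothetical; en-zh.dic is a tab-separated file with one en<TAB>zh pair per line):

# Suppose en-zh.dic contained a line such as:
#   neural network\t神经网络
terminology = load_terminology_dictionary('../dataset/en-zh.dic')
print(terminology.get('neural network'))  # -> '神经网络', if that entry exists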
03 Building the Model

The model uses an encoder-decoder architecture. This code shows the usual conventions for writing model classes and how the encoder and decoder are connected (the encoder's final hidden state initializes the decoder). Open question to think about: how would a decoder-only architecture be written? A hedged sketch follows after the Seq2Seq class below.

# Build the neural network: encoder-decoder architecture
class Encoder(nn.Module):
    def __init__(self, input_dim, emb_dim, hid_dim, n_layers, dropout):
        super().__init__()
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.rnn = nn.GRU(emb_dim, hid_dim, n_layers, dropout=dropout, batch_first=True)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src):
        # src shape: [batch_size, src_len]
        embedded = self.dropout(self.embedding(src))
        # embedded shape: [batch_size, src_len, emb_dim]
        outputs, hidden = self.rnn(embedded)
        # outputs shape: [batch_size, src_len, hid_dim]
        # hidden shape: [n_layers, batch_size, hid_dim]
        return outputs, hidden

class Decoder(nn.Module):
    def __init__(self, output_dim, emb_dim, hid_dim, n_layers, dropout):
        super().__init__()
        self.output_dim = output_dim
        self.embedding = nn.Embedding(output_dim, emb_dim)
        self.rnn = nn.GRU(emb_dim, hid_dim, n_layers, dropout=dropout, batch_first=True)
        self.fc_out = nn.Linear(hid_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input, hidden):
        # input shape: [batch_size, 1]
        # hidden shape: [n_layers, batch_size, hid_dim]
        
        embedded = self.dropout(self.embedding(input))
        # embedded shape: [batch_size, 1, emb_dim]
        
        output, hidden = self.rnn(embedded, hidden)
        # output shape: [batch_size, 1, hid_dim]
        # hidden shape: [n_layers, batch_size, hid_dim]
        
        prediction = self.fc_out(output.squeeze(1))
        # prediction shape: [batch_size, output_dim]
        
        return prediction, hidden

class Seq2Seq(nn.Module):
    def __init__(self, encoder, decoder, device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device

    def forward(self, src, trg, teacher_forcing_ratio=0.5):
        # src shape: [batch_size, src_len]
        # trg shape: [batch_size, trg_len]
        
        batch_size = src.shape[0]
        trg_len = trg.shape[1]
        trg_vocab_size = self.decoder.output_dim

        outputs = torch.zeros(batch_size, trg_len, trg_vocab_size).to(self.device)
        
        # The encoder's final hidden state initializes the decoder; this is
        # where the encoder and decoder are connected
        _, hidden = self.encoder(src)
        
        input = trg[:, 0].unsqueeze(1)  # start token ('<sos>')
        
        for t in range(1, trg_len):
            output, hidden = self.decoder(input, hidden)
            outputs[:, t, :] = output
            # Teacher forcing: with probability teacher_forcing_ratio, feed
            # the ground-truth token rather than the model's own prediction
            teacher_force = random.random() < teacher_forcing_ratio
            top1 = output.argmax(1)
            input = trg[:, t].unsqueeze(1) if teacher_force else top1.unsqueeze(1)

        return outputs
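
On the decoder-only question raised above, here is a minimal, hypothetical sketch (DecoderOnlyLM and all its parameters are illustrative, not part of the baseline). The idea is to model source and target as one concatenated token stream with causal self-attention, GPT-style; positional encodings are omitted for brevity:

import torch
import torch.nn as nn

class DecoderOnlyLM(nn.Module):
    """Hypothetical decoder-only sketch (not part of the baseline)."""
    def __init__(self, vocab_size, emb_dim=256, n_heads=4, n_layers=2, dropout=0.1):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim)
        layer = nn.TransformerEncoderLayer(
            emb_dim, n_heads, dim_feedforward=4 * emb_dim,
            dropout=dropout, batch_first=True)
        # Self-attention blocks only; with a causal mask this is the usual
        # "decoder-only" setup (no cross-attention to an encoder)
        self.blocks = nn.TransformerEncoder(layer, n_layers)
        self.lm_head = nn.Linear(emb_dim, vocab_size)

    def forward(self, tokens):
        # tokens: [batch_size, seq_len]; for translation, source and target
        # would be concatenated into one stream with a separator token
        seq_len = tokens.size(1)
        # Causal mask: position t may only attend to positions <= t
        mask = torch.triu(torch.full((seq_len, seq_len), float('-inf'),
                                     device=tokens.device), diagonal=1)
        hidden = self.blocks(self.embedding(tokens), mask=mask)
        return self.lm_head(hidden)  # [batch_size, seq_len, vocab_size]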
04 Model Training

The training function; its hyperparameters can be tuned and directly affect the quality of the final model.

def train(model, iterator, optimizer, criterion, clip):
    model.train()
    epoch_loss = 0
    for i, (src, trg) in enumerate(iterator):
        src, trg = src.to(device), trg.to(device)
        optimizer.zero_grad()
        output = model(src, trg)
        output_dim = output.shape[-1]
        # Drop position 0 (it is never predicted; outputs[:, 0] stays zero)
        # and flatten both tensors for the token-level cross-entropy loss
        output = output[:, 1:].contiguous().view(-1, output_dim)
        trg = trg[:, 1:].contiguous().view(-1)
        loss = criterion(output, trg)
        loss.backward()
        # Clip gradients to stabilize RNN training
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)
    
# Main
if __name__ == '__main__':
    start_time = time.time()  # start timing

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Load the terminology dictionary
    terminology = load_terminology_dictionary('../dataset/en-zh.dic')

    # Load the data
    dataset = TranslationDataset('../dataset/train.txt', terminology=terminology)
    # Select the first N samples for training
    # N = 2000  # the first run trained on only 2000 pairs
    N = int(len(dataset))  # the second run used the full dataset
    subset_indices = list(range(N))
    subset_dataset = Subset(dataset, subset_indices)
    train_loader = DataLoader(subset_dataset, batch_size=32, shuffle=True, collate_fn=collate_fn)

    # Model hyperparameters
    INPUT_DIM = len(dataset.en_vocab)
    OUTPUT_DIM = len(dataset.zh_vocab)
    ENC_EMB_DIM = 256
    DEC_EMB_DIM = 256
    HID_DIM = 512
    N_LAYERS = 2
    ENC_DROPOUT = 0.5
    DEC_DROPOUT = 0.5

    # Instantiate the model
    enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
    dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
    model = Seq2Seq(enc, dec, device).to(device)

    # Optimizer and loss function; '<pad>' positions are ignored by the loss
    optimizer = optim.Adam(model.parameters())
    criterion = nn.CrossEntropyLoss(ignore_index=dataset.zh_word2idx['<pad>'])

    # Train the model
    N_EPOCHS = 10
    CLIP = 1

    for epoch in range(N_EPOCHS):
        train_loss = train(model, train_loader, optimizer, criterion, CLIP)
        print(f'Epoch: {epoch+1:02} | Train Loss: {train_loss:.3f}')
        
    # Save the model after the training loop
    torch.save(model.state_dict(), './translation_model_GRU.pth')
    
    end_time = time.time()  # stop timing

    # Compute and print the running time
    elapsed_time_minute = (end_time - start_time)/60
    print(f"Total running time: {elapsed_time_minute:.2f} minutes")
05 Model Validation

The model is evaluated with BLEU-4, and the score can guide tuning. The baseline score is quite low; data cleaning and model training still leave plenty of room for improvement.
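
As a side note, a minimal sacrebleu sketch (the sentences are made up for illustration). For Chinese output, passing tokenize='zh' makes sacrebleu score at the character level, whereas the baseline's plain BLEU() below uses the default English-oriented tokenizer:

from sacrebleu.metrics import BLEU

hyps = ['今天天气很好']       # system outputs, one per source sentence
refs = [['今天天气真好']]     # one reference stream covering all hypotheses
bleu = BLEU(tokenize='zh')   # character-level scoring for Chinese
print(bleu.corpus_score(hyps, refs).score)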

import torch
from sacrebleu.metrics import BLEU
from typing import List

# Assumes TranslationDataset, Encoder, Decoder, and Seq2Seq are already defined

def load_sentences(file_path: str) -> List[str]:
    with open(file_path, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]

# translate_sentence is updated to take the terminology dictionary into account
def translate_sentence(sentence: str, model: Seq2Seq, dataset: TranslationDataset, terminology, device: torch.device, max_length: int = 50):
    model.eval()
    tokens = dataset.en_tokenizer(sentence)
    # Unknown words fall back to the '<sos>' id, since the baseline vocab
    # has no dedicated '<unk>' token
    tensor = torch.LongTensor([dataset.en_word2idx.get(token, dataset.en_word2idx['<sos>']) for token in tokens]).unsqueeze(0).to(device)  # [1, seq_len]
    
    with torch.no_grad():
        _, hidden = model.encoder(tensor)

    translated_tokens = []
    input_token = torch.LongTensor([[dataset.zh_word2idx['<sos>']]]).to(device)  # [1, 1]

    for _ in range(max_length):
        output, hidden = model.decoder(input_token, hidden)
        top_token = output.argmax(1)
        translated_token = dataset.zh_vocab[top_token.item()]
        
        if translated_token == '<eos>':
            break
        
        # If the generated token matches a Chinese term in the dictionary,
        # the baseline swaps it for the English source term. Two caveats:
        # this looks inverted for en->zh translation, and since the Chinese
        # vocab is character-level, the branch almost never fires anyway
        if translated_token in terminology.values():
            for en_term, ch_term in terminology.items():
                if translated_token == ch_term:
                    translated_token = en_term
                    break
        
        translated_tokens.append(translated_token)
        input_token = top_token.unsqueeze(1)  # [1, 1]

    return ''.join(translated_tokens)
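
For reference, a hedged alternative for the terminology intervention (apply_terminology is a hypothetical helper, not baseline code): instead of swapping generated Chinese terms back to English, scan the source sentence for dictionary terms and post-edit the output so that their Chinese translations appear.

def apply_terminology(src_sentence: str, translation: str, terminology: dict) -> str:
    # Crude post-editing sketch: if the source contains a known English term
    # but its required Chinese translation is missing from the output,
    # append the translation as a fallback
    for en_term, zh_term in terminology.items():
        if en_term.lower() in src_sentence.lower() and zh_term not in translation:
            translation += zh_term
    return translation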

def evaluate_bleu(model: Seq2Seq, dataset: TranslationDataset, src_file: str, ref_file: str, terminology, device: torch.device):
    model.eval()
    src_sentences = load_sentences(src_file)
    ref_sentences = load_sentences(ref_file)
    
    translated_sentences = []
    for src in src_sentences:
        translated = translate_sentence(src, model, dataset, terminology, device)
        translated_sentences.append(translated)
    
    bleu = BLEU()
    score = bleu.corpus_score(translated_sentences, [ref_sentences])
    
    return score

# Main
if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Load the terminology dictionary
    terminology = load_terminology_dictionary('../dataset/en-zh.dic')
    
    # Pass the terminology dictionary when creating the dataset instance
    dataset = TranslationDataset('../dataset/train.txt', terminology)
    

    # Model hyperparameters (must match those used in training)
    INPUT_DIM = len(dataset.en_vocab)
    OUTPUT_DIM = len(dataset.zh_vocab)
    ENC_EMB_DIM = 256
    DEC_EMB_DIM = 256
    HID_DIM = 512
    N_LAYERS = 2
    ENC_DROPOUT = 0.5
    DEC_DROPOUT = 0.5

    # Instantiate the model
    enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
    dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
    model = Seq2Seq(enc, dec, device).to(device)

    # Load the trained weights
    model.load_state_dict(torch.load('./translation_model_GRU.pth'))

    # Evaluate the BLEU score on the dev set
    bleu_score = evaluate_bleu(model, dataset, '../dataset/dev_en.txt', '../dataset/dev_zh.txt', terminology=terminology, device=device)
    print(f'BLEU-4 score: {bleu_score.score:.2f}')
06 Test-Set Inference

Run inference with the validated model to generate the translated file required for submission.

def inference(model: Seq2Seq, dataset: TranslationDataset, src_file: str, save_dir: str, terminology, device: torch.device):
    model.eval()
    src_sentences = load_sentences(src_file)
    
    translated_sentences = []
    for src in src_sentences:
        translated = translate_sentence(src, model, dataset, terminology, device)
        translated_sentences.append(translated)

    # Join the translations into a single string, one sentence per line
    text = '\n'.join(translated_sentences)

    # Open the output file in write mode ('w' creates it if it does not exist)
    with open(save_dir, 'w', encoding='utf-8') as f:
        f.write(text)

# Main
if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Load the terminology dictionary
    terminology = load_terminology_dictionary('../dataset/en-zh.dic')
    # Load the dataset (to rebuild the vocab) and the model
    dataset = TranslationDataset('../dataset/train.txt', terminology=terminology)

    # Model hyperparameters (must match those used in training)
    INPUT_DIM = len(dataset.en_vocab)
    OUTPUT_DIM = len(dataset.zh_vocab)
    ENC_EMB_DIM = 256
    DEC_EMB_DIM = 256
    HID_DIM = 512
    N_LAYERS = 2
    ENC_DROPOUT = 0.5
    DEC_DROPOUT = 0.5

    # Instantiate the model
    enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
    dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
    model = Seq2Seq(enc, dec, device).to(device)

    # Load the trained weights
    model.load_state_dict(torch.load('./translation_model_GRU.pth'))
    
    save_dir = '../dataset/submit.txt'
    inference(model, dataset, src_file='../dataset/test_en.txt', save_dir=save_dir, terminology=terminology, device=device)
    print(f"Translation complete! Output saved to {save_dir}")

Final Score

After following the tutorial step by step and getting the code to run, submit.txt was uploaded and scored.

For the second submission, the model was trained on the full training data with N_EPOCHS = 10.

Next steps for optimization:

  1. Try data cleaning

    Inspect the data for patterns and remove interference, noise, and other irrelevant content (a first-pass filter is sketched after this list).

  2. Modify the model

    Switch the current encoder-decoder GRU to a decoder-only architecture, e.g. a Transformer-style model (see the sketch after the Seq2Seq class above).

  3. Try using the terminology dictionary itself as additional training data.
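
On item 1, a hypothetical first-pass cleaning filter (assuming, as in TranslationDataset, that train.txt holds tab-separated en<TAB>zh pairs; the length-ratio bounds are guesses to tune):

def clean_pairs(lines):
    seen, cleaned = set(), []
    for line in lines:
        parts = line.strip().split('\t')
        if len(parts) != 2:
            continue  # drop malformed lines
        en, zh = parts
        if not en or not zh or (en, zh) in seen:
            continue  # drop empty and exact-duplicate pairs
        # Drop pairs whose English-word / Chinese-character ratio is
        # implausible; such pairs are often misaligned
        if not (0.2 < len(en.split()) / max(len(zh), 1) < 5.0):
            continue
        seen.add((en, zh))
        cleaned.append((en, zh))
    return cleaned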
