Link to the previous Task 1 write-up:
Overview:
Task 2 uses a Seq2Seq model. The implementation is organized into four parts: environment setup, data preprocessing, model training, and translation quality evaluation.
一. Environment Setup
Install the required libraries with the following commands:
!pip install torchtext
!pip install jieba
!pip install sacrebleu
To install spaCy, download the model archive from https://github.com/explosion/spacy-models/releases, upload it to the dataset directory, and then run:
!pip install -U pip setuptools wheel -i https://pypi.tuna.tsinghua.edu.cn/simple
!pip install -U 'spacy[cuda12x]' -i https://pypi.tuna.tsinghua.edu.cn/simple
!pip install ./dataset/en_core_web_trf-3.7.3-py3-none-any.whl
Then import the libraries in your code:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_
from torchtext.data.metrics import bleu_score
from torch.utils.data import Dataset, DataLoader
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from typing import List, Tuple
import jieba
import random
from torch.nn.utils.rnn import pad_sequence
import sacrebleu
import time
import math
二. Data Preprocessing
1. Define the tokenizers
# Define the tokenizers
en_tokenizer = get_tokenizer('spacy', language='en_core_web_trf')
zh_tokenizer = lambda x: list(jieba.cut(x))  # use jieba for Chinese word segmentation
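As a quick sanity check (purely illustrative; the exact English tokens depend on the spaCy model you installed), you can run both tokenizers on a sample sentence:
# Illustrative smoke test for the two tokenizers
print(en_tokenizer('The quick brown fox jumps over the lazy dog.'))
print(zh_tokenizer('机器翻译是自然语言处理的一个重要方向。'))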
2. Data reading function
# Read one sentence per line from a file
def read_data(file_path: str) -> List[str]:
with open(file_path, 'r', encoding='utf-8') as f:
return [line.strip() for line in f]
3. Build the vocabularies
# Build source and target vocabularies from the tokenized training data
def build_vocab(data: List[Tuple[List[str], List[str]]]):
en_vocab = build_vocab_from_iterator(
(en for en, _ in data),
specials=['<unk>', '<pad>', '<bos>', '<eos>']
)
zh_vocab = build_vocab_from_iterator(
(zh for _, zh in data),
specials=['<unk>', '<pad>', '<bos>', '<eos>']
)
en_vocab.set_default_index(en_vocab['<unk>'])
zh_vocab.set_default_index(zh_vocab['<unk>'])
return en_vocab, zh_vocab
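Because set_default_index points both vocabularies at <unk>, any token that never appeared in the training data maps to the <unk> index. A one-line illustration (assuming the vocabularies have already been built by load_data below; the lookup token is made up):
# Out-of-vocabulary lookups fall back to <unk>
assert en_vocab['some-made-up-token'] == en_vocab['<unk>']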
4. Translation dataset
class TranslationDataset(Dataset):
def __init__(self, data: List[Tuple[List[str], List[str]]], en_vocab, zh_vocab):
self.data = data
self.en_vocab = en_vocab
self.zh_vocab = zh_vocab
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
en, zh = self.data[idx]
en_indices = [self.en_vocab['<bos>']] + [self.en_vocab[token] for token in en] + [self.en_vocab['<eos>']]
zh_indices = [self.zh_vocab['<bos>']] + [self.zh_vocab[token] for token in zh] + [self.zh_vocab['<eos>']]
return en_indices, zh_indices
5. Collate function: filter empty sequences and pad the batch
def collate_fn(batch):
    en_batch, zh_batch = [], []
    for en_item, zh_item in batch:
        if en_item and zh_item:  # keep only pairs where both sequences are non-empty
            en_batch.append(torch.tensor(en_item))
            zh_batch.append(torch.tensor(zh_item))
        else:
            print("Found an empty sequence, skipping it")
    if not en_batch or not zh_batch:  # if the whole batch ends up empty, return empty tensors
        return torch.tensor([]), torch.tensor([])
    # Pad to the longest sequence in the batch
    en_batch = pad_sequence(en_batch, batch_first=True, padding_value=en_vocab['<pad>'])
    zh_batch = pad_sequence(zh_batch, batch_first=True, padding_value=zh_vocab['<pad>'])
    return en_batch, zh_batch
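Note: load_data below calls a preprocess_data helper that is not shown in this post. Based on how it is used (lowercase and tokenize the English side, segment the Chinese side with jieba, truncate both to MAX_LENGTH), a minimal sketch could look like the following; treat it as an assumption, not the original implementation:
# Hypothetical reconstruction of the preprocess_data helper used by load_data
def preprocess_data(en_data: List[str], zh_data: List[str]) -> List[Tuple[List[str], List[str]]]:
    processed = []
    for en, zh in zip(en_data, zh_data):
        en_tokens = en_tokenizer(en.lower())[:MAX_LENGTH]  # tokenize, lowercase, truncate
        zh_tokens = zh_tokenizer(zh)[:MAX_LENGTH]           # jieba segmentation, truncate
        if en_tokens and zh_tokens:                         # drop empty pairs
            processed.append((en_tokens, zh_tokens))
    return processed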
6. Data loading function
# Load, preprocess, and wrap the train/dev/test data
def load_data(train_path: str, dev_en_path: str, dev_zh_path: str, test_en_path: str):
    # Read the training data
train_data = read_data(train_path)
train_en, train_zh = zip(*(line.split('\t') for line in train_data))
    # Read the dev and test sets
dev_en = read_data(dev_en_path)
dev_zh = read_data(dev_zh_path)
test_en = read_data(test_en_path)
    # Preprocess the data
train_processed = preprocess_data(train_en, train_zh)
dev_processed = preprocess_data(dev_en, dev_zh)
test_processed = [(en_tokenizer(en.lower())[:MAX_LENGTH], []) for en in test_en if en.strip()]
    # Build the vocabularies (from the training data only)
global en_vocab, zh_vocab
en_vocab, zh_vocab = build_vocab(train_processed)
    # Create the datasets
train_dataset = TranslationDataset(train_processed, en_vocab, zh_vocab)
dev_dataset = TranslationDataset(dev_processed, en_vocab, zh_vocab)
test_dataset = TranslationDataset(test_processed, en_vocab, zh_vocab)
    from torch.utils.data import Subset
    # N below is the number of training samples to keep for this run.
    # To train on the full training set, comment out the three lines that follow.
    N = 20000
    indices = list(range(N))
    train_dataset = Subset(train_dataset, indices)
    # Create the data loaders
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_fn, drop_last=True)
dev_loader = DataLoader(dev_dataset, batch_size=BATCH_SIZE, collate_fn=collate_fn, drop_last=True)
test_loader = DataLoader(test_dataset, batch_size=1, collate_fn=collate_fn, drop_last=True)
return train_loader, dev_loader, test_loader, en_vocab, zh_vocab
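Once load_data has run, a quick way to confirm that batching and padding work is to peek at one batch (an optional check, not part of the original script):
# Inspect one padded batch from the training loader
src_batch, trg_batch = next(iter(train_loader))
print(src_batch.shape, trg_batch.shape)  # [BATCH_SIZE, max src len], [BATCH_SIZE, max trg len]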
三. Model Construction
1. Encoder
class Encoder(nn.Module):
def __init__(self, input_dim, emb_dim, hid_dim, n_layers, dropout):
super().__init__()
self.hid_dim = hid_dim
self.n_layers = n_layers
self.embedding = nn.Embedding(input_dim, emb_dim)
self.gru = nn.GRU(emb_dim, hid_dim, n_layers, dropout=dropout, batch_first=True)
self.dropout = nn.Dropout(dropout)
def forward(self, src):
# src = [batch size, src len]
embedded = self.dropout(self.embedding(src))
# embedded = [batch size, src len, emb dim]
outputs, hidden = self.gru(embedded)
# outputs = [batch size, src len, hid dim * n directions]
# hidden = [n layers * n directions, batch size, hid dim]
return outputs, hidden
2. Attention mechanism
The attention mechanism lets the model "focus" on different parts of the source sentence at each decoding step, which makes translations more accurate, especially for long sentences.
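Concretely, the Attention module below scores each encoder position t with energy_t = tanh(attn([decoder hidden; encoder output_t])) and attention_t = v(energy_t), then applies a softmax over the source length so that the weights for each sentence sum to 1.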
class Attention(nn.Module):
def __init__(self, hid_dim):
super().__init__()
self.attn = nn.Linear(hid_dim * 2, hid_dim)
self.v = nn.Linear(hid_dim, 1, bias=False)
def forward(self, hidden, encoder_outputs):
# hidden = [1, batch size, hid dim]
# encoder_outputs = [batch size, src len, hid dim]
batch_size = encoder_outputs.shape[0]
src_len = encoder_outputs.shape[1]
hidden = hidden.repeat(src_len, 1, 1).transpose(0, 1)
# hidden = [batch size, src len, hid dim]
energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs), dim=2)))
# energy = [batch size, src len, hid dim]
attention = self.v(energy).squeeze(2)
# attention = [batch size, src len]
return F.softmax(attention, dim=1)
3. Decoder
class Decoder(nn.Module):
def __init__(self, output_dim, emb_dim, hid_dim, n_layers, dropout, attention):
super().__init__()
self.output_dim = output_dim
self.hid_dim = hid_dim
self.n_layers = n_layers
self.attention = attention
self.embedding = nn.Embedding(output_dim, emb_dim)
self.gru = nn.GRU(hid_dim + emb_dim, hid_dim, n_layers, dropout=dropout, batch_first=True)
self.fc_out = nn.Linear(hid_dim * 2 + emb_dim, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, input, hidden, encoder_outputs):
# input = [batch size, 1]
# hidden = [n layers, batch size, hid dim]
# encoder_outputs = [batch size, src len, hid dim]
input = input.unsqueeze(1)
embedded = self.dropout(self.embedding(input))
# embedded = [batch size, 1, emb dim]
a = self.attention(hidden[-1:], encoder_outputs)
# a = [batch size, src len]
a = a.unsqueeze(1)
# a = [batch size, 1, src len]
weighted = torch.bmm(a, encoder_outputs)
# weighted = [batch size, 1, hid dim]
rnn_input = torch.cat((embedded, weighted), dim=2)
# rnn_input = [batch size, 1, emb dim + hid dim]
output, hidden = self.gru(rnn_input, hidden)
# output = [batch size, 1, hid dim]
# hidden = [n layers, batch size, hid dim]
embedded = embedded.squeeze(1)
output = output.squeeze(1)
weighted = weighted.squeeze(1)
prediction = self.fc_out(torch.cat((output, weighted, embedded), dim=1))
# prediction = [batch size, output dim]
return prediction, hidden
4. Seq2Seq model
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
def forward(self, src, trg, teacher_forcing_ratio=0.5):
# src = [batch size, src len]
# trg = [batch size, trg len]
batch_size = src.shape[0]
trg_len = trg.shape[1]
trg_vocab_size = self.decoder.output_dim
outputs = torch.zeros(batch_size, trg_len, trg_vocab_size).to(self.device)
encoder_outputs, hidden = self.encoder(src)
input = trg[:, 0]
for t in range(1, trg_len):
output, hidden = self.decoder(input, hidden, encoder_outputs)
outputs[:, t] = output
teacher_force = random.random() < teacher_forcing_ratio
top1 = output.argmax(1)
input = trg[:, t] if teacher_force else top1
return outputs
5. Model initialization function
# Build the encoder, attention, and decoder, and wrap them in Seq2Seq
def initialize_model(input_dim, output_dim, emb_dim, hid_dim, n_layers, dropout, device):
attn = Attention(hid_dim)
enc = Encoder(input_dim, emb_dim, hid_dim, n_layers, dropout)
dec = Decoder(output_dim, emb_dim, hid_dim, n_layers, dropout, attn)
model = Seq2Seq(enc, dec, device).to(device)
return model
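A tiny shape check with toy dimensions (an optional smoke test, not part of the original pipeline) confirms that a forward pass returns logits of shape [batch size, trg len, output dim]:
# Optional smoke test with made-up dimensions and random index tensors
_device = torch.device('cpu')
_model = initialize_model(input_dim=50, output_dim=60, emb_dim=16, hid_dim=32,
                          n_layers=2, dropout=0.5, device=_device)
_src = torch.randint(0, 50, (4, 7))   # [batch size = 4, src len = 7]
_trg = torch.randint(0, 60, (4, 9))   # [batch size = 4, trg len = 9]
print(_model(_src, _trg).shape)        # expected: torch.Size([4, 9, 60])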
四. Training
1. Define the optimizer
# Adam optimizer
def initialize_optimizer(model, learning_rate=0.001):
return optim.Adam(model.parameters(), lr=learning_rate)
2. Epoch timing helper
# Convert elapsed seconds into minutes and seconds
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
3. Training function
def train(model, iterator, optimizer, criterion, clip):
model.train()
epoch_loss = 0
for i, batch in enumerate(iterator):
#print(f"Training batch {i}")
src, trg = batch
#print(f"Source shape before: {src.shape}, Target shape before: {trg.shape}")
if src.numel() == 0 or trg.numel() == 0:
#print("Empty batch detected, skipping...")
continue # 跳过空的批次
src, trg = src.to(DEVICE), trg.to(DEVICE)
optimizer.zero_grad()
output = model(src, trg)
output_dim = output.shape[-1]
output = output[:, 1:].contiguous().view(-1, output_dim)
trg = trg[:, 1:].contiguous().view(-1)
loss = criterion(output, trg)
loss.backward()
clip_grad_norm_(model.parameters(), clip)
optimizer.step()
epoch_loss += loss.item()
print(f"Average loss for this epoch: {epoch_loss / len(iterator)}")
return epoch_loss / len(iterator)
4. Evaluation function
def evaluate(model, iterator, criterion):
model.eval()
epoch_loss = 0
with torch.no_grad():
for i, batch in enumerate(iterator):
#print(f"Evaluating batch {i}")
src, trg = batch
if src.numel() == 0 or trg.numel() == 0:
                continue  # skip empty batches
src, trg = src.to(DEVICE), trg.to(DEVICE)
            output = model(src, trg, 0)  # disable teacher forcing
output_dim = output.shape[-1]
output = output[:, 1:].contiguous().view(-1, output_dim)
trg = trg[:, 1:].contiguous().view(-1)
loss = criterion(output, trg)
epoch_loss += loss.item()
return epoch_loss / len(iterator)
5. Translation function
# Greedy decoding of a single sentence
def translate_sentence(sentence, src_vocab, trg_vocab, model, device, max_length=50):
model.eval()
    if isinstance(sentence, str):
        tokens = [token for token in en_tokenizer(sentence)]
    else:
        # NOTE: when an index tensor is passed in (as calculate_bleu does below), str(token)
        # produces strings like "tensor(5)" that map to <unk>; converting the indices back
        # to tokens, e.g. with src_vocab.lookup_tokens, would be more faithful.
        tokens = [str(token) for token in sentence]
tokens = ['<bos>'] + tokens + ['<eos>']
src_indexes = [src_vocab[token] for token in tokens]
src_tensor = torch.LongTensor(src_indexes).unsqueeze(0).to(device)
with torch.no_grad():
encoder_outputs, hidden = model.encoder(src_tensor)
trg_indexes = [trg_vocab['<bos>']]
for i in range(max_length):
trg_tensor = torch.LongTensor([trg_indexes[-1]]).to(device)
with torch.no_grad():
output, hidden = model.decoder(trg_tensor, hidden, encoder_outputs)
pred_token = output.argmax(1).item()
trg_indexes.append(pred_token)
if pred_token == trg_vocab['<eos>']:
break
trg_tokens = [trg_vocab.get_itos()[i] for i in trg_indexes]
    return trg_tokens[1:-1]  # strip the leading <bos> and the final token (normally <eos>)
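For example (a hypothetical call, assuming the trained model and the vocabularies from the later steps are in scope), translating a raw English string looks like this:
# Hypothetical usage: greedy-decode one English sentence into Chinese
print(''.join(translate_sentence('I love natural language processing.', en_vocab, zh_vocab, model, DEVICE)))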
6. Compute the BLEU score
def calculate_bleu(dev_loader, src_vocab, trg_vocab, model, device):
translated_sentences = []
references = []
for src, trg in dev_loader:
src = src.to(device)
        # NOTE: src here is a whole padded batch, so one "translation" is produced per batch,
        # while references are collected per sentence; translating each row of src separately
        # would keep the two lists the same length for sacrebleu.
        translation = translate_sentence(src, src_vocab, trg_vocab, model, device)
        # Join the translated tokens into a single string
translated_sentences.append(' '.join(translation))
        # Convert each reference translation into a string and collect it
for t in trg:
ref_str = ' '.join([trg_vocab.get_itos()[idx] for idx in t.tolist() if idx not in [trg_vocab['<bos>'], trg_vocab['<eos>'], trg_vocab['<pad>']]])
references.append(ref_str)
print("translated_sentences",translated_sentences[:2])
print("references:",references[6:8])
    # Compute the BLEU score with sacrebleu
    # Note: sacrebleu expects references as a list of lists, where each sub-list contains one or more reference translations
bleu = sacrebleu.corpus_bleu(translated_sentences, [references])
    # Return the corpus-level BLEU score
return bleu.score
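As a small standalone illustration of the sacrebleu API used above (made-up sentences with space-separated tokens):
# Toy example: two system outputs and one reference stream with one reference per sentence
sys_outputs = ['今天 天气 很 好', '我 喜欢 机器 翻译']
refs = [['今天 天气 很 好', '我 爱 机器 翻译']]
print(sacrebleu.corpus_bleu(sys_outputs, refs).score)  # already on a 0-100 scale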
7. Main training loop
# Train for N_EPOCHS, keeping the checkpoint with the lowest validation loss
def train_model(model, train_iterator, valid_iterator, optimizer, criterion, N_EPOCHS=10, CLIP=1):
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
start_time = time.time()
#print(f"Starting Epoch {epoch + 1}")
train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
valid_loss = evaluate(model, valid_iterator, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), './model/best-model_test.pt')
print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
8. Main body: dataset statistics
Load the data, then count and print the vocabulary and dataset sizes.
# Define constants
MAX_LENGTH = 100  # maximum sentence length
BATCH_SIZE = 32
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
N = 10000  # intended number of sampled training examples (note: the Subset logic inside load_data currently uses its own N = 20000)
train_path = './dataset/train.txt'
dev_en_path = './dataset/dev_en.txt'
dev_zh_path = './dataset/dev_zh.txt'
test_en_path = './dataset/test_en.txt'
train_loader, dev_loader, test_loader, en_vocab, zh_vocab = load_data(
train_path, dev_en_path, dev_zh_path, test_en_path
)
print(f"英语词汇表大小: {len(en_vocab)}")
print(f"中文词汇表大小: {len(zh_vocab)}")
print(f"训练集大小: {len(train_loader.dataset)}")
print(f"开发集大小: {len(dev_loader.dataset)}")
print(f"测试集大小: {len(test_loader.dataset)}")
9. Main function
Note: N_EPOCHS is the number of training epochs; change it to train for more or fewer epochs.
if __name__ == '__main__':
N_EPOCHS = 3
CLIP=1
    # Model hyperparameters
INPUT_DIM = len(en_vocab)
OUTPUT_DIM = len(zh_vocab)
EMB_DIM = 128
HID_DIM = 256
N_LAYERS = 2
DROPOUT = 0.5
    # Initialize the model
model = initialize_model(INPUT_DIM, OUTPUT_DIM, EMB_DIM, HID_DIM, N_LAYERS, DROPOUT, DEVICE)
print(f'The model has {sum(p.numel() for p in model.parameters() if p.requires_grad):,} trainable parameters')
    # Loss function (padding positions are ignored)
criterion = nn.CrossEntropyLoss(ignore_index=zh_vocab['<pad>'])
    # Initialize the optimizer
optimizer = initialize_optimizer(model)
    # Train the model
train_model(model, train_loader, dev_loader, optimizer, criterion, N_EPOCHS, CLIP)
五. Evaluation on the Dev Set
Note: you must create a folder named model by hand beforehand; otherwise saving the checkpoint will raise an error and abort (or create the folders from code, as in the snippet below).
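Alternatively, the needed output directories can be created from code (a small optional addition, not in the original script):
import os
os.makedirs('./model', exist_ok=True)    # checkpoint directory used by train_model
os.makedirs('./results', exist_ok=True)  # output directory used when writing submit_test.txt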
# Load the best checkpoint
model.load_state_dict(torch.load('./model/best-model_test.pt'))
# Compute the BLEU score on the dev set
bleu = calculate_bleu(dev_loader, en_vocab, zh_vocab, model, DEVICE)
print(f'BLEU score = {bleu:.2f}')  # sacrebleu's .score is already on a 0-100 scale
六. Translation on the Test Set
# Load the best checkpoint (uncomment if running this part on its own)
#model.load_state_dict(torch.load('./model/best-model_test.pt'))
with open('./results/submit_test.txt', 'w') as f:
    translated_sentences = []
    for batch in test_loader:  # iterate over the whole test set
        src, _ = batch
        src = src.to(DEVICE)
        translated = translate_sentence(src[0], en_vocab, zh_vocab, model, DEVICE)  # translated tokens
        results = "".join(translated)
        f.write(results + '\n')  # write one translation per line
七. Conclusion
Because this baseline model generalizes poorly, it performs badly on both the dev and test sets, and its score is far below the Task 1 score. The validation PPL rises almost every epoch, and the final outputs mostly repeat the same few characters.
The translation output is shown in the figure below.
The score is shown in the figure below.