# Datawhale AI Summer Camp
1. The evolution of machine translation
Rule-based machine translation (1950s-1980s) → Statistical machine translation (1990s-2000s) → Neural machine translation (2010s-present)
The mainstream approach to machine translation today is to model the task with neural networks.
TASK1 code:
!pip install torchtext
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, Subset
from torchtext.data.utils import get_tokenizer
from collections import Counter
import random
import time
# Dataset class
# TranslationDataset is extended to handle the terminology dictionary
class TranslationDataset(Dataset):
    def __init__(self, filename, terminology):
        self.data = []
        with open(filename, 'r', encoding='utf-8') as f:
            for line in f:
                en, zh = line.strip().split('\t')
                self.data.append((en, zh))
        self.terminology = terminology
        # Build the vocabularies; make sure the terms from the terminology dictionary are included
        self.en_tokenizer = get_tokenizer('basic_english')
        self.zh_tokenizer = list  # character-level tokenization for Chinese
        en_vocab = Counter(self.terminology.keys())  # ensure terminology terms are in the vocabulary
        zh_vocab = Counter()
        for en, zh in self.data:
            en_vocab.update(self.en_tokenizer(en))
            zh_vocab.update(self.zh_tokenizer(zh))
        # Add special tokens and the terminology terms to the vocabularies
        self.en_vocab = ['<pad>', '<sos>', '<eos>'] + list(self.terminology.keys()) + [word for word, _ in en_vocab.most_common(10000)]
        self.zh_vocab = ['<pad>', '<sos>', '<eos>'] + [word for word, _ in zh_vocab.most_common(10000)]
        self.en_word2idx = {word: idx for idx, word in enumerate(self.en_vocab)}
        self.zh_word2idx = {word: idx for idx, word in enumerate(self.zh_vocab)}

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        en, zh = self.data[idx]
        # Out-of-vocabulary words fall back to the '<sos>' index (no dedicated '<unk>' token is defined)
        en_tensor = torch.tensor([self.en_word2idx.get(word, self.en_word2idx['<sos>']) for word in self.en_tokenizer(en)] + [self.en_word2idx['<eos>']])
        zh_tensor = torch.tensor([self.zh_word2idx.get(word, self.zh_word2idx['<sos>']) for word in self.zh_tokenizer(zh)] + [self.zh_word2idx['<eos>']])
        return en_tensor, zh_tensor
def collate_fn(batch):
    en_batch, zh_batch = [], []
    for en_item, zh_item in batch:
        en_batch.append(en_item)
        zh_batch.append(zh_item)
    # Pad the English and Chinese sequences separately
    en_batch = nn.utils.rnn.pad_sequence(en_batch, padding_value=0, batch_first=True)
    zh_batch = nn.utils.rnn.pad_sequence(zh_batch, padding_value=0, batch_first=True)
    return en_batch, zh_batch
class Encoder(nn.Module):
    def __init__(self, input_dim, emb_dim, hid_dim, n_layers, dropout):
        super().__init__()
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.rnn = nn.GRU(emb_dim, hid_dim, n_layers, dropout=dropout, batch_first=True)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src):
        # src shape: [batch_size, src_len]
        embedded = self.dropout(self.embedding(src))
        # embedded shape: [batch_size, src_len, emb_dim]
        outputs, hidden = self.rnn(embedded)
        # outputs shape: [batch_size, src_len, hid_dim]
        # hidden shape: [n_layers, batch_size, hid_dim]
        return outputs, hidden
class Decoder(nn.Module):
    def __init__(self, output_dim, emb_dim, hid_dim, n_layers, dropout):
        super().__init__()
        self.output_dim = output_dim
        self.embedding = nn.Embedding(output_dim, emb_dim)
        self.rnn = nn.GRU(emb_dim, hid_dim, n_layers, dropout=dropout, batch_first=True)
        self.fc_out = nn.Linear(hid_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input, hidden):
        # input shape: [batch_size, 1]
        # hidden shape: [n_layers, batch_size, hid_dim]
        embedded = self.dropout(self.embedding(input))
        # embedded shape: [batch_size, 1, emb_dim]
        output, hidden = self.rnn(embedded, hidden)
        # output shape: [batch_size, 1, hid_dim]
        # hidden shape: [n_layers, batch_size, hid_dim]
        prediction = self.fc_out(output.squeeze(1))
        # prediction shape: [batch_size, output_dim]
        return prediction, hidden
class Seq2Seq(nn.Module):
    def __init__(self, encoder, decoder, device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device

    def forward(self, src, trg, teacher_forcing_ratio=0.5):
        # src shape: [batch_size, src_len]
        # trg shape: [batch_size, trg_len]
        batch_size = src.shape[0]
        trg_len = trg.shape[1]
        trg_vocab_size = self.decoder.output_dim
        outputs = torch.zeros(batch_size, trg_len, trg_vocab_size).to(self.device)
        _, hidden = self.encoder(src)
        input = trg[:, 0].unsqueeze(1)  # start token
        for t in range(1, trg_len):
            output, hidden = self.decoder(input, hidden)
            outputs[:, t, :] = output
            # With probability teacher_forcing_ratio feed the ground-truth token; otherwise feed the model's own prediction
            teacher_force = random.random() < teacher_forcing_ratio
            top1 = output.argmax(1)
            input = trg[:, t].unsqueeze(1) if teacher_force else top1.unsqueeze(1)
        return outputs
# New: load the terminology dictionary (one tab-separated "english_term<TAB>chinese_term" pair per line)
def load_terminology_dictionary(dict_file):
    terminology = {}
    with open(dict_file, 'r', encoding='utf-8') as f:
        for line in f:
            en_term, ch_term = line.strip().split('\t')
            terminology[en_term] = ch_term
    return terminology
def train(model, iterator, optimizer, criterion, clip):
    model.train()
    epoch_loss = 0
    for i, (src, trg) in enumerate(iterator):
        src, trg = src.to(device), trg.to(device)
        optimizer.zero_grad()
        output = model(src, trg)
        output_dim = output.shape[-1]
        # Drop the first (start) position and flatten for the loss computation
        output = output[:, 1:].contiguous().view(-1, output_dim)
        trg = trg[:, 1:].contiguous().view(-1)
        loss = criterion(output, trg)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)
# Main
if __name__ == '__main__':
    start_time = time.time()  # start timing
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Load the terminology dictionary
    terminology = load_terminology_dictionary('../dataset/en-zh.dic')
    # Load the data
    dataset = TranslationDataset('../dataset/train.txt', terminology=terminology)
    # Train on the first N samples of the dataset
    N = 1000  # or use a fraction of the dataset size, e.g. int(len(dataset) * 0.1), up to the full dataset
    subset_indices = list(range(N))
    subset_dataset = Subset(dataset, subset_indices)
    train_loader = DataLoader(subset_dataset, batch_size=32, shuffle=True, collate_fn=collate_fn)
    # Model hyperparameters
    INPUT_DIM = len(dataset.en_vocab)
    OUTPUT_DIM = len(dataset.zh_vocab)
    ENC_EMB_DIM = 256
    DEC_EMB_DIM = 256
    HID_DIM = 512
    N_LAYERS = 2
    ENC_DROPOUT = 0.5
    DEC_DROPOUT = 0.5
    # Initialize the model
    enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
    dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
    model = Seq2Seq(enc, dec, device).to(device)
    # Optimizer and loss function
    optimizer = optim.Adam(model.parameters())
    criterion = nn.CrossEntropyLoss(ignore_index=dataset.zh_word2idx['<pad>'])
    # Train the model
    N_EPOCHS = 10
    CLIP = 1
    for epoch in range(N_EPOCHS):
        train_loss = train(model, train_loader, optimizer, criterion, CLIP)
        print(f'Epoch: {epoch+1:02} | Train Loss: {train_loss:.3f}')
    # Save the model after the training loop
    torch.save(model.state_dict(), './translation_model_GRU.pth')
    end_time = time.time()  # stop timing
    # Compute and print the total running time
    elapsed_time_minute = (end_time - start_time) / 60
    print(f"Total running time: {elapsed_time_minute:.2f} minutes")
import torch
from sacrebleu.metrics import BLEU
from typing import List
# Assumes the TranslationDataset, Encoder, Decoder, Seq2Seq classes and load_terminology_dictionary defined above are available
def load_sentences(file_path: str) -> List[str]:
    with open(file_path, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]
# translate_sentence is updated to take the terminology dictionary into account
def translate_sentence(sentence: str, model: Seq2Seq, dataset: TranslationDataset, terminology, device: torch.device, max_length: int = 50):
    model.eval()
    tokens = dataset.en_tokenizer(sentence)
    tensor = torch.LongTensor([dataset.en_word2idx.get(token, dataset.en_word2idx['<sos>']) for token in tokens]).unsqueeze(0).to(device)  # [1, seq_len]
    with torch.no_grad():
        _, hidden = model.encoder(tensor)
    translated_tokens = []
    input_token = torch.LongTensor([[dataset.zh_word2idx['<sos>']]]).to(device)  # [1, 1]
    for _ in range(max_length):
        output, hidden = model.decoder(input_token, hidden)
        top_token = output.argmax(1)
        translated_token = dataset.zh_vocab[top_token.item()]
        if translated_token == '<eos>':
            break
        # If the generated token matches a term in the terminology dictionary, substitute the corresponding dictionary entry
        if translated_token in terminology.values():
            for en_term, ch_term in terminology.items():
                if translated_token == ch_term:
                    translated_token = en_term
                    break
        translated_tokens.append(translated_token)
        input_token = top_token.unsqueeze(1)  # [1, 1]
    return ''.join(translated_tokens)
def evaluate_bleu(model: Seq2Seq, dataset: TranslationDataset, src_file: str, ref_file: str, terminology, device: torch.device):
    model.eval()
    src_sentences = load_sentences(src_file)
    ref_sentences = load_sentences(ref_file)
    translated_sentences = []
    for src in src_sentences:
        translated = translate_sentence(src, model, dataset, terminology, device)
        translated_sentences.append(translated)
    bleu = BLEU()
    score = bleu.corpus_score(translated_sentences, [ref_sentences])
    return score
# Main
if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Load the terminology dictionary
    terminology = load_terminology_dictionary('../dataset/en-zh.dic')
    # Pass the terminology dictionary when creating the dataset instance
    dataset = TranslationDataset('../dataset/train.txt', terminology)
    # Model hyperparameters (must match the values used for training)
    INPUT_DIM = len(dataset.en_vocab)
    OUTPUT_DIM = len(dataset.zh_vocab)
    ENC_EMB_DIM = 256
    DEC_EMB_DIM = 256
    HID_DIM = 512
    N_LAYERS = 2
    ENC_DROPOUT = 0.5
    DEC_DROPOUT = 0.5
    # Initialize the model
    enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
    dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
    model = Seq2Seq(enc, dec, device).to(device)
    # Load the trained model weights
    model.load_state_dict(torch.load('./translation_model_GRU.pth'))
    # Evaluate the BLEU score
    bleu_score = evaluate_bleu(model, dataset, '../dataset/dev_en.txt', '../dataset/dev_zh.txt', terminology=terminology, device=device)
    print(f'BLEU-4 score: {bleu_score.score:.2f}')
What does N mean?
N is a variable name, written in uppercase by convention to signal that it is treated as a constant; 1000 is the value assigned to it, i.e. the number of training samples to draw. The accompanying comment (number of training samples, maximum 148363) documents this constant further: you can set N as needed, but it should not exceed the size of the dataset. In machine learning and data processing it is common to sample the data, especially when the dataset is large; a smaller N speeds up training and allows quick prototyping. Here, N = 1000 means 1000 examples are sampled as the training set instead of using the full data.
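As a minimal sketch of this sampling step (not part of the original baseline), the snippet below shows how torch.utils.data.Subset restricts training to the first N examples; the toy full_dataset, the MAX_SAMPLES constant, and the random-sampling variant are illustrative assumptions added here:

```python
import random
import torch
from torch.utils.data import DataLoader, Subset, TensorDataset

# Toy stand-in for the real TranslationDataset (illustrative only)
full_dataset = TensorDataset(torch.arange(148363).unsqueeze(1))

MAX_SAMPLES = len(full_dataset)   # 148363 in this toy example
N = min(1000, MAX_SAMPLES)        # never exceed the dataset size

# Take the first N examples, as the baseline does ...
subset = Subset(full_dataset, list(range(N)))
# ... or draw a random sample of N indices instead (an alternative, not what the baseline does)
random_subset = Subset(full_dataset, random.sample(range(MAX_SAMPLES), N))

loader = DataLoader(subset, batch_size=32, shuffle=True)
print(len(full_dataset), len(subset), len(random_subset))  # 148363 1000 1000
```

Because the baseline takes the first N indices rather than a random sample, the training subset itself is fixed; shuffle=True in the DataLoader only changes the batch order within that subset.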