基于Transformer解决机器翻译任务
Transformer 模型简介
卷积神经网络和循环神经网络都有各自的局限性,所以为了更好地描述文字序列,研究人员提出了新模型 Transformer。
Transformer不再使用循环结构,而是完全通过注意力机制完成对源语言序列和目标语言序列全局依赖的建模。在抽取每个单词的上下文特征时,Transformer 通过自注意力机制(self-attention)衡量上下文中每一个单词对当前单词的重要程度。
Transformer的主要组件包括编码器(Encoder)、解码器(Decoder)和注意力层。其核心是利用多头自注意力机制(Multi-Head Self-Attention),使每个位置的表示不仅依赖于当前位置,还能够直接获取其他位置的表示。
Transformer 模型的基本架构
从宏观角度来看,Transformer的编码器是由多个相同的层叠加而成的,每个层都有两个子层(子层表示为sublayer)。第一个子层是多头自注意力(multi-head self-attention)汇聚;第二个子层是基于位置的前馈网络(positionwise feed-forward network)。主要涉及到如下几个模块:嵌入表示层、注意力层、前馈层、残差连接与层归一化、编码器和解码器结构。具体讲解内容见讲义Task3:基于Transformer解决机器翻译任务。
代码详解
下面结合Transformer 模型的基本架构解释baseline代码
step1 配置环境
# Create output directories and install the Python dependencies.
!mkdir ../model
!mkdir ../results
!pip install torchtext
!pip install jieba
!pip install sacrebleu
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.nn.utils import clip_grad_norm_ from torchtext.data.metrics import bleu_score from torch.utils.data import Dataset, DataLoader from torchtext.data.utils import get_tokenizer from torchtext.vocab import build_vocab_from_iterator from typing import List, Tuple import jieba import random from torch.nn.utils.rnn import pad_sequence import sacrebleu import time import math
spacy用于英文的tokenizer,离线下载后上传到dataset文件夹下。
# Install spaCy (CUDA 12.x build) and the offline English transformer model wheel.
!pip install -U pip setuptools wheel -i https://pypi.tuna.tsinghua.edu.cn/simple
!pip install -U 'spacy[cuda12x]' -i https://pypi.tuna.tsinghua.edu.cn/simple
!pip install ../dataset/en_core_web_trf-3.7.3-py3-none-any.whl
step2 数据预处理
# Tokenizers: spaCy for English, jieba for Chinese.
en_tokenizer = get_tokenizer('spacy', language='en_core_web_trf')


def zh_tokenizer(text):
    """Segment a Chinese sentence into a token list with jieba."""
    return list(jieba.cut(text))
def read_data(file_path: str) -> List[str]:
    """Read a UTF-8 text file and return its lines with surrounding whitespace stripped."""
    with open(file_path, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]


def preprocess_data(en_data: List[str], zh_data: List[str]) -> List[Tuple[List[str], List[str]]]:
    """Tokenize parallel sentences, truncate to MAX_LENGTH tokens, drop pairs with an empty side."""
    pairs = []
    for en_line, zh_line in zip(en_data, zh_data):
        en_tokens = en_tokenizer(en_line.lower())[:MAX_LENGTH]
        zh_tokens = zh_tokenizer(zh_line)[:MAX_LENGTH]
        # Keep only pairs where both sides tokenized to something non-empty.
        if en_tokens and zh_tokens:
            pairs.append((en_tokens, zh_tokens))
    return pairs


def build_vocab(data: List[Tuple[List[str], List[str]]]):
    """Build source/target vocabularies from tokenized pairs; OOV tokens map to <unk>."""
    en_vocab = build_vocab_from_iterator(
        (en for en, _ in data),
        specials=['<unk>', '<pad>', '<bos>', '<eos>'],
    )
    zh_vocab = build_vocab_from_iterator(
        (zh for _, zh in data),
        specials=['<unk>', '<pad>', '<bos>', '<eos>'],
    )
    en_vocab.set_default_index(en_vocab['<unk>'])
    zh_vocab.set_default_index(zh_vocab['<unk>'])
    return en_vocab, zh_vocab
class TranslationDataset(Dataset):
    """Parallel-corpus dataset yielding (en_indices, zh_indices) lists framed by <bos>/<eos>."""

    def __init__(self, data: List[Tuple[List[str], List[str]]], en_vocab, zh_vocab):
        self.data = data
        self.en_vocab = en_vocab
        self.zh_vocab = zh_vocab

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        en_tokens, zh_tokens = self.data[idx]
        en_indices = [self.en_vocab['<bos>'],
                      *(self.en_vocab[tok] for tok in en_tokens),
                      self.en_vocab['<eos>']]
        zh_indices = [self.zh_vocab['<bos>'],
                      *(self.zh_vocab[tok] for tok in zh_tokens),
                      self.zh_vocab['<eos>']]
        return en_indices, zh_indices
def collate_fn(batch):
    """Pad a batch of (en_indices, zh_indices) pairs to a uniform length.

    Returns (en_batch, zh_batch) as LongTensors of shape (batch, max_len),
    padded with each language's <pad> index. Empty items are skipped; a
    fully-empty batch yields two empty tensors so callers can detect it.

    NOTE(review): relies on module-level en_vocab / zh_vocab being set by
    load_data() before the first batch is collated.
    """
    # Cleanup: removed leftover debug prints and commented-out dead code
    # from the baseline version; behavior is otherwise unchanged.
    en_batch, zh_batch = [], []
    for en_item, zh_item in batch:
        if en_item and zh_item:  # skip degenerate pairs
            en_batch.append(torch.tensor(en_item))
            zh_batch.append(torch.tensor(zh_item))
    if not en_batch or not zh_batch:
        # Whole batch was empty: return empty tensors (train/evaluate skip these).
        return torch.tensor([]), torch.tensor([])
    # Pad so every sequence in the batch has the same length.
    en_batch = pad_sequence(en_batch, batch_first=True, padding_value=en_vocab['<pad>'])
    zh_batch = pad_sequence(zh_batch, batch_first=True, padding_value=zh_vocab['<pad>'])
    return en_batch, zh_batch
step3 定义处理函数
def load_data(train_path: str, dev_en_path: str, dev_zh_path: str, test_en_path: str):
    """Read, preprocess and wrap the corpora into DataLoaders.

    Builds the vocabularies from the training split and publishes them as
    module-level globals (collate_fn depends on them). Returns
    (train_loader, dev_loader, test_loader, en_vocab, zh_vocab).
    """
    # Training corpus: one "en\tzh" pair per line.
    raw_train = read_data(train_path)
    train_en, train_zh = zip(*(line.split('\t') for line in raw_train))

    dev_en = read_data(dev_en_path)
    dev_zh = read_data(dev_zh_path)
    test_en = read_data(test_en_path)

    train_processed = preprocess_data(train_en, train_zh)
    dev_processed = preprocess_data(dev_en, dev_zh)
    # Test split has no references: pair each tokenized source with an empty target.
    test_processed = [(en_tokenizer(line.lower())[:MAX_LENGTH], [])
                      for line in test_en if line.strip()]

    global en_vocab, zh_vocab
    en_vocab, zh_vocab = build_vocab(train_processed)

    train_dataset = TranslationDataset(train_processed, en_vocab, zh_vocab)
    dev_dataset = TranslationDataset(dev_processed, en_vocab, zh_vocab)
    test_dataset = TranslationDataset(test_processed, en_vocab, zh_vocab)

    # Optionally subsample the training set to the first N examples.
    from torch.utils.data import Subset
    train_dataset = Subset(train_dataset, list(range(N)))

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True,
                              collate_fn=collate_fn, drop_last=True)
    dev_loader = DataLoader(dev_dataset, batch_size=BATCH_SIZE,
                            collate_fn=collate_fn, drop_last=True)
    test_loader = DataLoader(test_dataset, batch_size=1,
                             collate_fn=collate_fn, drop_last=True)
    return train_loader, dev_loader, test_loader, en_vocab, zh_vocab
step4 模型构建(重点,使用transformer模型)
class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position information (Vaswani et al., 2017).

    Expects inputs of shape (seq_len, batch, d_model), matching the
    batch_first=False convention of nn.Transformer used below.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric progression of frequencies across even dimensions.
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0).transpose(0, 1)  # -> (max_len, 1, d_model)
        # Buffer (not a parameter): saved with the model and moved across devices.
        self.register_buffer('pe', pe)

    def forward(self, x):
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)


class TransformerModel(nn.Module):
    """Seq2seq Transformer: embeddings + positional encoding + nn.Transformer + output projection."""

    def __init__(self, src_vocab, tgt_vocab, d_model, nhead, num_encoder_layers,
                 num_decoder_layers, dim_feedforward, dropout):
        super(TransformerModel, self).__init__()
        self.transformer = nn.Transformer(d_model, nhead, num_encoder_layers,
                                          num_decoder_layers, dim_feedforward, dropout)
        self.src_embedding = nn.Embedding(len(src_vocab), d_model)
        self.tgt_embedding = nn.Embedding(len(tgt_vocab), d_model)
        self.positional_encoding = PositionalEncoding(d_model, dropout)
        self.fc_out = nn.Linear(d_model, len(tgt_vocab))
        self.src_vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.d_model = d_model

    def forward(self, src, tgt):
        """src, tgt: (batch, seq_len) token indices. Returns (batch, tgt_len, tgt_vocab) logits."""
        # nn.Transformer (batch_first=False) expects (seq_len, batch).
        src = src.transpose(0, 1)
        tgt = tgt.transpose(0, 1)

        # BUGFIX: the baseline applied a causal (square subsequent) mask to the
        # ENCODER as well, wrongly preventing source tokens from attending to
        # later source positions. Encoder self-attention must be bidirectional;
        # only the decoder needs the causal mask.
        src_mask = None
        tgt_mask = self.transformer.generate_square_subsequent_mask(tgt.size(0)).to(tgt.device)

        # Padding masks: True where the token is <pad>, shape (batch, seq_len).
        src_padding_mask = (src == self.src_vocab['<pad>']).transpose(0, 1)
        tgt_padding_mask = (tgt == self.tgt_vocab['<pad>']).transpose(0, 1)

        # Scale embeddings by sqrt(d_model) before adding positional encodings.
        src_embedded = self.positional_encoding(self.src_embedding(src) * math.sqrt(self.d_model))
        tgt_embedded = self.positional_encoding(self.tgt_embedding(tgt) * math.sqrt(self.d_model))

        output = self.transformer(src_embedded, tgt_embedded, src_mask, tgt_mask,
                                  None, src_padding_mask, tgt_padding_mask,
                                  src_padding_mask)  # memory keys reuse the source padding mask
        return self.fc_out(output).transpose(0, 1)  # back to (batch, tgt_len, vocab)
def initialize_model(src_vocab, tgt_vocab, d_model=512, nhead=8, num_encoder_layers=6,
                     num_decoder_layers=6, dim_feedforward=2048, dropout=0.1):
    """Construct a TransformerModel with the given hyper-parameters."""
    return TransformerModel(src_vocab, tgt_vocab, d_model, nhead, num_encoder_layers,
                            num_decoder_layers, dim_feedforward, dropout)
step5 模型训练
def initialize_optimizer(model, learning_rate=0.001):
    """Return an Adam optimizer over all model parameters."""
    return optim.Adam(model.parameters(), lr=learning_rate)


def epoch_time(start_time, end_time):
    """Split an elapsed wall-clock interval (seconds) into (minutes, seconds)."""
    elapsed_time = end_time - start_time
    elapsed_mins = int(elapsed_time / 60)
    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
    return elapsed_mins, elapsed_secs


def train(model, iterator, optimizer, criterion, clip):
    """Run one training epoch; returns the mean batch loss.

    `clip` caps the gradient norm to avoid exploding gradients.
    """
    model.train()
    epoch_loss = 0
    for i, batch in enumerate(iterator):
        src, tgt = batch
        if src.numel() == 0 or tgt.numel() == 0:
            continue  # collate_fn may emit empty batches
        src, tgt = src.to(DEVICE), tgt.to(DEVICE)
        optimizer.zero_grad()
        # Teacher forcing: feed tgt[:-1], predict tgt[1:].
        output = model(src, tgt[:, :-1])
        output_dim = output.shape[-1]
        output = output.contiguous().view(-1, output_dim)
        tgt = tgt[:, 1:].contiguous().view(-1)
        loss = criterion(output, tgt)
        loss.backward()
        clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)


def evaluate(model, iterator, criterion):
    """Compute the mean validation loss (no gradient updates)."""
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for i, batch in enumerate(iterator):
            src, tgt = batch
            if src.numel() == 0 or tgt.numel() == 0:
                continue
            src, tgt = src.to(DEVICE), tgt.to(DEVICE)
            output = model(src, tgt[:, :-1])
            output_dim = output.shape[-1]
            output = output.contiguous().view(-1, output_dim)
            tgt = tgt[:, 1:].contiguous().view(-1)
            loss = criterion(output, tgt)
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)


def translate_sentence(src_indexes, src_vocab, tgt_vocab, model, device, max_length=50):
    """Greedy-decode a single source index tensor into a list of target tokens."""
    model.eval()
    src_tensor = src_indexes.unsqueeze(0).to(device)  # add batch dimension
    # BUGFIX: removed the baseline's unused up-front encoder call; the full
    # model() call below re-encodes the source every step anyway, so the
    # precomputed encoder output was dead work.
    trg_indexes = [tgt_vocab['<bos>']]
    for _ in range(max_length):
        trg_tensor = torch.LongTensor(trg_indexes).unsqueeze(0).to(device)
        with torch.no_grad():
            output = model(src_tensor, trg_tensor)
        pred_token = output.argmax(2)[:, -1].item()
        trg_indexes.append(pred_token)
        if pred_token == tgt_vocab['<eos>']:
            break
    trg_tokens = [tgt_vocab.get_itos()[i] for i in trg_indexes]
    # BUGFIX: the baseline always returned trg_tokens[1:-1], which drops a real
    # word whenever max_length is reached without emitting <eos>. Strip <bos>
    # unconditionally but the trailing token only if it is actually <eos>.
    if trg_tokens and trg_tokens[-1] == '<eos>':
        trg_tokens = trg_tokens[:-1]
    return trg_tokens[1:]


def calculate_bleu(dev_loader, src_vocab, tgt_vocab, model, device):
    """Corpus BLEU of greedy translations against the dev references."""
    model.eval()
    translations = []
    references = []
    with torch.no_grad():
        for src, tgt in dev_loader:
            src = src.to(device)
            for sentence in src:
                translated = translate_sentence(sentence, src_vocab, tgt_vocab, model, device)
                translations.append(' '.join(translated))
            for reference in tgt:
                ref_tokens = [tgt_vocab.get_itos()[idx] for idx in reference
                              if idx not in [tgt_vocab['<bos>'], tgt_vocab['<eos>'], tgt_vocab['<pad>']]]
                references.append(' '.join(ref_tokens))
    # BUGFIX: sacrebleu.corpus_bleu expects reference *streams* — one list of
    # references covering all sentences, i.e. [[ref_1, ..., ref_n]] — not one
    # single-item list per sentence as the baseline passed.
    bleu = sacrebleu.corpus_bleu(translations, [references])
    return bleu.score
学习率一般默认值为1e-3,调参可以学习下面的博客PyTorch之八—优化器&学习率
def train_model(model, train_iterator, valid_iterator, optimizer, criterion,
                N_EPOCHS=10, CLIP=1, save_path='../model/best-model_transformer.pt'):
    """Run the train/validate loop, checkpointing whenever validation loss improves."""
    best_valid_loss = float('inf')
    for epoch in range(N_EPOCHS):
        start_time = time.time()
        train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
        valid_loss = evaluate(model, valid_iterator, criterion)
        end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        # Keep only the best checkpoint seen so far.
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(model.state_dict(), save_path)
        print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
        print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
        print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
# Global constants and data paths.
MAX_LENGTH = 100  # maximum sentence length in tokens
BATCH_SIZE = 32
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
N = 148363  # number of training samples to use (148363 = use everything)

train_path = '../dataset/train.txt'
dev_en_path = '../dataset/dev_en.txt'
dev_zh_path = '../dataset/dev_zh.txt'
test_en_path = '../dataset/test_en.txt'

train_loader, dev_loader, test_loader, en_vocab, zh_vocab = load_data(
    train_path, dev_en_path, dev_zh_path, test_en_path
)

print(f"英语词汇表大小: {len(en_vocab)}")
print(f"中文词汇表大小: {len(zh_vocab)}")
print(f"训练集大小: {len(train_loader.dataset)}")
print(f"开发集大小: {len(dev_loader.dataset)}")
print(f"测试集大小: {len(test_loader.dataset)}")
主函数调用上面的函数,进行训练,其中模型参数可以进行调整。
模型参数:
-
D_MODEL: Transformer 模型的词嵌入和隐含状态向量的维度。baseline设置为256,可以提高到 512 ,并根据数据集和任务进行调整。
-
NHEAD: 多头注意机制中注意头的数量。建议从 8 开始,并根据数据集和任务进行调整。
-
NUM_ENCODER_LAYERS: 编码器中的 Transformer 层数。建议从 3 开始,并根据数据集和任务进行调整。
-
NUM_DECODER_LAYERS: 解码器中的 Transformer 层数。建议与
NUM_ENCODER_LAYERS
相同或更大。 -
DIM_FEEDFORWARD: 前馈层中的隐藏维度。建议从 512 开始,并根据数据集和任务进行调整。
-
DROPOUT: Transformer 层中的丢弃率。建议从 0.1 开始,并根据数据集和任务进行调整。
训练参数:
-
N_EPOCHS: 训练的 epoch 数。建议从 5 开始,并根据数据集和任务进行调整。
-
CLIP: 梯度裁剪阈值。建议从 1 开始,并根据数据集和任务进行调整。
调参建议:
-
对于较小的数据集或简单任务,可以减少模型参数(例如,
D_MODEL
、NHEAD
、NUM_ENCODER_LAYERS
、NUM_DECODER_LAYERS
、DIM_FEEDFORWARD
);对于较大的数据集或复杂任务,可以增加模型参数。 -
训练过程中监控验证集上的损失和指标(例如,F1 分数),并根据需要调整模型参数和训练参数。
-
使用学习率热身和学习率衰减等技术来优化训练过程。
-
尝试不同的优化器和调度程序,例如 Adam、AdamW 和 cosine 退火调度程序。
# Entry point: build, train and checkpoint the model.
if __name__ == '__main__':
    # Model hyper-parameters.
    D_MODEL = 256
    NHEAD = 8
    NUM_ENCODER_LAYERS = 3
    NUM_DECODER_LAYERS = 3
    DIM_FEEDFORWARD = 512
    DROPOUT = 0.1
    N_EPOCHS = 5
    CLIP = 1

    model = initialize_model(en_vocab, zh_vocab, D_MODEL, NHEAD, NUM_ENCODER_LAYERS,
                             NUM_DECODER_LAYERS, DIM_FEEDFORWARD, DROPOUT).to(DEVICE)
    print(f'The model has {sum(p.numel() for p in model.parameters() if p.requires_grad):,} trainable parameters')

    # Ignore <pad> positions when computing the loss.
    criterion = nn.CrossEntropyLoss(ignore_index=zh_vocab['<pad>'])
    optimizer = optim.Adam(model.parameters(), lr=0.0001, betas=(0.9, 0.98), eps=1e-9)

    save_path = '../model/best-model_transformer.pt'
    train_model(model, train_loader, dev_loader, optimizer, criterion, N_EPOCHS, CLIP, save_path=save_path)
    print(f"训练完成!模型已保存到:{save_path}")
step6 在开发集上进行评价(暂时注释掉)
# model = initialize_model(en_vocab, zh_vocab, D_MODEL, NHEAD, NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, DIM_FEEDFORWARD, DROPOUT).to(DEVICE) # # 加载最佳模型 # model.load_state_dict(torch.load('../model/best-model_transformer.pt')) # # 计算BLEU分数 # bleu_score = calculate_bleu(dev_loader, en_vocab, zh_vocab, model, DEVICE) # print(f'BLEU score = {bleu_score*100:.2f}')
step7 对测试集进行翻译
# Load the best checkpoint and translate the test set to a submission file.
model.load_state_dict(torch.load('../model/best-model_transformer.pt'))

save_dir = '../results/submit_task3.txt'
with open(save_dir, 'w') as f:
    translated_sentences = []
    for batch in test_loader:  # iterate the whole test split (batch_size=1)
        src, _ = batch
        src = src.to(DEVICE)
        translated = translate_sentence(src[0], en_vocab, zh_vocab, model, DEVICE)
        results = "".join(translated)  # Chinese output: join without spaces
        f.write(results + '\n')
print(f"翻译完成,结果已保存到{save_dir}")
上分技巧尝试和学习
1.调参
按照上述讲解过程中的调参原则进行修改
# Scaled-up hyper-parameters for the tuning experiment.
D_MODEL = 512
NHEAD = 16
NUM_ENCODER_LAYERS = 6
NUM_DECODER_LAYERS = 6
DIM_FEEDFORWARD = 512
DROPOUT = 0.1
N_EPOCHS = 16
CLIP = 1
2.加入术语词典
def load_dictionary(dict_path):
    """Load a tab-separated en->zh terminology dictionary into a dict."""
    term_dict = {}
    with open(dict_path, 'r', encoding='utf-8') as f:
        lines = f.read().strip().split('\n')
    for line in lines:
        source_term = line.split('\t')[0]
        target_term = line.split('\t')[1]
        term_dict[source_term] = target_term
    return term_dict


def post_process_translation(translation, term_dict):
    """Replace each output token by its dictionary translation when one exists."""
    translated_words = [term_dict.get(word, word) for word in translation]
    return "".join(translated_words)


# Load the terminology dictionary.
dict_path = '../dataset/en-zh.dic'  # path to the terminology dictionary file
term_dict = load_dictionary(dict_path)
最后翻译主函数不要忘记加
results = post_process_translation(translated, term_dict)
再将结果写入文档!
3.清洗数据
-
去除无关信息(标签、特殊字符、非文本内容等)
-
统一格式
简单地去除无关信息
import re


def clean_text(sentence):
    """Strip HTML tags, digits, punctuation/symbols and ASCII spaces from a sentence."""
    sentence = re.sub(r'<.*?>', '', sentence)    # drop HTML tags
    sentence = re.sub(r'\d+', '', sentence)      # drop digits
    sentence = re.sub(r'[^\w\s]', '', sentence)  # drop punctuation/symbols
    return sentence.replace(' ', '')             # drop ASCII spaces
去除拟声词
# Data-cleaning utilities (v2): contraction expansion + transcription-noise removal.
import re
import contractions
import unicodedata


def unicodeToAscii(text):
    """Strip diacritics by NFD-decomposing and dropping combining marks (Mn)."""
    return ''.join(c for c in unicodedata.normalize('NFD', text)
                   if unicodedata.category(c) != 'Mn')


def preprocess_en(text):
    """Normalize English: ASCII-fold, expand contractions, drop parenthesized asides and odd symbols."""
    text = unicodeToAscii(text.strip())
    text = contractions.fix(text)  # e.g. "there's" -> "there is"
    text = re.sub(r'\([^)]*\)', '', text)  # remove parenthesized content
    text = re.sub(r"[^a-zA-Z0-9.!?]+", r" ", text)  # keep letters/digits/.!? (digits preserved)
    return text


def preprocess_zh(text):
    """Remove TED-style transcription noise such as (笑声)/(掌声) from Chinese text."""
    patterns_to_replace = [
        "(笑声)", "(掌声)", "(口哨声)", "口哨声)", "(音乐)", "(鼓掌)", "(笑)", "(众笑)",
        "(视频):", "(大笑)", "(录音)", "(消音)", "(欢呼)", "(视频)", "(叫声)", "(录像):",
        "(录像)", "(拍手)", "(大喊)", "(吟唱)", "(噪音)", "(铃声)", "(尖叫)", "(影片)",
        "(声音)", "(喇叭)", "(齐唱)", "(混音)", "(音频)", "(影视)", "(噪声)", "(口哨)",
        "(击掌)", "(铃铛)", "(小号)", "(歌声)", "(狂笑)", "(演唱)", "(喝彩)", "(配乐)",
        "(调音)", "(笑话)", "(叹气)", "(鸟鸣)", "(鸟鸣)", "(爆炸)", "(枪声)", "(爆笑)",
        "(滑音)", "(音调)", "(游戏)", "(笑)", "(淫笑)", "(音译)", "(笑♫)", "(音乐)",
        "(咳嗽)", "(咳嗽)", "(马嘶声)", "(音乐声)", "(鼓掌声)", "(众人笑)", "(喇叭声)",
        "(钢琴声)", "(吹口哨)", "(尖叫声)", "(大家笑)", "(重击声)", "(呼吸声)", "(感叹声)",
        "(敲打声)", "(背景音)", "(噼啪声)", "(观众笑)", "(爆炸声)", "(歌词:)", "(敲椅声)",
        "(滋滋声)", "(静电声)", "(笑~~)", "(喝彩声)", "(抨击声)", "(咳嗽声)", "(喊叫声)",
        "(风雨声)", "(哭泣声)", "(大笑声)", "(欢呼声)", "(嘀嘀声)", "(闹铃声)", "(拍手声)",
        "(讨论声)", "(鼓掌♫)", "(喘息声)", "(打呼声)", "(惊叫声)", "(议论声)", "(音乐起)",
        "(小提琴)", "(拍巴掌)", "(众鼓掌)", "(众人鼓掌)", "(众人欢呼)", "(观众笑声)",
        "(观众掌声)", "(热烈鼓掌)", "(哄堂大笑)", "(警报噪声)", "(掌声♫♪)", "(按喇叭声)",
        "(众人大笑)", "(现场笑声)", "(限频音乐)", "(音乐响起)", "(掌声。 )", "(观众鼓掌)",
        "(电话铃声)", "(又是狂笑)", "(电话铃响)", "(音乐和声)", "(笑声,掌声)", "(频率的声音)",
        "(众笑+鼓掌)", "(相机快门声)", "(音乐录影带)", "(诺基亚铃声)", "(听众的笑声)",
        "(无意义的声音)", "(笑+鼓掌♫♫)", "(发射时的噪音)", "(人群的欢呼声)", "(打喷嚏的声音)",
    ]
    pattern = "|".join(map(re.escape, patterns_to_replace))
    text = re.sub(pattern, "", text)
    return text


# Quick smoke checks of the two cleaners.
sen = "我们管它叫做 一个情感工程 它使用最新的 十七世纪的技术- (笑声) 来把名词 变成动词"
text = preprocess_zh(sen)
print(text)
sen = "there's a dog"
text = preprocess_en(sen)
print(text)


def preprocess_data(en_data: List[str], zh_data: List[str]) -> List[Tuple[List[str], List[str]]]:
    """Clean, tokenize and truncate parallel sentences, dropping empty pairs."""
    processed_data = []
    for en, zh in zip(en_data, zh_data):
        # BUGFIX: the baseline called preprocess_zh on the ENGLISH side even
        # though its comment said "expand English contractions"; as a result
        # contractions were never expanded. Use preprocess_en here.
        en = preprocess_en(en)
        zh = preprocess_zh(zh)  # strip transcription noise like (笑声)
        en_tokens = en_tokenizer(en.lower())[:MAX_LENGTH]
        zh_tokens = zh_tokenizer(zh)[:MAX_LENGTH]
        if en_tokens and zh_tokens:  # keep only non-empty pairs
            processed_data.append((en_tokens, zh_tokens))
    return processed_data
最终实验结果
采用了如下调参数据、和上面提到的数据清洗函数和加入术语词典函数
# Final run constants.
MAX_LENGTH = 120  # maximum sentence length, raised from 100
BATCH_SIZE = 32
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
N = 148363  # training samples to use (148363 = everything)
# Final model hyper-parameters.
D_MODEL = 512
NHEAD = 16
NUM_ENCODER_LAYERS = 6
NUM_DECODER_LAYERS = 6
DIM_FEEDFORWARD = 1024
DROPOUT = 0.1
N_EPOCHS = 10
CLIP = 1
def unicodeToAscii(text):
    """Strip diacritics by NFD-decomposing and dropping combining marks (Mn)."""
    return ''.join(c for c in unicodedata.normalize('NFD', text)
                   if unicodedata.category(c) != 'Mn')


def preprocess_en(text):
    """Normalize English: ASCII-fold, expand contractions, drop asides/tags/punctuation/spaces."""
    text = unicodeToAscii(text.strip())
    text = contractions.fix(text)  # e.g. "there's" -> "there is"
    text = re.sub(r'\([^)]*\)', '', text)  # remove parenthesized content
    text = re.sub(r'<.*?>', '', text)  # drop HTML tags
    text = re.sub(r"[^a-zA-Z0-9.!?]+", r" ", text)  # keep letters/digits/.!?
    text = re.sub(r'[^\w\s]', '', text)  # drop remaining punctuation/symbols
    text = text.replace(' ', '')  # drop ASCII spaces
    return text


def preprocess_zh(text):
    """Remove TED-style transcription noise such as (笑声)/(掌声) from Chinese text."""
    # Cleanup: removed the baseline's unused local `pattern1`, which was never
    # applied; behavior is unchanged.
    patterns_to_replace = [
        "(笑声)", "(掌声)", "(口哨声)", "口哨声)", "(音乐)", "(鼓掌)", "(笑)", "(众笑)",
        "(视频):", "(大笑)", "(录音)", "(消音)", "(欢呼)", "(视频)", "(叫声)", "(录像):",
        "(录像)", "(拍手)", "(大喊)", "(吟唱)", "(噪音)", "(铃声)", "(尖叫)", "(影片)",
        "(声音)", "(喇叭)", "(齐唱)", "(混音)", "(音频)", "(影视)", "(噪声)", "(口哨)",
        "(击掌)", "(铃铛)", "(小号)", "(歌声)", "(狂笑)", "(演唱)", "(喝彩)", "(配乐)",
        "(调音)", "(笑话)", "(叹气)", "(鸟鸣)", "(鸟鸣)", "(爆炸)", "(枪声)", "(爆笑)",
        "(滑音)", "(音调)", "(游戏)", "(笑)", "(淫笑)", "(音译)", "(笑♫)", "(音乐)",
        "(咳嗽)", "(咳嗽)", "(马嘶声)", "(音乐声)", "(鼓掌声)", "(众人笑)", "(喇叭声)",
        "(钢琴声)", "(吹口哨)", "(尖叫声)", "(大家笑)", "(重击声)", "(呼吸声)", "(感叹声)",
        "(敲打声)", "(背景音)", "(噼啪声)", "(观众笑)", "(爆炸声)", "(歌词:)", "(敲椅声)",
        "(滋滋声)", "(静电声)", "(笑~~)", "(喝彩声)", "(抨击声)", "(咳嗽声)", "(喊叫声)",
        "(风雨声)", "(哭泣声)", "(大笑声)", "(欢呼声)", "(嘀嘀声)", "(闹铃声)", "(拍手声)",
        "(讨论声)", "(鼓掌♫)", "(喘息声)", "(打呼声)", "(惊叫声)", "(议论声)", "(音乐起)",
        "(小提琴)", "(拍巴掌)", "(众鼓掌)", "(众人鼓掌)", "(众人欢呼)", "(观众笑声)",
        "(观众掌声)", "(热烈鼓掌)", "(哄堂大笑)", "(警报噪声)", "(掌声♫♪)", "(按喇叭声)",
        "(众人大笑)", "(现场笑声)", "(限频音乐)", "(音乐响起)", "(掌声。 )", "(观众鼓掌)",
        "(电话铃声)", "(又是狂笑)", "(电话铃响)", "(音乐和声)", "(笑声,掌声)", "(频率的声音)",
        "(众笑+鼓掌)", "(相机快门声)", "(音乐录影带)", "(诺基亚铃声)", "(听众的笑声)",
        "(无意义的声音)", "(笑+鼓掌♫♫)", "(发射时的噪音)", "(人群的欢呼声)", "(打喷嚏的声音)",
    ]
    pattern = "|".join(map(re.escape, patterns_to_replace))
    text = re.sub(pattern, "", text)
    return text
谢谢观看,欢迎点评指导~