In this section, we implement the model from the FastText paper, which uses far fewer parameters than our previous model and trains noticeably faster.
Preparing the Data
One of the key concepts in the FastText paper is the calculation of the n-grams of an input sentence, which are appended to the end of the sentence. Here, we use bi-grams.
For example, in the sentence “how are you ?”, the bi-grams are: “how are”, “are you” and “you ?”.
The generate_bigrams function takes a sentence that has already been tokenized, calculates the bi-grams, and appends them to the end of the token list.
def generate_bigrams(x):
    # zip pairs each token with the next one, producing bi-gram tuples
    n_grams = set(zip(*[x[i:] for i in range(2)]))
    for n_gram in n_grams:
        x.append(' '.join(n_gram))
    return x
generate_bigrams(['This', 'film', 'is', 'terrible'])
'''
['This', 'film', 'is', 'terrible', 'This film', 'film is', 'is terrible']
'''
As before, we use a Field to define how the data should be processed. Note the preprocessing argument: generate_bigrams is applied to each example after tokenization.
import torch
from torchtext.legacy import data
from torchtext.legacy import datasets

SEED = 1234

torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True

TEXT = data.Field(tokenize = 'spacy',
                  tokenizer_language = 'en_core_web_sm',
                  preprocessing = generate_bigrams)

LABEL = data.LabelField(dtype = torch.float)
Load the IMDb dataset and split off a validation set:
import random
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
train_data, valid_data = train_data.split(random_state = random.seed(SEED))
Build the vocabulary and load the pre-trained word embeddings:
MAX_VOCAB_SIZE = 25_000

TEXT.build_vocab(train_data,
                 max_size = MAX_VOCAB_SIZE,
                 vectors = "glove.6B.100d",
                 unk_init = torch.Tensor.normal_)

LABEL.build_vocab(train_data)
And create the iterators:
BATCH_SIZE = 64

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
    (train_data, valid_data, test_data),
    batch_size = BATCH_SIZE,
    device = device)
Build the Model
This model has far fewer parameters than the previous models, as it only has two layers with any parameters: the embedding layer and the linear layer. There is no RNN component in sight!
It first calculates the word embedding for each word using the Embedding layer (blue), then calculates the average of all of the word embeddings (pink) and feeds this through the Linear layer (silver), and that's it!
We implement the averaging with the avg_pool2d (average pool 2-dimensions) function. It may seem strange to use 2-dimensional pooling when our sentences are 1-dimensional. However, you can think of the word embeddings as a 2-dimensional grid, with the words along one axis and the embedding dimensions along the other. The image below shows an example sentence with an embedding dimension of 5, forming a [4x5] tensor.
avg_pool2d uses a filter of size embedded.shape[1] (i.e. the length of the sentence) by 1, shown in pink in the image below.
The average of all the elements covered by the filter is calculated, then the filter slides to the right, averaging the next column of embedding values for each word in the sentence.
Each filter position gives us a single value, the average of all covered elements. After the filter has covered all the embedding dimensions, we get a [1x5] tensor, which is then passed through the linear layer to produce our prediction.
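To make the pooling concrete, here is a minimal sketch (not from the original notebook; the tensor values are random) that applies avg_pool2d to a single [4x5] sentence and confirms the result is the [1x5] average of its word embeddings:
import torch
import torch.nn.functional as F

torch.manual_seed(0)
embedded = torch.randn(1, 4, 5)  # [batch size, sent len, emb dim]
# the filter is [sent len x 1], so each slide covers one embedding
# dimension across every word; averaging collapses the sentence axis
pooled = F.avg_pool2d(embedded, (embedded.shape[1], 1))
print(pooled.shape)  # torch.Size([1, 1, 5])
print(torch.allclose(pooled.squeeze(1), embedded.mean(dim=1)))  # True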
import torch.nn as nn
import torch.nn.functional as F

class FastText(nn.Module):
    def __init__(self, vocab_size, embedding_dim, output_dim, pad_idx):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)
        self.fc = nn.Linear(embedding_dim, output_dim)

    def forward(self, text):
        #text = [sent len, batch size]
        embedded = self.embedding(text)
        #embedded = [sent len, batch size, emb dim]
        embedded = embedded.permute(1, 0, 2)
        #embedded = [batch size, sent len, emb dim]
        pooled = F.avg_pool2d(embedded, (embedded.shape[1], 1)).squeeze(1)
        #pooled = [batch size, embedding_dim]
        return self.fc(pooled)
As before, we create an instance of our FastText class.
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
OUTPUT_DIM = 1
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
model = FastText(INPUT_DIM, EMBEDDING_DIM, OUTPUT_DIM, PAD_IDX)
Looking at the number of parameters in our model, we see it has about the same as the standard RNN from the first notebook, and half the parameters of the previous model.
def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

print(f'The model has {count_parameters(model):,} trainable parameters')
'''
The model has 2,500,301 trainable parameters
'''
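This count checks out: the vocabulary contains the 25,000 most frequent tokens plus <unk> and <pad>, so the embedding layer contributes 25,002 × 100 = 2,500,200 parameters, and the linear layer adds 100 weights plus 1 bias, giving 2,500,301 in total.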
And copy the pre-trained vectors to our embedding layer:
pretrained_embeddings = TEXT.vocab.vectors
model.embedding.weight.data.copy_(pretrained_embeddings)
Don't forget to zero the initial weights of the <unk> and <pad> tokens:
UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]
model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)
model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)
Train the Model
Training the model follows the same steps as before.
import torch.optim as optim
optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()
model = model.to(device)
criterion = criterion.to(device)
def binary_accuracy(preds, y):
    """
    Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
    """
    #round predictions to the closest integer
    rounded_preds = torch.round(torch.sigmoid(preds))
    correct = (rounded_preds == y).float() #convert into float for division
    acc = correct.sum() / len(correct)
    return acc
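As a quick illustration (the tensors below are made up, not taken from the dataset), three of the four rounded predictions match their labels, so binary_accuracy returns 0.75:
preds = torch.tensor([2.5, -1.2, 0.3, -0.7])  # raw logits
labels = torch.tensor([1., 0., 0., 0.])
print(binary_accuracy(preds, labels))  # tensor(0.7500)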
def train(model, iterator, optimizer, criterion):
    epoch_loss = 0
    epoch_acc = 0
    model.train()
    for batch in iterator:
        optimizer.zero_grad()
        predictions = model(batch.text).squeeze(1)
        loss = criterion(predictions, batch.label)
        acc = binary_accuracy(predictions, batch.label)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate(model, iterator, criterion):
    epoch_loss = 0
    epoch_acc = 0
    model.eval()
    with torch.no_grad():
        for batch in iterator:
            predictions = model(batch.text).squeeze(1)
            loss = criterion(predictions, batch.label)
            acc = binary_accuracy(predictions, batch.label)
            epoch_loss += loss.item()
            epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)
import time

def epoch_time(start_time, end_time):
    elapsed_time = end_time - start_time
    elapsed_mins = int(elapsed_time / 60)
    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
    return elapsed_mins, elapsed_secs
Finally, we train our model:
N_EPOCHS = 5

best_valid_loss = float('inf')

for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'tut3-model.pt')
    print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
'''
Epoch: 01 | Epoch Time: 0m 7s
    Train Loss: 0.688 | Train Acc: 61.31%
     Val. Loss: 0.637 | Val. Acc: 72.46%
Epoch: 02 | Epoch Time: 0m 6s
    Train Loss: 0.651 | Train Acc: 75.04%
     Val. Loss: 0.507 | Val. Acc: 76.92%
Epoch: 03 | Epoch Time: 0m 6s
    Train Loss: 0.578 | Train Acc: 79.91%
     Val. Loss: 0.424 | Val. Acc: 80.97%
Epoch: 04 | Epoch Time: 0m 6s
    Train Loss: 0.501 | Train Acc: 83.97%
     Val. Loss: 0.377 | Val. Acc: 84.34%
Epoch: 05 | Epoch Time: 0m 6s
    Train Loss: 0.435 | Train Acc: 86.96%
     Val. Loss: 0.363 | Val. Acc: 86.18%
'''
How does our model perform on the test set?
model.load_state_dict(torch.load('tut3-model.pt'))
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')
'''
Test Loss: 0.381 | Test Acc: 85.42%
'''
User Input
As before, we can test the model on our own sentences, except we now first generate the bi-grams for the input sentence and then map each token to its index via TEXT.vocab.stoi.
import spacy
nlp = spacy.load('en_core_web_sm')

def predict_sentiment(model, sentence):
    model.eval()
    tokenized = generate_bigrams([tok.text for tok in nlp.tokenizer(sentence)])
    indexed = [TEXT.vocab.stoi[t] for t in tokenized]
    tensor = torch.LongTensor(indexed).to(device)
    tensor = tensor.unsqueeze(1)
    prediction = torch.sigmoid(model(tensor))
    return prediction.item()
A negative review should return a value close to 0, while a positive review should return a value close to 1:

predict_sentiment(model, "This film is terrible")
predict_sentiment(model, "This film is great")
Next Steps
In the next section, we will use convolutional neural networks (CNNs) to perform sentiment analysis.