Quick Note: Preface
Sentiment classification (negative or positive) of IMDB reviews using Torchtext 0.12.0+ (0.14.0 in practice)
I. Why this note?
Torchtext 0.12.0 was a major API overhaul, so this note uses a version above 0.12.0 (0.14.0 in practice) to classify the sentiment of IMDB reviews.
When downloading the IMDB data through the new torchtext API below, the download often fails due to network problems, so a copy of the IMDB dataset is attached here:
Link: https://pan.baidu.com/s/1-2a7QwTzcRonO3pG3DZXbw
Extraction code: yyds
from torchtext.datasets import IMDB
train_iter, test_iter = IMDB()
After downloading, place the dataset under
C:\Users\<your username>\.cache\torch\text\datasets\IMDB\
and it will be picked up from there.
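A quick sanity check (a minimal sketch, assuming the files are already in the cache directory above): load one training example and print it.

from torchtext.datasets import IMDB

train_iter = IMDB(split='train')
label, text = next(iter(train_iter))  # label is 1 (neg) or 2 (pos) in this version
print(label, text[:80])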
II. Note contents
1. Code
The full code is as follows:
import torch
from torchtext.datasets import IMDB
import torch.nn as nn
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
tokenizer = get_tokenizer('basic_english')  # tokenizer
train_iter = IMDB(split='train')  # download the IMDB training split
def yield_tokens(data_iter):
    for _, text in data_iter:
        yield tokenizer(text)

vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=['<unk>'])  # build the vocabulary
vocab.set_default_index(vocab['<unk>'])
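# Note: set_default_index makes the vocab map any out-of-vocabulary token to the
# index of '<unk>' instead of raising a RuntimeError on lookup.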
# print(vocab(['here', 'is', 'an', 'example']))
text_pipeline = lambda x: vocab(tokenizer(x))  # text pipeline: string -> list of token ids
label_pipeline = lambda x: int(x) - 1  # label pipeline: IMDB labels 1/2 -> 0/1
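# Illustration (the token ids below are hypothetical; actual values depend on the built vocab):
#   text_pipeline('here is an example')  ->  e.g. [475, 21, 30, 5297]
#   label_pipeline(1) -> 0 (neg);  label_pipeline(2) -> 1 (pos)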
from torch.utils.data import DataLoader
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def collate_batch(batch):
    label_list, text_list, offsets = [], [], [0]
    for (_label, _text) in batch:
        label_list.append(label_pipeline(_label))
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        offsets.append(processed_text.size(0))
    label_list = torch.tensor(label_list, dtype=torch.int64)
    offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
    text_list = torch.cat(text_list)
    return label_list.to(device), text_list.to(device), offsets.to(device)
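# How offsets work (an illustrative example with made-up lengths): for a batch of
# three reviews of 4, 2 and 3 tokens, text_list is one flat tensor of 9 token ids
# and offsets = [0, 4, 6], the start index of each review. nn.EmbeddingBag uses
# these start positions to pool each review's embeddings into a single vector,
# so no padding is needed.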
dataloader = DataLoader(train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch)  # a batched DataLoader (demonstration only; the loaders actually used are built below)
class TextClassificationModel(nn.Module):  # the classification model
    def __init__(self, vocab_size, embed_dim, num_class):
        super(TextClassificationModel, self).__init__()
        self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=False)
        self.fc = nn.Linear(embed_dim, num_class)
        self.init_weights()

    def init_weights(self):
        initrange = 0.5
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()

    def forward(self, text, offsets):
        embedded = self.embedding(text, offsets)
        return self.fc(embedded)
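# nn.EmbeddingBag defaults to mode='mean', so each review is represented by the
# average of its token embeddings, and the linear layer maps that embed_dim-sized
# vector to the num_class logits.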
num_class = len(set([label for (label, text) in train_iter]))
vocab_size = len(vocab)
emsize = 64
model = TextClassificationModel(vocab_size, emsize, num_class).to(device)
import time
def train(dataloader, optimizer, criterion, epoch):  # training function (one epoch)
    model.train()
    total_acc, total_count = 0, 0
    log_interval = 50
    start_time = time.time()
    for idx, (label, text, offsets) in enumerate(dataloader):
        optimizer.zero_grad()
        predicted_label = model(text, offsets)
        loss = criterion(predicted_label, label)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
        optimizer.step()
        total_acc += (predicted_label.argmax(1) == label).sum().item()
        total_count += label.size(0)
        if idx % log_interval == 0 and idx > 0:
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches '
                  '| accuracy {:8.3f}'.format(epoch, idx, len(dataloader),
                                              total_acc / total_count))
            total_acc, total_count = 0, 0
            start_time = time.time()
def evaluate(dataloader):  # evaluation function
    model.eval()
    total_acc, total_count = 0, 0
    with torch.no_grad():
        for idx, (label, text, offsets) in enumerate(dataloader):
            predicted_label = model(text, offsets)
            total_acc += (predicted_label.argmax(1) == label).sum().item()
            total_count += label.size(0)
    return total_acc / total_count
from torch.utils.data.dataset import random_split
from torchtext.data.functional import to_map_style_dataset
# hyperparameters
EPOCHS = 10  # number of training epochs
LR = 5  # learning rate
BATCH_SIZE = 64  # batch size
criterion = torch.nn.CrossEntropyLoss()  # a binary loss such as BCEWithLogitsLoss would also work
optimizer = torch.optim.SGD(model.parameters(), lr=LR)  # Adam could be used here instead
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
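# StepLR with step_size=1.0 and gamma=0.1 divides the learning rate by 10 every
# time scheduler.step() is called; in the loop below it is only called when
# validation accuracy stops improving, so the LR decays 5 -> 0.5 -> 0.05 on plateaus.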
total_accu = None
train_iter, test_iter = IMDB()  # download both the IMDB train and test splits
train_dataset = to_map_style_dataset(train_iter)
test_dataset = to_map_style_dataset(test_iter)
num_train = int(len(train_dataset) * 0.95)
split_train_, split_valid_ = \
    random_split(train_dataset, [num_train, len(train_dataset) - num_train])
train_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)
valid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)
test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE,
                             shuffle=True, collate_fn=collate_batch)
# training
for epoch in range(1, EPOCHS + 1):
    epoch_start_time = time.time()
    train(train_dataloader, optimizer, criterion, epoch)
    accu_val = evaluate(valid_dataloader)
    if total_accu is not None and total_accu > accu_val:
        scheduler.step()  # decay the LR when validation accuracy drops
    else:
        total_accu = accu_val
    print('-' * 59)
    print('| end of epoch {:3d} | time: {:5.2f}s | '
          'valid accuracy {:8.3f} '.format(epoch,
                                           time.time() - epoch_start_time,
                                           accu_val))
    print('-' * 59)
# test
print('Checking the results of test dataset.')
accu_test = evaluate(test_dataloader)
print('test accuracy {:8.3f}'.format(accu_test))
# live demo
imdb_label = {1: "neg",
              2: "pos"}

def predict(text, text_pipeline):
    with torch.no_grad():
        text = torch.tensor(text_pipeline(text))
        output = model(text, torch.tensor([0]))
        return output.argmax(1).item() + 1
ex_text_str = "This movie is rubbish, I don't like it very much."
model = model.to("cpu")
print()
print('-----------------------------------------------')
print("This is a %s review" % ag_news_label[predict(ex_text_str, text_pipeline)])
2. Results
The model above was trained for only 10 epochs with little to no tuning, since this is just a demo; accuracy can be improved later by optimizing each part of the pipeline.
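For example, one low-effort change already hinted at in the code comments is replacing SGD with Adam. A sketch (untested here; note that Adam typically needs a much smaller learning rate than the 5 used for SGD above):

optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)  # replaces the SGD optimizer above
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)  # same decay schedule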
Summary
That wraps up: sentiment classification (negative or positive) of IMDB reviews using Torchtext 0.12.0+ (0.14.0 in practice).