Week TR6: Implementing Text Classification with a Transformer - Embedding Version

Tasks:
● Building on the code from Week TR5 (Transformer in Practice: Text Classification), switch the input representation to an nn.Embedding layer (a toy sketch of the difference follows this list)
● Understand the code logic in this post and run it successfully
● Tune the code based on your own understanding so that the validation accuracy reaches 79%
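
The core change relative to week TR5 is the input representation: instead of one-hot style vectors, each token index is looked up in a trainable nn.Embedding table. A toy sketch of the difference (the sizes here are illustrative, not taken from the dataset used below):

import torch
import torch.nn as nn
import torch.nn.functional as F

token_ids = torch.tensor([2, 10, 13])                      # three token indices from a 20-word vocabulary

one_hot = F.one_hot(token_ids, num_classes=20).float()     # fixed, sparse vectors of shape [3, 20]
embed   = nn.Embedding(num_embeddings=20, embedding_dim=6)
dense   = embed(token_ids)                                 # trainable, dense vectors of shape [3, 6]

print(one_hot.shape, dense.shape)                          # torch.Size([3, 20]) torch.Size([3, 6])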

I. Preparation

  1. Environment setup

This is a simple, hands-on text-classification example implemented in PyTorch.

import torch, torchvision
print(torch.__version__)        # note the double underscores
print(torchvision.__version__)

Output:

2.0.0+cpu
0.15.1+cpu
import math, os, PIL, pathlib, warnings
import torch.nn as nn
import numpy    as np
from torchvision import transforms, datasets

warnings.filterwarnings("ignore")             # suppress warnings

# set the device (GPU if available, otherwise CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device

Output:

device(type='cpu')
  2. Load the data
import pandas as pd

# load the custom Chinese dataset
train_data = pd.read_csv('./TR6/train.csv', sep='\t', header=None)
train_data.head()

Output:

                                   0              1
0         还有双鸭山到淮阴的汽车票吗13号的   Travel-Query
1                        从这里怎么回家   Travel-Query
2        随便播放一首专辑阁楼里的佛里的歌     Music-Play
3                    给看一下墓王之王嘛  FilmTele-Play
4  我想看挑战两把s686打突变团竞的游戏视频     Video-Play

label_name = list(set(train_data[1].values[:]))
print(label_name)

Output:

['Radio-Listen', 'TVProgram-Play', 'Video-Play', 'Travel-Query', 'Weather-Query', 'Music-Play', 'HomeAppliance-Control', 'Other', 'Calendar-Query', 'FilmTele-Play', 'Alarm-Update', 'Audio-Play']

II. Data Preprocessing

  1. Build the vocabulary

The jieba word-segmentation library needs to be installed separately (e.g. pip install jieba).

from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
import jieba

# Chinese word segmentation
tokenizer = jieba.lcut

def yield_tokens(data_iter):
    for text in data_iter:
        yield tokenizer(text)

vocab = build_vocab_from_iterator(yield_tokens(train_data[0].values[:]), specials=["<unk>"])
vocab.set_default_index(vocab["<unk>"]) # set the default index: tokens not in the vocabulary map to <unk>

Output:

Building prefix dict from the default dictionary ...
Loading model from cache C:\Users\xzy\AppData\Local\Temp\jieba.cache
Loading model cost 0.934 seconds.
Prefix dict has been built successfully.
text_pipeline  = lambda x: torch.tensor(vocab(tokenizer(x)))
label_pipeline = lambda x: torch.tensor(label_name.index(x))

print(text_pipeline('我想看和平精英上战神必备技巧的游戏视频'))
print(label_pipeline('Video-Play'))

Output:

tensor([   2,   10,   13,  973, 1079,  146, 7724, 7574, 7793,    1,  186,   28])
tensor(2)
  2. One-hot encode the labels
from functools import partial

X = [text_pipeline(i) for i in train_data[0].values[:]]
y = [label_pipeline(i) for i in train_data[1].values[:]]

# one-hot encode the labels y
numbers_array = np.array(y)             # convert to a NumPy array
num_classes   = np.max(numbers_array)+1 # number of classes
y = np.eye(num_classes)[numbers_array]  # one-hot encoding
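
Strictly speaking, the one-hot step is optional: nn.CrossEntropyLoss also accepts integer class indices as targets. A sketch of that alternative (not the approach used in the rest of this post; the accuracy checks in train()/test() would then compare pred.argmax(1) against y directly instead of y.argmax(1)):

# alternative: keep integer class indices instead of one-hot vectors
y_indices = np.array([label_name.index(i) for i in train_data[1].values[:]])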
  3. Define a custom dataset class
from torch.utils.data import DataLoader, Dataset

class TextDataset(Dataset):
    def __init__(self, texts, labels):
        self.texts  = texts
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return self.texts[idx], self.labels[idx]
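
A quick illustrative check of the dataset class (it simply pairs each tokenized text with its label):

demo_ds = TextDataset(X, y)
print(len(demo_ds))        # number of samples
text0, label0 = demo_ds[0]
print(text0)               # token-id tensor of the first sentence
print(label0)              # its one-hot label vector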
  4. Define the padding function
import torch.nn.functional as F

max_len = max(len(i) for i in X)

def collate_batch(batch, max_len):
    texts, labels = zip(*batch)
    padded_texts = [F.pad(text, (0, max_len - len(text)), value=0) for text in texts]
    padded_texts = torch.stack(padded_texts)
    labels = torch.tensor(labels, dtype=torch.float)#.unsqueeze(1)
    return padded_texts.to(device), labels.to(device)

# use partial to bind max_len so collate_fn matches the DataLoader interface
collate_fn = partial(collate_batch, max_len=max_len)
max_len

Output:

30
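
A toy check of the padding logic (illustrative values only, not part of the original post): shorter sequences are right-padded with 0 and the batch is stacked into a single tensor.

toy_batch = [(torch.tensor([3, 7, 5]), np.eye(num_classes)[0]),
             (torch.tensor([9, 2]),    np.eye(num_classes)[1])]
toy_texts, toy_labels = collate_batch(toy_batch, max_len=5)
print(toy_texts)         # tensor([[3, 7, 5, 0, 0],
                         #         [9, 2, 0, 0, 0]])
print(toy_labels.shape)  # torch.Size([2, 12]) with the current 12 classes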
  5. Build the dataset
from torch.utils.data.dataset import random_split

# example vocabulary size and embedding dimension (placeholders; the real values are set before training)
vocab_size = 10
embed_dim  = 6

# create the dataset and the data loaders
dataset = TextDataset(X, y)

train_dataset, valid_dataset = random_split(dataset,
                                          [int(len(dataset)*0.8),
                                           int(len(dataset)*0.2)])

train_dataloader = DataLoader(train_dataset, batch_size=128,
                          shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(valid_dataset, batch_size=128,
                          shuffle=True, collate_fn=collate_fn)
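
A quick sanity check (not in the original post) that one batch comes out of the DataLoader with the expected shapes:

text_batch, label_batch = next(iter(train_dataloader))
print(text_batch.shape)   # e.g. torch.Size([128, 30])  -> [batch_size, max_len]
print(label_batch.shape)  # e.g. torch.Size([128, 12])  -> [batch_size, num_class]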

III. Model Construction

  1. Define the positional encoding
class PositionalEncoding(nn.Module):
    def __init__(self, embed_dim, max_len=100):
        super(PositionalEncoding, self).__init__()

        # create a zero tensor of shape [max_len, embed_dim]
        pe = torch.zeros(max_len, embed_dim) 
        # position indices of shape [max_len, 1]
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) 
        # frequency term; note this code uses a base of 100 (the original Transformer paper uses 10000)
        div_term = torch.exp(torch.arange(0, embed_dim, 2).float() * (-math.log(100.0) / embed_dim))
        
        pe[:, 0::2] = torch.sin(position * div_term) # PE(pos, 2i)
        pe[:, 1::2] = torch.cos(position * div_term) # PE(pos, 2i+1)

        pe = pe.unsqueeze(0)

        # register the positional encoding as a buffer: it is not a trainable parameter, but it is saved together with the model
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x has shape [batch_size, seq_len, embedding_dim]
        # add the positional encoding to the input (it broadcasts over the batch dimension)
        # print(x.shape, self.pe.shape, self.pe[:,:x.size(1), :].shape)
        x = x + self.pe[:,:x.size(1)]
        # print(x.shape)
        return x
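
This is the sin/cos positional encoding from "Attention Is All You Need", except that the frequency base here is 100 instead of the usual 10000. A small illustrative check (made-up sizes): the module only adds a fixed [1, max_len, embed_dim] table to the input, so the shape is unchanged.

pe_demo = PositionalEncoding(embed_dim=64)
dummy   = torch.zeros(2, 30, 64)   # [batch_size, seq_len, embed_dim]
print(pe_demo(dummy).shape)        # torch.Size([2, 30, 64])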
  2. Define the Transformer model
from torch.nn import TransformerEncoder, TransformerEncoderLayer

class TransformerModel(nn.Module):

    def __init__(self, vocab_size, embed_dim, max_len, num_class, 
                 nhead=8, d_hid=256, nlayers=12, dropout=0.1):
        super().__init__()

        self.embedding = nn.Embedding(vocab_size,   # vocabulary size
                                      embed_dim)    # embedding dimension
        
        self.pos_encoder = PositionalEncoding(embed_dim)

        # define the encoder layers
        encoder_layers           = TransformerEncoderLayer(embed_dim, nhead, d_hid, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.embed_dim           = embed_dim
        self.linear              = nn.Linear(embed_dim*max_len, num_class)
        
    def forward(self, src, src_mask=None):

        src    = self.embedding(src)
        src    = self.pos_encoder(src)
        output = self.transformer_encoder(src, src_mask)

        output = output.view(output.size(0), -1)
        output = self.linear(output)
 
        return output
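
An illustrative end-to-end shape check with made-up sizes (the real vocab_size and num_class are set later): token ids of shape [batch_size, max_len] go in, class logits of shape [batch_size, num_class] come out.

demo_model = TransformerModel(vocab_size=100, embed_dim=64, max_len=30, num_class=12)
demo_ids   = torch.randint(0, 100, (4, 30))   # [batch_size, max_len]
print(demo_model(demo_ids).shape)             # torch.Size([4, 12])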
  3. Define the training function
# training loop
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)  # size of the training set
    num_batches = len(dataloader)   # number of batches (size / batch_size, rounded up)

    train_loss, train_acc = 0, 0  # initialize the training loss and accuracy
    
    for X, y in dataloader:  # fetch a batch of texts and their labels
        X, y = X.to(device), y.to(device)
        
        # compute the prediction error
        pred = model(X)          # network output
        loss = loss_fn(pred, y)  # loss between the network output and the ground-truth labels
        
        # backpropagation
        optimizer.zero_grad()  # zero the gradients
        loss.backward()        # backpropagate
        optimizer.step()       # update the parameters
        
        # accumulate accuracy and loss
        train_acc  += (pred.argmax(1) == y.argmax(1)).type(torch.float).sum().item()
        train_loss += loss.item()
            
    train_acc  /= size
    train_loss /= num_batches

    return train_acc, train_loss
  4. Define the test function
def test(dataloader, model, loss_fn):
    size        = len(dataloader.dataset)  # size of the validation set
    num_batches = len(dataloader)          # number of batches (size / batch_size, rounded up)
    test_loss, test_acc = 0, 0
    
    # disable gradient tracking during evaluation to save memory and compute
    with torch.no_grad():
        for texts, target in dataloader:
            texts, target = texts.to(device), target.to(device)
            
            # compute the loss
            target_pred = model(texts)
            loss        = loss_fn(target_pred, target)
            
            test_loss += loss.item()
            test_acc  += (target_pred.argmax(1) == target.argmax(1)).type(torch.float).sum().item()

    test_acc  /= size
    test_loss /= num_batches

    return test_acc, test_loss

IV. Training the Model

vocab_size = len(vocab) # vocabulary size
embed_dim  = 64         # embedding dimension
num_class  = len(label_name)

# create the Transformer model and move it to the device
model = TransformerModel(vocab_size, 
                         embed_dim, 
                         max_len,
                         num_class).to(device)
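
Optionally (not part of the original post), count the trainable parameters to get a feel for the model size before training:

num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'trainable parameters: {num_params:,}')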
  1. Train the model
import torch.optim as optim

# hyperparameters
EPOCHS     = 50

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)

for epoch in range(1, EPOCHS + 1):
    model.train()  # switch to training mode
    train_acc, train_loss = train(train_dataloader, model, criterion, optimizer)
    
    model.eval()  # switch to evaluation mode
    test_acc, test_loss = test(valid_dataloader, model, criterion)
    
    # current learning rate
    lr = optimizer.state_dict()['param_groups'][0]['lr']
    print('| epoch {:1d} | train_acc {:4.4f} train_loss {:4.4f} | lr {:4.4f}'.format(epoch,
                                                                                     train_acc,
                                                                                     train_loss,
                                                                                     lr))
    print('| epoch {:1d} | test_acc {:4.4f} test_loss {:4.4f} | lr {:4.4f}'.format(epoch,
                                                                                   test_acc,
                                                                                   test_loss,
                                                                                   lr))
    print('-' * 69)

Output:

| epoch 1 | train_acc 0.1150 train_loss 2.6222 | lr 0.0100
| epoch 1 | test_acc 0.1128 test_loss 2.6013 | lr 0.0100
---------------------------------------------------------------------
| epoch 2 | train_acc 0.1405 train_loss 2.5153 | lr 0.0100
| epoch 2 | test_acc 0.1860 test_loss 2.3967 | lr 0.0100
---------------------------------------------------------------------
| epoch 3 | train_acc 0.1987 train_loss 2.2949 | lr 0.0100
| epoch 3 | test_acc 0.1938 test_loss 2.2864 | lr 0.0100
---------------------------------------------------------------------
| epoch 4 | train_acc 0.2620 train_loss 2.1522 | lr 0.0100
| epoch 4 | test_acc 0.3000 test_loss 2.0298 | lr 0.0100
---------------------------------------------------------------------
| epoch 5 | train_acc 0.3127 train_loss 2.0171 | lr 0.0100
| epoch 5 | test_acc 0.3149 test_loss 1.9701 | lr 0.0100
---------------------------------------------------------------------
| epoch 6 | train_acc 0.3714 train_loss 1.8808 | lr 0.0100
| epoch 6 | test_acc 0.3508 test_loss 1.9691 | lr 0.0100
---------------------------------------------------------------------
| epoch 7 | train_acc 0.4099 train_loss 1.7816 | lr 0.0100
| epoch 7 | test_acc 0.4322 test_loss 1.7314 | lr 0.0100
---------------------------------------------------------------------
| epoch 8 | train_acc 0.4507 train_loss 1.6637 | lr 0.0100
| epoch 8 | test_acc 0.4678 test_loss 1.6182 | lr 0.0100
---------------------------------------------------------------------
| epoch 9 | train_acc 0.4877 train_loss 1.5725 | lr 0.0100
| epoch 9 | test_acc 0.5041 test_loss 1.5307 | lr 0.0100
---------------------------------------------------------------------
| epoch 10 | train_acc 0.5080 train_loss 1.4986 | lr 0.0100
| epoch 10 | test_acc 0.4579 test_loss 1.6603 | lr 0.0100
---------------------------------------------------------------------
| epoch 11 | train_acc 0.5290 train_loss 1.4443 | lr 0.0100
| epoch 11 | test_acc 0.5182 test_loss 1.4684 | lr 0.0100
---------------------------------------------------------------------
| epoch 12 | train_acc 0.5518 train_loss 1.3683 | lr 0.0100
| epoch 12 | test_acc 0.5145 test_loss 1.4765 | lr 0.0100
---------------------------------------------------------------------
| epoch 13 | train_acc 0.5759 train_loss 1.3115 | lr 0.0100
| epoch 13 | test_acc 0.5512 test_loss 1.3966 | lr 0.0100
---------------------------------------------------------------------
| epoch 14 | train_acc 0.5948 train_loss 1.2573 | lr 0.0100
| epoch 14 | test_acc 0.5153 test_loss 1.4676 | lr 0.0100
---------------------------------------------------------------------
| epoch 15 | train_acc 0.6159 train_loss 1.1923 | lr 0.0100
| epoch 15 | test_acc 0.6103 test_loss 1.2591 | lr 0.0100
---------------------------------------------------------------------
| epoch 16 | train_acc 0.6320 train_loss 1.1504 | lr 0.0100
| epoch 16 | test_acc 0.5942 test_loss 1.2556 | lr 0.0100
---------------------------------------------------------------------
| epoch 17 | train_acc 0.6514 train_loss 1.1079 | lr 0.0100
| epoch 17 | test_acc 0.5876 test_loss 1.2930 | lr 0.0100
---------------------------------------------------------------------
| epoch 18 | train_acc 0.6612 train_loss 1.0774 | lr 0.0100
| epoch 18 | test_acc 0.6202 test_loss 1.1998 | lr 0.0100
---------------------------------------------------------------------
| epoch 19 | train_acc 0.6725 train_loss 1.0297 | lr 0.0100
| epoch 19 | test_acc 0.6492 test_loss 1.1037 | lr 0.0100
---------------------------------------------------------------------
| epoch 20 | train_acc 0.6890 train_loss 0.9942 | lr 0.0100
| epoch 20 | test_acc 0.6727 test_loss 1.0470 | lr 0.0100
---------------------------------------------------------------------
| epoch 21 | train_acc 0.6955 train_loss 0.9585 | lr 0.0100
| epoch 21 | test_acc 0.6620 test_loss 1.0686 | lr 0.0100
---------------------------------------------------------------------
| epoch 22 | train_acc 0.7079 train_loss 0.9375 | lr 0.0100
| epoch 22 | test_acc 0.6835 test_loss 1.0248 | lr 0.0100
---------------------------------------------------------------------
| epoch 23 | train_acc 0.7182 train_loss 0.8986 | lr 0.0100
| epoch 23 | test_acc 0.6645 test_loss 1.0549 | lr 0.0100
---------------------------------------------------------------------
| epoch 24 | train_acc 0.7319 train_loss 0.8636 | lr 0.0100
| epoch 24 | test_acc 0.7079 test_loss 0.9836 | lr 0.0100
---------------------------------------------------------------------
| epoch 25 | train_acc 0.7354 train_loss 0.8479 | lr 0.0100
| epoch 25 | test_acc 0.6818 test_loss 1.0112 | lr 0.0100
---------------------------------------------------------------------
| epoch 26 | train_acc 0.7474 train_loss 0.8160 | lr 0.0100
| epoch 26 | test_acc 0.7033 test_loss 0.9536 | lr 0.0100
---------------------------------------------------------------------
| epoch 27 | train_acc 0.7545 train_loss 0.7959 | lr 0.0100
| epoch 27 | test_acc 0.7256 test_loss 0.9125 | lr 0.0100
---------------------------------------------------------------------
| epoch 28 | train_acc 0.7575 train_loss 0.7768 | lr 0.0100
| epoch 28 | test_acc 0.7273 test_loss 0.9175 | lr 0.0100
---------------------------------------------------------------------
| epoch 29 | train_acc 0.7687 train_loss 0.7512 | lr 0.0100
| epoch 29 | test_acc 0.7260 test_loss 0.8718 | lr 0.0100
---------------------------------------------------------------------
| epoch 30 | train_acc 0.7731 train_loss 0.7341 | lr 0.0100
| epoch 30 | test_acc 0.7343 test_loss 0.8861 | lr 0.0100
---------------------------------------------------------------------
| epoch 31 | train_acc 0.7822 train_loss 0.7141 | lr 0.0100
| epoch 31 | test_acc 0.7492 test_loss 0.8450 | lr 0.0100
---------------------------------------------------------------------
| epoch 32 | train_acc 0.7870 train_loss 0.6887 | lr 0.0100
| epoch 32 | test_acc 0.7517 test_loss 0.8145 | lr 0.0100
---------------------------------------------------------------------
| epoch 33 | train_acc 0.7909 train_loss 0.6792 | lr 0.0100
| epoch 33 | test_acc 0.7450 test_loss 0.8440 | lr 0.0100
---------------------------------------------------------------------
| epoch 34 | train_acc 0.7959 train_loss 0.6637 | lr 0.0100
| epoch 34 | test_acc 0.7607 test_loss 0.8006 | lr 0.0100
---------------------------------------------------------------------
| epoch 35 | train_acc 0.8019 train_loss 0.6419 | lr 0.0100
| epoch 35 | test_acc 0.7612 test_loss 0.7860 | lr 0.0100
---------------------------------------------------------------------
| epoch 36 | train_acc 0.8079 train_loss 0.6362 | lr 0.0100
| epoch 36 | test_acc 0.7603 test_loss 0.8057 | lr 0.0100
---------------------------------------------------------------------
| epoch 37 | train_acc 0.8098 train_loss 0.6221 | lr 0.0100
| epoch 37 | test_acc 0.7653 test_loss 0.7906 | lr 0.0100
---------------------------------------------------------------------
| epoch 38 | train_acc 0.8177 train_loss 0.6031 | lr 0.0100
| epoch 38 | test_acc 0.7616 test_loss 0.7955 | lr 0.0100
---------------------------------------------------------------------
| epoch 39 | train_acc 0.8175 train_loss 0.5919 | lr 0.0100
| epoch 39 | test_acc 0.7694 test_loss 0.7917 | lr 0.0100
---------------------------------------------------------------------
| epoch 40 | train_acc 0.8215 train_loss 0.5807 | lr 0.0100
| epoch 40 | test_acc 0.7719 test_loss 0.7851 | lr 0.0100
---------------------------------------------------------------------
| epoch 41 | train_acc 0.8254 train_loss 0.5681 | lr 0.0100
| epoch 41 | test_acc 0.7835 test_loss 0.7447 | lr 0.0100
---------------------------------------------------------------------
| epoch 42 | train_acc 0.8294 train_loss 0.5581 | lr 0.0100
| epoch 42 | test_acc 0.7872 test_loss 0.7360 | lr 0.0100
---------------------------------------------------------------------
| epoch 43 | train_acc 0.8365 train_loss 0.5316 | lr 0.0100
| epoch 43 | test_acc 0.7818 test_loss 0.7307 | lr 0.0100
---------------------------------------------------------------------
| epoch 44 | train_acc 0.8353 train_loss 0.5370 | lr 0.0100
| epoch 44 | test_acc 0.7893 test_loss 0.7321 | lr 0.0100
---------------------------------------------------------------------
| epoch 45 | train_acc 0.8385 train_loss 0.5235 | lr 0.0100
| epoch 45 | test_acc 0.7872 test_loss 0.7364 | lr 0.0100
---------------------------------------------------------------------
| epoch 46 | train_acc 0.8419 train_loss 0.5157 | lr 0.0100
| epoch 46 | test_acc 0.7595 test_loss 0.7891 | lr 0.0100
---------------------------------------------------------------------
| epoch 47 | train_acc 0.8457 train_loss 0.5024 | lr 0.0100
| epoch 47 | test_acc 0.7897 test_loss 0.7117 | lr 0.0100
---------------------------------------------------------------------
| epoch 48 | train_acc 0.8458 train_loss 0.4942 | lr 0.0100
| epoch 48 | test_acc 0.7901 test_loss 0.7154 | lr 0.0100
---------------------------------------------------------------------
| epoch 49 | train_acc 0.8523 train_loss 0.4861 | lr 0.0100
| epoch 49 | test_acc 0.7860 test_loss 0.7141 | lr 0.0100
---------------------------------------------------------------------
| epoch 50 | train_acc 0.8543 train_loss 0.4706 | lr 0.0100
| epoch 50 | test_acc 0.8008 test_loss 0.6949 | lr 0.0100
---------------------------------------------------------------------
  2. Model evaluation
model.eval()  # switch to evaluation mode
test_acc, test_loss = test(valid_dataloader, model, criterion)
print('Model accuracy: {:5.4f}'.format(test_acc))

Output:

Model accuracy: 0.7926
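
This already exceeds the 79% validation-accuracy target from the task list. One possible tuning direction, sketched here under the assumption that everything above stays unchanged (it is not the setup used for the results shown): replace plain SGD with Adam and decay the learning rate on a schedule.

optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

for epoch in range(1, EPOCHS + 1):
    model.train()
    train_acc, train_loss = train(train_dataloader, model, criterion, optimizer)
    model.eval()
    test_acc, test_loss = test(valid_dataloader, model, criterion)
    scheduler.step()   # halve the learning rate every 10 epochs
    # (keep the same print statements as above to monitor progress)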