Text Classification with Word2vec

import time
import warnings

import pandas as pd
import torch
from torch import nn

warnings.filterwarnings("ignore")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

# Load the custom Chinese dataset
train_data = pd.read_csv('./train.csv', sep='\t', header=None)
print(train_data.head())

# Build a dataset iterator


def custom_data_iter(texts, labels):
    for x, y in zip(texts, labels):
        yield x, y


x = train_data[0].values[:]
# Class labels as strings; converted to integer indices later by label_pipeline
y = train_data[1].values[:]

cpu
0 1
0 还有双鸭山到淮阴的汽车票吗13号的 Travel-Query
1 从这里怎么回家 Travel-Query
2 随便播放一首专辑阁楼里的佛里的歌 Music-Play
3 给看一下墓王之王嘛 FilmTele-Play
4 我想看挑战两把s686打突变团竞的游戏视频 Video-Play

# Build the vocabulary
from gensim.models.word2vec import Word2Vec
import numpy as np

# Train the Word2Vec shallow neural network model
w2v = Word2Vec(vector_size=100,  # dimensionality of the word vectors (default: 100)
               min_count=3)      # drop words that occur fewer than min_count times (default: 5)

w2v.build_vocab(x)
w2v.train(x,
          total_examples=w2v.corpus_count,
          epochs=20)

(2733351, 3663560)
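Because x holds raw strings, gensim iterates over each string character by character, so the model learns character-level embeddings. A quick sanity check on the trained vectors (a minimal sketch; '歌' is an arbitrary choice and must meet the min_count threshold to be in the vocabulary):

# Inspect the nearest neighbours of one character in the embedding space
print(w2v.wv.most_similar('歌', topn=5))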

# Turn a text into a single vector by accumulating the vectors of its
# in-vocabulary characters (note: despite the name, this sums rather than averages)
def average_vec(text):
    vec = np.zeros(100).reshape((1, 100))
    for word in text:
        try:
            vec += w2v.wv[word].reshape((1, 100))
        except KeyError:
            continue
    return vec
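A true mean-pooling variant would divide by the number of matched characters (a hedged sketch; the rescaling changes feature magnitudes, so the training results shown below would differ):

def mean_vec(text):
    # average (rather than sum) the vectors of in-vocabulary characters
    vecs = [w2v.wv[w] for w in text if w in w2v.wv]
    if not vecs:
        return np.zeros((1, 100))
    return np.mean(vecs, axis=0).reshape((1, 100))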


# Collect the sentence vectors into an ndarray and save the model
x_vec = np.concatenate([average_vec(z) for z in x])
w2v.save('w2v_model.pkl')

# The iterator must yield raw texts: collate_batch vectorizes them itself,
# so passing the precomputed x_vec here would double-process the data
train_iter = custom_data_iter(x, y)
print(len(x), len(x_vec))
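The saved model can be restored later without retraining (a minimal sketch using gensim's load API):

from gensim.models.word2vec import Word2Vec

w2v = Word2Vec.load('w2v_model.pkl')  # restores the full model, including w2v.wv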

label_name = list(set(train_data[1].values[:]))  # note: set iteration order varies between runs
print(label_name)

12100 12100
['Weather-Query', 'Other', 'TVProgram-Play', 'HomeAppliance-Control', 'Video-Play', 'Music-Play', 'Alarm-Update', 'Calendar-Query', 'Travel-Query', 'Audio-Play', 'FilmTele-Play', 'Radio-Listen']

# Create data batches and an iterator
from torch.utils.data import DataLoader
def text_pipeline(x): return average_vec(x)
def label_pipeline(x): return label_name.index(x)


print(text_pipeline("你在干嘛"))
print(label_pipeline("Travel-Query"))


def collate_batch(batch):
    label_list, text_list = [], []
    for (_text, _label) in batch:
        # collect the integer label
        label_list.append(label_pipeline(_label))
        # vectorize the text and collect the tensor
        processed_text = torch.tensor(
            text_pipeline(_text), dtype=torch.float32)
        text_list.append(processed_text)

    label_list = torch.tensor(label_list, dtype=torch.int64)
    text_list = torch.cat(text_list)

    return text_list.to(device), label_list.to(device)


# Note: a plain generator is not a torch Dataset; before real training below,
# the iterator is wrapped with to_map_style_dataset so it can be indexed
dataloader = DataLoader(train_iter, batch_size=8,
                        shuffle=False, collate_fn=collate_batch)

[[ 0.49125925 1.76200503 1.42836637 0.2336775 -1.86941468 -0.64106666
2.0376932 0.90230727 1.13129044 -1.13996162 -0.72468241 -3.84760308
1.6134553 -0.3777014 0.46848102 1.3129719 2.22339706 -2.35581883
3.51350563 -1.15079367 2.55433842 -0.52564386 -0.29135052 0.50830008
-1.15970288 -0.52854031 -1.94023792 -0.60582515 1.39502006 -0.70776732
2.26604638 0.66990583 -1.54779671 -0.51975434 0.73017757 -0.47893354
0.36721439 3.30658033 -2.06657192 0.19992781 -0.25798243 1.48585584
-0.62958179 0.77358438 -0.03864239 -0.09659288 1.58908416 -0.47846629
-3.53186813 2.06811777 0.3690801 -1.70200042 -0.82840479 0.91278509
-0.50800064 -1.34892554 1.68375771 -0.79423073 -0.03868832 1.18047251
2.23625344 -0.84048931 2.10552567 -0.74978155 -0.47328901 0.24064685
-1.63275661 1.17315704 1.43820919 1.09781681 -0.41357784 0.35364153
1.35206379 0.07496612 0.90073148 -0.81673267 -4.80953123 -0.09257413
1.12208835 -1.26514172 -1.27734715 0.42664272 -1.7156489 2.23204263
-2.3197163 -1.69975625 2.61684819 -1.13212183 -1.37916414 -0.34060088
0.30976317 -1.81669107 0.22761388 1.62882162 0.9501656 -1.58427442
0.12925234 0.91722789 -2.78218991 -0.05787323]]
8
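To sanity-check the whole pipeline, one batch can be pulled through the collate function (a minimal sketch; it wraps the generator in a map-style dataset, assuming torchtext is installed):

from torchtext.data.functional import to_map_style_dataset

sample_loader = DataLoader(to_map_style_dataset(custom_data_iter(x, y)),
                           batch_size=8, shuffle=False, collate_fn=collate_batch)
text_batch, label_batch = next(iter(sample_loader))
print(text_batch.shape, label_batch.shape)  # expected: torch.Size([8, 100]) torch.Size([8])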

# Build the model
class TextClassificationModel(nn.Module):
    def __init__(self, num_class):
        super(TextClassificationModel, self).__init__()
        self.fc = nn.Linear(100, num_class)  # 100-dim sentence vector -> class logits

    def forward(self, text):
        return self.fc(text)


# Initialize the model
num_class = len(label_name)
model = TextClassificationModel(num_class).to(device)
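A quick shape check confirms the mapping from 100-dim sentence vectors to num_class logits (a minimal sketch; the random input is illustrative only):

dummy = torch.randn(4, 100).to(device)  # 4 made-up sentence vectors
print(model(dummy).shape)  # expected: torch.Size([4, 12])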
# Define the training and evaluation functions
def train(dataloader):
    model.train()  # switch to training mode
    total_acc, train_loss, total_count = 0, 0, 0
    log_interval = 50
    start_time = time.time()
    for idx, (text, label) in enumerate(dataloader):
        predicted_label = model(text)
        optimizer.zero_grad()  # reset the gradients
        loss = criterion(predicted_label, label)  # loss between predictions and ground-truth labels
        loss.backward()  # backpropagation
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)  # gradient clipping
        optimizer.step()  # update the parameters

        # track accuracy and loss
        total_acc += (predicted_label.argmax(1) == label).sum().item()
        train_loss += loss.item()
        total_count += label.size(0)
        if idx % log_interval == 0 and idx > 0:
            elapsed = time.time() - start_time
            print('|epoch{:d}|{:4d}/{:4d} batches|train_acc{:4.3f} train_loss{:4.5f}'.format(epoch, idx, len(dataloader),
                                                                                             total_acc / total_count, train_loss / total_count))
            total_acc, train_loss, total_count = 0, 0, 0
            start_time = time.time()


def evaluate(dataloader):
    model.eval()  # switch to evaluation mode
    total_acc, val_loss, total_count = 0, 0, 0
    with torch.no_grad():
        for text, label in dataloader:
            predicted_label = model(text)
            loss = criterion(predicted_label, label)  # compute the loss
            # accumulate evaluation metrics
            total_acc += (predicted_label.argmax(1) == label).sum().item()
            val_loss += loss.item()
            total_count += label.size(0)

    return total_acc / total_count, val_loss / total_count
# Split the dataset and run the model
from torch.utils.data.dataset import random_split
from torchtext.data.functional import to_map_style_dataset

# Hyperparameters
EPOCHS = 10  # number of training epochs
LR = 4  # learning rate
BATCH_SIZE = 64  # batch size for training

# Set the loss function, choose the optimizer, and set the learning-rate scheduler
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.1)
total_accu = None

# Build the dataset
train_iter = custom_data_iter(train_data[0].values[:], train_data[1].values[:])
train_dataset = to_map_style_dataset(train_iter)
split_train_, split_valid_ = random_split(train_dataset,
                                          [int(len(train_dataset) * 0.8), int(len(train_dataset) * 0.2)])
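random_split draws a fresh permutation each run; passing an explicit generator makes the split reproducible (an optional tweak, not in the original; the seed value is arbitrary):

split_train_, split_valid_ = random_split(
    train_dataset,
    [int(len(train_dataset) * 0.8), int(len(train_dataset) * 0.2)],
    generator=torch.Generator().manual_seed(42))  # fixed seed for reproducibility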

train_dataloader = DataLoader(
    split_train_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch)
valid_dataloader = DataLoader(
    split_valid_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch)

# Training loop
for epoch in range(1, EPOCHS + 1):
    epoch_start_time = time.time()
    train(train_dataloader)
    val_acc, val_loss = evaluate(valid_dataloader)
    # get the current learning rate
    lr = optimizer.state_dict()['param_groups'][0]['lr']
    if total_accu is not None and total_accu > val_acc:
        scheduler.step()
    else:
        total_accu = val_acc
    print('-' * 69)
    print('| epoch {:d} | time:{:4.2f}s |'
          ' valid_acc {:4.3f} valid_loss {:4.3f} | lr {:4.6f}'.format(epoch, time.time() - epoch_start_time,
                                                                      val_acc, val_loss, lr))
    print('-' * 69)

test_acc, test_loss = evaluate(valid_dataloader)
print('Model accuracy: {:5.4f}'.format(test_acc))

|epoch1| 50/ 152 batches|train_acc0.751 train_loss0.02129
|epoch1| 100/ 152 batches|train_acc0.832 train_loss0.01504
|epoch1| 150/ 152 batches|train_acc0.843 train_loss0.01488

| epoch 1 | time:1.56s | valid_acc 0.828 valid_loss 0.016 | lr 4.000000

|epoch2| 50/ 152 batches|train_acc0.848 train_loss0.01405
|epoch2| 100/ 152 batches|train_acc0.839 train_loss0.01583
|epoch2| 150/ 152 batches|train_acc0.848 train_loss0.01442

| epoch 2 | time:1.16s | valid_acc 0.821 valid_loss 0.016 | lr 4.000000

|epoch3| 50/ 152 batches|train_acc0.885 train_loss0.00877
|epoch3| 100/ 152 batches|train_acc0.900 train_loss0.00762
|epoch3| 150/ 152 batches|train_acc0.899 train_loss0.00673

| epoch 3 | time:1.30s | valid_acc 0.865 valid_loss 0.009 | lr 0.400000

|epoch4| 50/ 152 batches|train_acc0.903 train_loss0.00671
|epoch4| 100/ 152 batches|train_acc0.899 train_loss0.00610
|epoch4| 150/ 152 batches|train_acc0.897 train_loss0.00590

| epoch 4 | time:1.26s | valid_acc 0.867 valid_loss 0.008 | lr 0.400000

|epoch5| 50/ 152 batches|train_acc0.906 train_loss0.00550

| epoch 10 | time:2.17s | valid_acc 0.872 valid_loss 0.007 | lr 0.000040

Model accuracy: 0.8719

# Run inference on a specific example
def predict(text, text_pipeline):
    with torch.no_grad():
        text = torch.tensor(text_pipeline(text), dtype=torch.float32)
        print(text.shape)
        output = model(text)
        return output.argmax(1).item()
 
 
ex_text_str = "还有双鸭山到淮阴的汽车票吗13号的"
model = model.to("cpu")
 
print("该文本的类别是: %s" % label_name[predict(ex_text_str, text_pipeline)])

torch.Size([1, 100])
The predicted category is: Travel-Query

Summary

Word2vec is a natural language processing technique that maps words to vector representations, making it possible to measure similarity between words. Although Word2vec is not itself a text classifier, the word vectors it produces are an important input to more complex classification models, such as neural networks. By training a Word2vec model, the words in a text can be converted into vectors, and these vectors then serve as input to a deep learning model (such as a CNN, RNN, or Transformer) that is trained as a text classifier. The overall process covers data preparation, Word2vec training, classifier design, training and evaluation, and finally deployment of the model to perform automatic text classification in real applications.
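As a concrete endpoint of that pipeline, the trained classifier can be persisted alongside the Word2vec model (a minimal sketch; the file name classifier.pth is an arbitrary assumption):

# Persist the trained classifier weights (file name is an arbitrary choice)
torch.save(model.state_dict(), 'classifier.pth')

# At inference time: rebuild the architecture and restore the weights
clf = TextClassificationModel(num_class)
clf.load_state_dict(torch.load('classifier.pth', map_location='cpu'))
clf.eval()  # switch to evaluation mode before serving predictions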

In a text classification task, the words in each text are first converted into vector representations with the Word2vec model, and a deep learning model is then built on top of these vectors. Through training, the model learns to map text vectors to the corresponding class labels. The process involves several steps: data preprocessing, model design and training, performance evaluation and optimization, and finally applying the trained model in real scenarios for efficient text classification. Combining Word2vec with deep learning provides a powerful and flexible approach to text classification.
