(Deep Learning Log) Week N6: Text Classification with Word2vec

🏡 My environment:

  • Language: Python 3.11.4
  • IDE: Jupyter Notebook
  • torch version: 2.0.1

I. Preparation

1. Data preprocessing

import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, datasets
import os, PIL, pathlib, warnings

warnings.filterwarnings("ignore")  # suppress warning messages

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device

import pandas as pd

# Load the data
train_data = pd.read_csv('/Users/wendyweng/Downloads/train.csv', sep='\t', header=None)
train_data.head()

# Build a dataset iterator
def coustom_data_iter(texts, labels):
    for x, y in zip(texts, labels):
        yield x, y

x = train_data[0].values[:]
y = train_data[1].values[:]
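
Before wiring the generator into a DataLoader, it is worth pulling a single (text, label) pair to confirm the two columns line up as expected (a minimal sanity check):

# Peek at the first sample; texts and labels should be paired correctly
sample_text, sample_label = next(coustom_data_iter(x, y))
print(sample_text, sample_label)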

2. Building the vocabulary

from gensim.models.word2vec import Word2Vec
import numpy as np

# Build the vocabulary, then train the word vectors
w2v = Word2Vec(vector_size=100,  # dimensionality of the vectors
               min_count=3)      # drop tokens appearing fewer than 3 times
w2v.build_vocab(x)
w2v.train(x,
          total_examples=w2v.corpus_count,
          epochs=20)

# Equivalent one-liner: w2v = Word2Vec(x, vector_size=100, min_count=3, epochs=20)
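
Because x is a list of raw strings, gensim iterates each one character by character, so the vocabulary here is character-level. Once training finishes you can probe the vectors; the probe character below is hypothetical and may have been filtered out by min_count in your corpus:

probe = "天"  # hypothetical character; pick any frequent character from your data
if probe in w2v.wv:
    print(w2v.wv.most_similar(probe, topn=5))  # nearest characters by cosine similarity
else:
    print(f"'{probe}' is not in the vocabulary")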

def average_vec(text):
    # Accumulate the vectors of all tokens in the text, skipping OOV tokens.
    # Note: despite the name, this returns a sum of vectors, not a true average.
    vec = np.zeros(100).reshape((1, 100))
    for word in text:
        try:
            vec += w2v.wv[word].reshape((1, 100))
        except KeyError:
            continue
    return vec

x_vec = np.concatenate([average_vec(z) for z in x])

w2v.save('/Users/wendyweng/Desktop/w2v_model.pkl')
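
The saved model can be reloaded later with gensim's Word2Vec.load, so the vectorizer used at inference time matches the one used for training (a minimal sketch, reusing the path from above):

from gensim.models.word2vec import Word2Vec

# Reload the trained vectors; w2v_loaded.wv behaves exactly like w2v.wv
w2v_loaded = Word2Vec.load('/Users/wendyweng/Desktop/w2v_model.pkl')
print(w2v_loaded.wv.vectors.shape)  # (vocabulary size, 100)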

# Iterate over the raw texts and labels; vectorization happens later in collate_batch
train_iter = coustom_data_iter(x, y)

len(x), len(x_vec)

label_name = list(sorted(set(train_data[1].values[:])))  # sort for a stable label order across runs
print(label_name)

3. Generating data batches and iterators

text_pipeline  = lambda x: average_vec(x)
label_pipeline = lambda x: label_name.index(x)

text_pipeline("你在干嘛")

label_pipeline("Travel-Query")
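
For reference, text_pipeline returns a (1, 100) NumPy array (the summed character vectors), while label_pipeline returns the integer index of the label within label_name:

vec = text_pipeline("你在干嘛")
print(type(vec), vec.shape)            # <class 'numpy.ndarray'> (1, 100)
print(label_pipeline("Travel-Query"))  # integer index of this label in label_name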

from torch.utils.data import DataLoader
from torchtext.data.functional import to_map_style_dataset

def collate_batch(batch):
    label_list, text_list = [], []

    for (_text, _label) in batch:
        label_list.append(label_pipeline(_label))

        # average_vec returns a (1, 100) array; cast it to a float tensor
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.float32)
        text_list.append(processed_text)

    label_list = torch.tensor(label_list, dtype=torch.int64)
    text_list = torch.cat(text_list)  # stack into a (batch, 100) tensor

    return text_list.to(device), label_list.to(device)

# Materialize the generator so DataLoader can sample from it
dataloader = DataLoader(to_map_style_dataset(train_iter),
                        batch_size=8,
                        shuffle=False,
                        collate_fn=collate_batch)
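
Pulling a single batch is a quick way to confirm that collate_batch produces what the model expects (a sanity check, assuming batch_size=8 as configured above):

# One batch: text is (batch, 100) float32, label is (batch,) int64
text_batch, label_batch = next(iter(dataloader))
print(text_batch.shape, text_batch.dtype)    # torch.Size([8, 100]) torch.float32
print(label_batch.shape, label_batch.dtype)  # torch.Size([8]) torch.int64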

II. Building the model

from torch import nn

class TextClassificationModel(nn.Module):

    def __init__(self, num_class):
        super(TextClassificationModel, self).__init__()
        # A single linear layer mapping the 100-dim text vector to class scores
        self.fc = nn.Linear(100, num_class)

    def forward(self, text):
        return self.fc(text)


num_class =len(label_name)
vacab_size =100000
em_size = 12
model = TextClassificationModel(num_class).to(device)
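
The classifier is deliberately minimal: one linear layer from the 100-dimensional text vector to the class scores, i.e. 100 × num_class weights plus num_class biases. A quick count confirms the size:

# Total trainable parameters: 100 * num_class (weights) + num_class (biases)
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(n_params)  # equals 101 * num_class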

import time

def train(dataloader):
    model.train()  # switch to training mode
    total_acc, train_loss, total_count = 0, 0, 0
    log_interval = 50
    start_time   = time.time()

    for idx, (text, label) in enumerate(dataloader):
        predicted_label = model(text)

        optimizer.zero_grad()                    # reset gradients to zero
        loss = criterion(predicted_label, label) # gap between predictions and true labels
        loss.backward()                          # backpropagation
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1) # gradient clipping
        optimizer.step()  # update the parameters

        # Track accuracy and loss
        total_acc   += (predicted_label.argmax(1) == label).sum().item()
        train_loss  += loss.item()
        total_count += label.size(0)

        if idx % log_interval == 0 and idx > 0:
            elapsed = time.time() - start_time
            print('| epoch {:1d} | {:4d}/{:4d} batches '
                  '| train_acc {:4.3f} train_loss {:4.5f}'.format(epoch, idx, len(dataloader),
                                              total_acc/total_count, train_loss/total_count))
            total_acc, train_loss, total_count = 0, 0, 0
            start_time = time.time()

def evaluate(dataloader):
    model.eval()  # switch to evaluation mode
    total_acc, train_loss, total_count = 0, 0, 0

    with torch.no_grad():
        for idx, (text, label) in enumerate(dataloader):
            predicted_label = model(text)

            loss = criterion(predicted_label, label)  # compute the loss
            # Track evaluation statistics
            total_acc   += (predicted_label.argmax(1) == label).sum().item()
            train_loss  += loss.item()
            total_count += label.size(0)

    return total_acc/total_count, train_loss/total_count



from torch.utils.data.dataset import random_split
from torchtext.data.functional import to_map_style_dataset

# Hyperparameters
EPOCHS     = 10 # number of epochs
LR         = 5  # learning rate
BATCH_SIZE = 64 # batch size for training

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
total_accu = None
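
StepLR with step_size=1 and gamma=0.1 multiplies the learning rate by 0.1 on every scheduler.step() call; since the loop below only steps the scheduler when validation accuracy stops improving, the rate stays at 5 until the first plateau. A standalone sketch (with a throwaway optimizer) of the decay:

# Throwaway optimizer just to illustrate StepLR's behaviour
# (PyTorch may warn that optimizer.step() was never called; fine for this demo)
demo_opt = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=5)
demo_sch = torch.optim.lr_scheduler.StepLR(demo_opt, step_size=1, gamma=0.1)
for _ in range(3):
    demo_sch.step()
    print(demo_opt.param_groups[0]['lr'])  # 0.5, then 0.05, then 0.005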

# Build the dataset
train_iter    = coustom_data_iter(train_data[0].values[:], train_data[1].values[:])
train_dataset = to_map_style_dataset(train_iter)

# Use an explicit remainder so the two split sizes always sum to len(train_dataset)
train_len = int(len(train_dataset) * 0.8)
split_train_, split_valid_ = random_split(train_dataset,
                                          [train_len, len(train_dataset) - train_len])

train_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)

valid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)

for epoch in range(1, EPOCHS + 1):
    epoch_start_time = time.time()
    train(train_dataloader)
    val_acc, val_loss = evaluate(valid_dataloader)
    
    # Read the current learning rate from the optimizer
    lr = optimizer.state_dict()['param_groups'][0]['lr']
    
    if total_accu is not None and total_accu > val_acc:
        scheduler.step()
    else:
        total_accu = val_acc
    print('-' * 69)
    print('| epoch {:1d} | time: {:4.2f}s | '
          'valid_acc {:4.3f} valid_loss {:4.3f} | lr {:4.6f}'.format(epoch,
                                           time.time() - epoch_start_time,
                                           val_acc,val_loss,lr))

    print('-' * 69)


III. Model prediction and validation

test_acc, test_loss = evaluate(valid_dataloader)
print('Model accuracy: {:5.4f}'.format(test_acc))

def predict(text, text_pipeline):
    with torch.no_grad():
        text = torch.tensor(text_pipeline(text), dtype=torch.float32)
        print(text.shape)
        output = model(text)
        return output.argmax(1).item()

ex_text_str = "还有双鸭山到淮阴的汽车票吗13号的"

model = model.to("cpu")
print("The category of this text is: %s" % label_name[predict(ex_text_str, text_pipeline)])
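
The same helper works for any other query; a small loop shows how you might score several texts in a row (the example strings here are invented):

# Classify a few more queries; the strings are made-up examples
for s in ["帮我订一张去北京的机票", "明天上海的天气怎么样"]:
    idx = predict(s, text_pipeline)
    print(s, "->", label_name[idx])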


IV. Summary

Pay close attention to details in the code, such as indentation and capitalization; a mistake in either will break it.
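
The word vectors were already saved with w2v.save; the linear classifier itself can be persisted the same way via PyTorch's standard state_dict pattern (a sketch; the filename is hypothetical, and the same num_class must be used when reloading):

# Save only the classifier's weights (filename is illustrative)
torch.save(model.state_dict(), 'w2v_text_clf.pth')

# Rebuild the architecture and load the weights back
clf = TextClassificationModel(num_class)
clf.load_state_dict(torch.load('w2v_text_clf.pth'))
clf.eval()  # inference mode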
