NLP Text Classification: MLP

# -*- coding: utf-8 -*-
# @Author: SuperLong
# @Email: miu_zxl@163.com
# @Time: 2024/8/25 20:26
import os
import torch
import argparse
import torch.nn as nn
from pathlib import Path
from sklearn.metrics import accuracy_score
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
import pickle as pkl
import warnings
warnings.filterwarnings('ignore')
path = Path(__file__).parent

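# read_data: load "<split>.txt" from the data directory. Each line holds one
# sample in the form "text<TAB>label-index"; the longest text length is also
# returned so every split can be padded/truncated to the same length.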
def read_data(data_str):
    text, label, max_len = [], [], []
    with open(os.path.join(args.data_dir, f'{data_str}.txt'), 'r', encoding='utf-8') as f:
        for line in f:
            if not line.strip():
                continue
            text_i, label_i = line.strip().split('\t')
            text.append(text_i)
            label.append(label_i)
            max_len.append(len(text_i))
    return text, label, max(max_len)

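# Datasets: map each character to its vocabulary id (1 = <UNK> for unseen
# characters), truncate to len_max, and right-pad with 0 (= <PAD>).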
class Datasets(Dataset):
    def __init__(self, text, label, word2idx, len_max):
        self.text = text
        self.label = label
        self.word2idx = word2idx
        self.len_max = len_max
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

    def __len__(self):
        return len(self.text)

    def __getitem__(self, idx):
        text_ = self.text[idx][:self.len_max]
        text_id = [self.word2idx.get(text_i, 1) for text_i in text_]
        text_id = text_id + [0] * (self.len_max - len(text_id))
        text_id = torch.tensor(text_id).to(self.device)
        label_ = self.label[idx]
        return text_id, torch.tensor(int(label_)).to(self.device)


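# build_dict: build a character-level vocabulary from the training texts plus a
# randomly initialized embedding table, and cache both to disk with pickle so
# later runs (and inference) reuse the exact same mapping.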
def build_dict(train_text, embedding_size):
    word2idx = {"<PAD>": 0, "<UNK>": 1}
    for text_i in train_text:
        for word in text_i:
            word2idx[word] = word2idx.get(word, len(word2idx))
    embedding = nn.Embedding(len(word2idx), embedding_size)
    with open(args.emb_words, 'wb') as f:
        pkl.dump([word2idx, embedding], f)
    return word2idx, embedding


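# Mymodel: embed the character ids, mean-pool over the sequence, and classify
# with a small feed-forward network (the "MLP" of the title).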
class Mymodel(nn.Module):
    def __init__(self, vocab_size, embedding_matrix, hidden_size, class_num, dropout_rate=0.1):
        super(Mymodel, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_size = embedding_matrix.weight.shape[1]
        self.hidden_size = hidden_size
        self.class_num = class_num
        self.emb = embedding_matrix
        self.hidden = nn.Linear(self.embedding_size, hidden_size)
        self.activation = nn.ReLU()
        self.dropout = nn.Dropout(dropout_rate)
        self.classifier = nn.Linear(hidden_size, class_num)

    def forward(self, x):
        x = self.emb(x)        # (batch, seq_len, embedding_size)
        x = x.mean(dim=1)      # mean-pool over the sequence dimension
        x = self.dropout(self.activation(self.hidden(x)))
        out = self.classifier(x)
        return out

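# generate: single-sentence inference. Rebuilds the vocabulary, loads the best
# checkpoint, and prints the predicted class name for each input text.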
def generate(args, texts):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    train_text, train_label, len_max = read_data('train')
    if os.path.exists(args.emb_words):
        with open(args.emb_words, 'rb') as f:
            datas = pkl.load(f)
        word2idx, embedding_matrix = datas[0], datas[1]
    else:
        word2idx, embedding_matrix = build_dict(train_text, args.embedding_size)
    model = Mymodel(args.vocab_size, embedding_matrix, args.hidden_size, args.class_num).to(device)
    model.load_state_dict(torch.load(args.best_model, map_location=device))
    model.eval()
    with open(os.path.join(args.data_dir, 'class.txt'), 'r', encoding='utf-8') as f:
        class_list = f.read().strip().split('\n')
    result = {}
    for text in texts:
        text_id = [word2idx.get(ch, 1) for ch in text[:len_max]]  # 1 = <UNK>
        text_id += [0] * (len_max - len(text_id))                 # 0 = <PAD>
        text_id = torch.tensor(text_id).unsqueeze(dim=0).to(device)
        with torch.no_grad():
            pred = model(text_id)
            pred = pred.argmax(dim=1).cpu().numpy().tolist()
            result[text] = class_list[pred[0]]
    print(result)


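# main: full train / validate / test pipeline. The model with the best
# validation accuracy is checkpointed and reloaded for the final test run.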
def main(args):
    # Load the training data
    train_text, train_label, len_max = read_data('train')

    if os.path.exists(args.emb_words):
        with open(args.emb_words, 'rb') as f:
            datas = pkl.load(f)
        word2idx, embedding_matrix = datas[0], datas[1]
    else:
        word2idx, embedding_matrix = build_dict(train_text, args.embedding_size)

    train_data = DataLoader(dataset=Datasets(train_text, train_label, word2idx, len_max), batch_size=args.batch_size,
                            shuffle=True)

    dev_text, dev_label, _ = read_data('dev')
    dev_data = DataLoader(dataset=Datasets(dev_text, dev_label, word2idx, len_max), batch_size=args.batch_size,
                          shuffle=False)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = Mymodel(args.vocab_size, embedding_matrix, args.hidden_size, args.class_num).to(device)
    loss_func = nn.CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=args.learning_rate)
    best_acc = float("-inf")
    for epoch in range(args.epoch):
        len_loss, counts = 0, 0
        train_pre, train_true = [], []
        model.train()
        for step, (text, label) in enumerate(train_data):
            label = label.to(device)
            optimizer.zero_grad()
            out = model(text)
            loss = loss_func(out, label)
            loss.backward()
            optimizer.step()
            len_loss += loss.item()
            counts += 1

            train_pre.extend(out.argmax(dim=1).cpu().numpy().tolist())
            train_true.extend(label.cpu().numpy().tolist())

            if step % 100 == 0:
                train_acc = accuracy_score(train_true, train_pre)
                print('Epoch:{0} step:{1} loss:{2:.4f} train_acc:{3:.4f}'.format(epoch, step, len_loss / counts,
                                                                                 train_acc))
        model.eval()
        dev_pre, dev_true = [], []
        for step, (text, label) in enumerate(dev_data):
            label = label.to(device)
            with torch.no_grad():
                out = model(text)
                pre = out.argmax(dim=1).cpu().numpy().tolist()
                label = label.cpu().numpy().tolist()
                dev_pre.extend(pre)
                dev_true.extend(label)
        dev_acc = accuracy_score(dev_true, dev_pre)
        print('Epoch:{0} dev_acc:{1:.4f}'.format(epoch, dev_acc))

        if best_acc < dev_acc:
            best_acc = dev_acc
            torch.save(model.state_dict(), args.best_model)
    print("训练结束!开始测试")

    test_text, test_label, _ = read_data('test')
    test_data = DataLoader(dataset=Datasets(test_text, test_label, word2idx, len_max), batch_size=args.batch_size,
                           shuffle=False)
    model = Mymodel(args.vocab_size, embedding_matrix, args.hidden_size, args.class_num).to(device)
    model.load_state_dict(torch.load(args.best_model, map_location=device))
    model.eval()
    test_pre, test_true = [], []
    for step, (text, label) in enumerate(test_data):
        label = label.to(device)
        with torch.no_grad():
            out = model(text)
            pre = out.argmax(dim=1).cpu().numpy().tolist()
            label = label.cpu().numpy().tolist()
            test_pre.extend(pre)
            test_true.extend(label)
    acc = accuracy_score(test_true, test_pre)
    print("Test accuracy: {:.4f}".format(acc))


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description="Hyperparameters for MLP text classification")
    parser.add_argument("--data_dir", type=str, default=os.path.join(path, 'data', 'class_data'))
    parser.add_argument("--out_dir", type=str, default=os.path.join(path, "results"))
    parser.add_argument("--best_model", type=str, default=os.path.join(path, "best-model-mlp.pth"))
    parser.add_argument("--emb_words", type=str, default=os.path.join(path, "emb_words-mlp.pkl"))
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--epoch", type=int, default=10)
    parser.add_argument("--learning_rate", type=float, default=0.001)
    parser.add_argument("--vocab_size", type=int, default=10000)
    parser.add_argument("--embedding_size", type=int, default=1024)
    parser.add_argument("--hidden_size", type=int, default=1024)
    parser.add_argument("--class_num", type=int, default=10)
    args = parser.parse_args()
    # Train, validate, and test
    main(args)
    # Inference on a sample sentence
    texts = ["吃饭了吗"]
    generate(args, texts)
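For reference, read_data expects each of train.txt, dev.txt and test.txt to hold one sample per line in the form "text<TAB>label-index", and generate expects class.txt to list one class name per line, where line i names label i. Below is a minimal sketch that writes a toy dataset in this layout; the directory matches the --data_dir default, and all file contents are made-up examples, not the real dataset:

# Sketch only: create a hypothetical two-class toy dataset in the expected layout.
import os

data_dir = os.path.join('data', 'class_data')   # matches the --data_dir default
os.makedirs(data_dir, exist_ok=True)

# One sample per line: "text<TAB>label-index" (contents are invented).
samples = [("今天股市大涨", 0), ("球队赢得了比赛", 1), ("央行下调利率", 0)]
for split in ('train', 'dev', 'test'):
    with open(os.path.join(data_dir, f'{split}.txt'), 'w', encoding='utf-8') as f:
        for text, label in samples:
            f.write(f'{text}\t{label}\n')

# class.txt: line i is the human-readable name of label i.
with open(os.path.join(data_dir, 'class.txt'), 'w', encoding='utf-8') as f:
    f.write('finance\nsports\n')

With real data, --class_num must equal the number of lines in class.txt (the toy example above has only two classes, while the script's default is 10).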

I currently focus on learning and sharing NLP techniques.

Thanks for your attention and support!