BERT Text Classification

I've done text classification before, but a lot of that code was hand-rolled and not very standardized, so I recently cleaned it up.

# -*- coding:utf-8 -*-
# BERT text classification baseline model

import torch
import torch.nn as nn
import torch.utils.data as Data
import torch.optim as optim
from transformers import AutoModel, BertTokenizer
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt

train_curve = []
# makes it easy to move tensors between GPU and CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# number of samples per batch
batch_size = 2
# how many passes to make over the whole dataset
epoches = 20
# model name; you can also swap in another model, e.g. albert_chinese_small
model = "bert-base-chinese"
# this generally depends on the model; it is set in the model's config.json
hidden_size = 768
# number of classes; this is binary classification, so 2
n_class = 2
# maximum number of tokens per text: shorter texts are padded with the
# special [PAD] token up to maxlen, longer ones are simply truncated
maxlen = 20
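
# Quick illustration (a sketch of mine, not part of the pipeline): with the
# settings above, a short input such as '我喜欢' is encoded roughly as
#   input_ids      = [101, id_我, id_喜, id_欢, 102, 0, 0, ..., 0]  (length 20)
#   attention_mask = [1,   1,     1,     1,     1,   0, 0, ..., 0]
# where 101 = [CLS], 102 = [SEP], 0 = [PAD], and the character ids come from
# the bert-base-chinese vocabulary.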

# sample text data, so you don't have to hunt for your own; of course you can easily swap in your own data
sentences = ['我想用CNN做个情感分析,这个语句是我喜欢的',
             '哈哈哈,万年刮痧王李白终于加强了',
             '这个游戏好极了,个别英雄强度超标,游戏里面英雄种类丰富,我太菜,求大佬带飞',
             '我觉得是个好游戏',
             '这个模型准确度好垃圾,我不喜欢',
             '王者必糊,小学生没防到,还把一群初中生,什么时候没人脸识别,什么时候回归',
             '快去吧健康系统去掉,不然举报',
             '垃圾mht,还要人脸识别微信',
             '那些没脑子玩家就别下载了',
             ]
labels = [1, 1, 1, 1, 0, 0, 0, 0, 0]  # 1 = positive, 0 = negative.
# data processing: a custom Dataset class
class MyDataset(Data.Dataset):
    def __init__(self, sentences, labels=None, with_labels=True):
        self.tokenizer = BertTokenizer.from_pretrained(model)
        self.with_labels = with_labels
        self.sentences = sentences
        self.labels = labels

    def __len__(self):
        return len(self.sentences)

    def __getitem__(self, index):
        sent = self.sentences[index]
        encoded_pair = self.tokenizer(sent,
                                      padding='max_length',  # pad to max_length
                                      truncation=True,  # truncate to max_length
                                      max_length=maxlen,  # maximum length
                                      return_tensors='pt')  # return PyTorch tensors

        # input_ids: the text converted to token ids, wrapped with the special
        # tokens 101 ([CLS], start) and 102 ([SEP], end); 0 is the padding token
        token_ids = encoded_pair['input_ids'].squeeze(0)
        # attention_mask: 0 at padding positions, 1 everywhere else
        attn_masks = encoded_pair['attention_mask'].squeeze(0)
        # token_type_ids: 0 for the first segment, 1 for the second
        token_type_ids = encoded_pair['token_type_ids'].squeeze(0)

        if self.with_labels:
            label = self.labels[index]
            return token_ids, attn_masks, token_type_ids, label
        else:
            return token_ids, attn_masks, token_type_ids


train = Data.DataLoader(dataset=MyDataset(sentences, labels), batch_size=batch_size, shuffle=True, num_workers=0)
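
# Optional sanity check (my addition; uncomment to run): each batch is a tuple
# of four tensors; the first three have shape [batch_size, maxlen] and the
# label tensor has shape [batch_size].
# for token_ids, attn_masks, token_type_ids, label in train:
#     print(token_ids.shape, attn_masks.shape, token_type_ids.shape, label.shape)
#     break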


# model
class BertClassify(nn.Module):
    def __init__(self):
        super(BertClassify, self).__init__()
        self.bert = AutoModel.from_pretrained(model, output_hidden_states=True, return_dict=True)
        self.linear = nn.Linear(hidden_size, n_class)
        self.dropout = nn.Dropout(0.5)

    def forward(self, X):
        input_ids, attention_mask, token_type_ids = X[0], X[1], X[2]
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask,
                            token_type_ids=token_type_ids)  # returns an output dict
        # classify with the last-layer [CLS] vector
        # outputs.pooler_output: [bs, hidden_size]
        logits = self.linear(self.dropout(outputs.pooler_output))

        return logits
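
# Note: outputs.pooler_output is the last-layer [CLS] hidden state passed
# through an additional dense layer with a tanh activation. A common
# alternative (not what the forward() above does) is to take the raw [CLS]
# vector directly:
#   cls_vec = outputs.last_hidden_state[:, 0]  # [bs, hidden_size]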


bc = BertClassify().to(device)


optimizer = optim.Adam(bc.parameters(), lr=5e-3, weight_decay=1e-2)
loss_fn = nn.CrossEntropyLoss()
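
# Note: nn.CrossEntropyLoss applies log-softmax internally, so the model
# correctly returns raw logits without a softmax layer. Also note that
# lr=5e-3 is far above the 2e-5 to 5e-5 range the BERT paper recommends for
# fine-tuning; on a real dataset a smaller learning rate is usually safer.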

# train
sum_loss = 0
total_step = len(train)
for epoch in range(epoches):
    for i, batch in enumerate(train):
        optimizer.zero_grad()
        batch = tuple(p.to(device) for p in batch)
        pred = bc([batch[0], batch[1], batch[2]])
        loss = loss_fn(pred, batch[3])
        sum_loss += loss.item()
        loss.backward()
        optimizer.step()
        if epoch % 10 == 0:
            print('[{}|{}] step:{}/{} loss:{:.4f}'.format(epoch + 1, epoches, i + 1, total_step, loss.item()))
    train_curve.append(sum_loss)
    sum_loss = 0
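
# A sketch for persisting the fine-tuned weights (the file name 'bert_cls.pt'
# is just an example of mine, not from the original code):
# torch.save(bc.state_dict(), 'bert_cls.pt')
# bc.load_state_dict(torch.load('bert_cls.pt', map_location=device))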

# test
bc.eval()
with torch.no_grad():
    test_text = ['我干将无敌']
    test = MyDataset(test_text, labels=None, with_labels=False)
    x = test[0]  # same as test.__getitem__(0)
    x = tuple(p.unsqueeze(0).to(device) for p in x)
    pred = bc([x[0], x[1], x[2]])
    pred = pred.argmax(dim=1)
    if pred[0] == 0:
        print('negative')
    else:
        print('positive')

# loss curve
plt.plot(train_curve)
plt.show()
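
To predict several sentences at once instead of a single example, MyDataset with with_labels=False can be reused together with a DataLoader. The loop below is a minimal sketch of my own, not part of the original script (the test_texts here just reuse sentences from above):

# batch inference sketch, reusing MyDataset / DataLoader from above
test_texts = ['我干将无敌', '这个游戏好极了']
test_loader = Data.DataLoader(dataset=MyDataset(test_texts, labels=None, with_labels=False),
                              batch_size=batch_size, shuffle=False, num_workers=0)
bc.eval()
with torch.no_grad():
    for batch in test_loader:
        batch = tuple(p.to(device) for p in batch)
        pred = bc([batch[0], batch[1], batch[2]]).argmax(dim=1)
        for p in pred:
            print('positive' if p.item() == 1 else 'negative')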

If anything is unclear, or if I've gotten something wrong, corrections are welcome; you can also join QQ group 753035545.
