Short-Text Sentiment Classification with a Pretrained BERT Model

Author: Lu Ping

# First, install PaddleNLP 2.0.
!pip install paddlenlp
# Import the required modules.
import paddle
import paddlenlp as ppnlp
from paddlenlp.data import Stack, Pad, Tuple
import paddle.nn.functional as F
import numpy as np
from functools import partial  # partial() fixes some argument values of a function and returns a new callable
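
For readers unfamiliar with partial(), here is a minimal, self-contained illustration (not part of the fine-tuning pipeline) of how it fixes an argument and returns a new callable:

# Minimal illustration of functools.partial: fix the "factor" argument of scale()
def scale(x, factor):
    return x * factor

double = partial(scale, factor=2)  # "factor" is now fixed to 2
print(double(10))  # prints 20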

1. Data Loading

The dataset is ChnSentiCorp, a public Chinese sentiment analysis corpus. It can be loaded directly with PaddleNLP's ppnlp.datasets.ChnSentiCorp.get_datasets method.

# Use the ChnSentiCorp corpus built into paddlenlp, which is mainly intended for sentiment classification.
# The training set trains the model, the dev set is used for model selection, and the test set evaluates generalization.
train_ds, dev_ds, test_ds = ppnlp.datasets.ChnSentiCorp.get_datasets(['train','dev','test'])

# Get the list of labels
label_list = train_ds.get_labels()

# Take a look at the data: print the first 3 examples of the train, dev and test sets.
print("Training set samples: {}\n".format(train_ds[0:3]))
print("Dev set samples: {}\n".format(dev_ds[0:3]))
print("Test set samples: {}\n".format(test_ds[0:3]))

print("Number of training examples: {}".format(len(train_ds)))
print("Number of dev examples: {}".format(len(dev_ds)))
print("Number of test examples: {}".format(len(test_ds)))
Training set samples: [['选择珠江花园的原因就是方便,有电动扶梯直接到达海边,周围餐馆、食廊、商场、超市、摊位一应俱全。酒店装修一般,但还算整洁。 泳池在大堂的屋顶,因此很小,不过女儿倒是喜欢。 包的早餐是西式的,还算丰富。 服务吗,一般', '1'], ['15.4寸笔记本的键盘确实爽,基本跟台式机差不多了,蛮喜欢数字小键盘,输数字特方便,样子也很美观,做工也相当不错', '1'], ['房间太小。其他的都一般。。。。。。。。。', '0']]

Dev set samples: [['這間酒店環境和服務態度亦算不錯,但房間空間太小~~不宣容納太大件行李~~且房間格調還可以~~ 中餐廳的廣東點心不太好吃~~要改善之~~~~但算價錢平宜~~可接受~~ 西餐廳格調都很好~~但吃的味道一般且令人等得太耐了~~要改善之~~', '1'], ['<荐书> 推荐所有喜欢<红楼>的红迷们一定要收藏这本书,要知道当年我听说这本书的时候花很长时间去图书馆找和借都没能如愿,所以这次一看到当当有,马上买了,红迷们也要记得备货哦!', '1'], ['商品的不足暂时还没发现,京东的订单处理速度实在.......周二就打包完成,周五才发货...', '0']]

Test set samples: [['这个宾馆比较陈旧了,特价的房间也很一般。总体来说一般', '1'], ['怀着十分激动的心情放映,可是看着看着发现,在放映完毕后,出现一集米老鼠的动画片!开始还怀疑是不是赠送的个别现象,可是后来发现每张DVD后面都有!真不知道生产商怎么想的,我想看的是猫和老鼠,不是米老鼠!如果厂家是想赠送的话,那就全套米老鼠和唐老鸭都赠送,只在每张DVD后面添加一集算什么??简直是画蛇添足!!', '0'], ['还稍微重了点,可能是硬盘大的原故,还要再轻半斤就好了。其他要进一步验证。贴的几种膜气泡较多,用不了多久就要更换了,屏幕膜稍好点,但比没有要强多了。建议配赠几张膜让用用户自己贴。', '0']]

Number of training examples: 9600
Number of dev examples: 1200
Number of test examples: 1200

2. Data Preprocessing

# Use ppnlp.transformers.BertTokenizer for preprocessing; the tokenizer converts raw input text into the input format the model accepts.
tokenizer = ppnlp.transformers.BertTokenizer.from_pretrained("bert-base-chinese")
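
To get a feel for what the tokenizer produces, you can encode a single sentence directly. This is only a quick sketch; the dictionary keys below ("input_ids" and "segment_ids") are the ones this paddlenlp version returns and are the same keys used in convert_example further down (newer releases rename the second field to "token_type_ids").

# Quick sanity check of the tokenizer output on one sentence from the corpus
sample = tokenizer.encode(text="房间太小。其他的都一般。", max_seq_len=32)
print(sample["input_ids"])    # token IDs, including the special [CLS] and [SEP] tokens
print(sample["segment_ids"])  # all zeros for a single-sentence input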

# Data preprocessing: convert a raw example into model input features
def convert_example(example,tokenizer,label_list,max_seq_length=256,is_test=False):
    if is_test:
        text = example
    else:
        text, label = example
    # tokenizer.encode tokenizes the text, maps tokens to IDs, and adds the special tokens ([CLS]/[SEP])
    encoded_inputs = tokenizer.encode(text=text, max_seq_len=max_seq_length)
    input_ids = encoded_inputs["input_ids"]
    segment_ids = encoded_inputs["segment_ids"]

    if not is_test:
        label_map = {}
        for (i, l) in enumerate(label_list):
            label_map[l] = i

        label = label_map[label]
        label = np.array([label], dtype="int64")
        return input_ids, segment_ids, label
    else:
        return input_ids, segment_ids

# Helper that builds a data loader for a dataset
def create_dataloader(dataset, trans_fn=None, mode='train', batch_size=1, use_gpu=False, pad_token_id=0, batchify_fn=None):
    if trans_fn:
        dataset = dataset.apply(trans_fn, lazy=True)

    if mode == 'train' and use_gpu:
        sampler = paddle.io.DistributedBatchSampler(dataset=dataset, batch_size=batch_size, shuffle=True)
    else:
        shuffle = True if mode == 'train' else False  # do not shuffle unless this is the training set
        sampler = paddle.io.BatchSampler(dataset=dataset, batch_size=batch_size, shuffle=shuffle)  # build a batch sampler
    dataloader = paddle.io.DataLoader(dataset, batch_sampler=sampler, return_list=True, collate_fn=batchify_fn)
    return dataloader

# Use partial() to fix the tokenizer, label_list, max_seq_length and is_test arguments of convert_example
trans_fn = partial(convert_example, tokenizer=tokenizer, label_list=label_list, max_seq_length=128, is_test=False)
# Collate function: pad input_ids and segment_ids to the longest sequence in the batch, and stack the labels
batchify_fn = lambda samples, fn=Tuple(
    Pad(axis=0, pad_val=tokenizer.pad_token_id),  # input_ids
    Pad(axis=0, pad_val=tokenizer.pad_token_id),  # segment_ids
    Stack(dtype="int64")                          # labels
): [data for data in fn(samples)]
# Training data loader
train_loader = create_dataloader(train_ds, mode='train', batch_size=64, batchify_fn=batchify_fn, trans_fn=trans_fn)
# Dev data loader
dev_loader = create_dataloader(dev_ds, mode='dev', batch_size=64, batchify_fn=batchify_fn, trans_fn=trans_fn)
# Test data loader
test_loader = create_dataloader(test_ds, mode='test', batch_size=64, batchify_fn=batchify_fn, trans_fn=trans_fn)
[2021-01-23 20:44:09,216] [    INFO] - Found /home/aistudio/.paddlenlp/models/bert-base-chinese/bert-base-chinese-vocab.txt
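
The batchify_fn defined above pads the variable-length input_ids and segment_ids to a common length within each batch and stacks the labels into one array. A small illustration on a toy batch with made-up token IDs:

# Toy batch of two (input_ids, segment_ids, label) samples of different lengths (made-up IDs, illustration only)
toy_samples = [
    ([101, 2769, 102], [0, 0, 0], [1]),
    ([101, 2523, 1962, 102], [0, 0, 0, 0], [0]),
]
toy_input_ids, toy_segment_ids, toy_labels = batchify_fn(toy_samples)
print(toy_input_ids)  # the shorter sequence is right-padded with tokenizer.pad_token_id (0 for bert-base-chinese)
print(toy_labels)     # labels stacked into a single int64 array of shape (2, 1)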

3. Loading the Pretrained BERT Model

# Load BertForSequenceClassification, the fine-tuning network for text classification: it adds a fully connected classification layer on top of the pretrained BERT model.
# Since this sentiment task is a binary classification problem, set num_classes to 2.
# Note: the classification head is newly initialized, so loading will warn that classifier.weight and classifier.bias are not found in the pretrained checkpoint; they are learned during fine-tuning.
model = ppnlp.transformers.BertForSequenceClassification.from_pretrained("bert-base-chinese", num_classes=2)
[2021-01-23 20:44:12,052] [    INFO] - Already cached /home/aistudio/.paddlenlp/models/bert-base-chinese/bert-base-chinese.pdparams
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1245: UserWarning: Skip loading for classifier.weight. classifier.weight is not found in the provided dict.
  warnings.warn(("Skip loading for {}. ".format(key) + str(err)))
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1245: UserWarning: Skip loading for classifier.bias. classifier.bias is not found in the provided dict.
  warnings.warn(("Skip loading for {}. ".format(key) + str(err)))

4. Training the Model

# Set the training hyperparameters

# Learning rate
learning_rate = 1e-5
# Number of training epochs
epochs = 20
# Proportion of training steps used for learning-rate warmup
warmup_proportion = 0.1
# Weight decay coefficient
weight_decay = 0.01

num_training_steps = len(train_loader) * epochs
num_warmup_steps = int(warmup_proportion * num_training_steps)

# Linear warmup over the first num_warmup_steps steps, then linear decay to zero
def get_lr_factor(current_step):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    else:
        return max(0.0,
                    float(num_training_steps - current_step) /
                    float(max(1, num_training_steps - num_warmup_steps)))
# Learning-rate scheduler: linear warmup followed by linear decay
lr_scheduler = paddle.optimizer.lr.LambdaDecay(learning_rate, lr_lambda=get_lr_factor)
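
As a sanity check of the schedule: with 150 batches per epoch and 20 epochs there are 3000 training steps, so warmup_proportion = 0.1 gives 300 warmup steps; the factor climbs linearly from 0 to 1 over those steps and then decays linearly back to 0. A quick sketch to print a few values:

# Inspect the warmup/decay factor at a few representative steps (illustration only)
for s in [0, num_warmup_steps // 2, num_warmup_steps, num_training_steps // 2, num_training_steps]:
    print("step {}: lr factor {:.4f}".format(s, get_lr_factor(s)))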

# Optimizer: AdamW with weight decay applied to all parameters except biases and LayerNorm weights
optimizer = paddle.optimizer.AdamW(
    learning_rate=lr_scheduler,
    parameters=model.parameters(),
    weight_decay=weight_decay,
    apply_decay_param_fun=lambda x: x in [
        p.name for n, p in model.named_parameters()
        if not any(nd in n for nd in ["bias", "norm"])
    ])
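
The apply_decay_param_fun above restricts weight decay to parameters whose names contain neither "bias" nor "norm", i.e. biases and LayerNorm parameters are excluded, which is the usual practice for BERT fine-tuning. A quick, optional way to see which parameters are excluded:

# List the parameters that will NOT receive weight decay (names containing "bias" or "norm")
no_decay_params = [n for n, p in model.named_parameters() if any(nd in n for nd in ["bias", "norm"])]
print("{} parameters excluded from weight decay, e.g. {}".format(len(no_decay_params), no_decay_params[:3]))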

# Loss function
criterion = paddle.nn.loss.CrossEntropyLoss()
# Evaluation metric
metric = paddle.metric.Accuracy()
# Evaluation function: compute the average loss and accuracy over a data loader
def evaluate(model, criterion, metric, data_loader):
    model.eval()
    metric.reset()
    losses = []
    for batch in data_loader:
        input_ids, segment_ids, labels = batch
        logits = model(input_ids, segment_ids)
        loss = criterion(logits, labels)
        losses.append(loss.numpy())
        correct = metric.compute(logits, labels)
        metric.update(correct)
        accu = metric.accumulate()
    print("eval loss: %.5f, accu: %.5f" % (np.mean(losses), accu))
    model.train()
    metric.reset()
# Start training
global_step = 0
for epoch in range(1, epochs + 1):
    for step, batch in enumerate(train_loader, start=1):  # fetch batches from the training data loader
        input_ids, segment_ids, labels = batch
        logits = model(input_ids, segment_ids)
        loss = criterion(logits, labels)  # compute the loss
        probs = F.softmax(logits, axis=1)
        correct = metric.compute(probs, labels)
        metric.update(correct)
        acc = metric.accumulate()

        global_step += 1
        if global_step % 50 == 0 :
            print("global step %d, epoch: %d, batch: %d, loss: %.5f, acc: %.5f" % (global_step, epoch, step, loss, acc))
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.clear_gradients()
    evaluate(model, criterion, metric, dev_loader)
global step 50, epoch: 1, batch: 50, loss: 0.05353, acc: 0.98031
global step 100, epoch: 1, batch: 100, loss: 0.07696, acc: 0.97937
global step 150, epoch: 1, batch: 150, loss: 0.04546, acc: 0.97688
eval loss: 0.19252, accu: 0.94333
global step 200, epoch: 2, batch: 50, loss: 0.07705, acc: 0.97875
global step 250, epoch: 2, batch: 100, loss: 0.01547, acc: 0.97641
global step 300, epoch: 2, batch: 150, loss: 0.03168, acc: 0.97625
eval loss: 0.22408, accu: 0.94250
global step 350, epoch: 3, batch: 50, loss: 0.01953, acc: 0.97250
global step 400, epoch: 3, batch: 100, loss: 0.04426, acc: 0.97313
global step 450, epoch: 3, batch: 150, loss: 0.14212, acc: 0.97552
eval loss: 0.22331, accu: 0.94000
global step 500, epoch: 4, batch: 50, loss: 0.01269, acc: 0.98844
global step 550, epoch: 4, batch: 100, loss: 0.00617, acc: 0.98828
global step 600, epoch: 4, batch: 150, loss: 0.01005, acc: 0.98740
eval loss: 0.23532, accu: 0.94333
global step 650, epoch: 5, batch: 50, loss: 0.01199, acc: 0.99031
global step 700, epoch: 5, batch: 100, loss: 0.00847, acc: 0.98969
global step 750, epoch: 5, batch: 150, loss: 0.09089, acc: 0.98906
eval loss: 0.27197, accu: 0.94167
global step 800, epoch: 6, batch: 50, loss: 0.03908, acc: 0.99094
global step 850, epoch: 6, batch: 100, loss: 0.00353, acc: 0.99094
global step 900, epoch: 6, batch: 150, loss: 0.02894, acc: 0.99021
eval loss: 0.25898, accu: 0.94583
global step 950, epoch: 7, batch: 50, loss: 0.00265, acc: 0.99281
global step 1000, epoch: 7, batch: 100, loss: 0.01475, acc: 0.99297
global step 1050, epoch: 7, batch: 150, loss: 0.01665, acc: 0.99250
eval loss: 0.30667, accu: 0.93833
global step 1100, epoch: 8, batch: 50, loss: 0.03144, acc: 0.99531
global step 1150, epoch: 8, batch: 100, loss: 0.01550, acc: 0.99328
global step 1200, epoch: 8, batch: 150, loss: 0.02013, acc: 0.99260
eval loss: 0.30897, accu: 0.94000
global step 1250, epoch: 9, batch: 50, loss: 0.00162, acc: 0.99594
global step 1300, epoch: 9, batch: 100, loss: 0.00215, acc: 0.99438
global step 1350, epoch: 9, batch: 150, loss: 0.00489, acc: 0.99313
eval loss: 0.29696, accu: 0.94167
global step 1400, epoch: 10, batch: 50, loss: 0.00503, acc: 0.99438
global step 1450, epoch: 10, batch: 100, loss: 0.00087, acc: 0.99516
global step 1500, epoch: 10, batch: 150, loss: 0.01060, acc: 0.99458
eval loss: 0.35558, accu: 0.93750
global step 1550, epoch: 11, batch: 50, loss: 0.01001, acc: 0.99500
global step 1600, epoch: 11, batch: 100, loss: 0.01988, acc: 0.99500
global step 1650, epoch: 11, batch: 150, loss: 0.01513, acc: 0.99479
eval loss: 0.34632, accu: 0.93750
global step 1700, epoch: 12, batch: 50, loss: 0.00128, acc: 0.99375
global step 1750, epoch: 12, batch: 100, loss: 0.00086, acc: 0.99500
global step 1800, epoch: 12, batch: 150, loss: 0.01030, acc: 0.99438
eval loss: 0.34417, accu: 0.94083
global step 1850, epoch: 13, batch: 50, loss: 0.02894, acc: 0.99438
global step 1900, epoch: 13, batch: 100, loss: 0.00071, acc: 0.99500
global step 1950, epoch: 13, batch: 150, loss: 0.03028, acc: 0.99438
eval loss: 0.31980, accu: 0.94500
global step 2000, epoch: 14, batch: 50, loss: 0.00038, acc: 0.99687
global step 2050, epoch: 14, batch: 100, loss: 0.00041, acc: 0.99609
global step 2100, epoch: 14, batch: 150, loss: 0.02918, acc: 0.99542
eval loss: 0.31564, accu: 0.94167
global step 2150, epoch: 15, batch: 50, loss: 0.00064, acc: 0.99531
global step 2200, epoch: 15, batch: 100, loss: 0.00115, acc: 0.99500
global step 2250, epoch: 15, batch: 150, loss: 0.00042, acc: 0.99542
eval loss: 0.33883, accu: 0.94500
global step 2300, epoch: 16, batch: 50, loss: 0.00077, acc: 0.99500
global step 2350, epoch: 16, batch: 100, loss: 0.00024, acc: 0.99547
global step 2400, epoch: 16, batch: 150, loss: 0.00029, acc: 0.99542
eval loss: 0.35941, accu: 0.94250
global step 2450, epoch: 17, batch: 50, loss: 0.00038, acc: 0.99656
global step 2500, epoch: 17, batch: 100, loss: 0.01986, acc: 0.99578
global step 2550, epoch: 17, batch: 150, loss: 0.02258, acc: 0.99604
eval loss: 0.36785, accu: 0.94167
global step 2600, epoch: 18, batch: 50, loss: 0.00060, acc: 0.99531
global step 2650, epoch: 18, batch: 100, loss: 0.02872, acc: 0.99625
global step 2700, epoch: 18, batch: 150, loss: 0.03202, acc: 0.99646
eval loss: 0.37942, accu: 0.94333
global step 2750, epoch: 19, batch: 50, loss: 0.00065, acc: 0.99594
global step 2800, epoch: 19, batch: 100, loss: 0.00027, acc: 0.99641
global step 2850, epoch: 19, batch: 150, loss: 0.03275, acc: 0.99604
eval loss: 0.37395, accu: 0.94417
global step 2900, epoch: 20, batch: 50, loss: 0.00048, acc: 0.99656
global step 2950, epoch: 20, batch: 100, loss: 0.00047, acc: 0.99656
global step 3000, epoch: 20, batch: 150, loss: 0.01033, acc: 0.99656
eval loss: 0.37231, accu: 0.94417
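
The data section noted that the test set is meant to estimate generalization, but the run above only reports dev-set metrics. A one-line sketch to score the fine-tuned model on the held-out test split, reusing the evaluate function defined earlier:

# Evaluate generalization on the held-out test set
evaluate(model, criterion, metric, test_loader)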

5. Model Prediction

def predict(model, data, tokenizer, label_map, batch_size=1):
    examples = []
    for text in data:
        input_ids, segment_ids = convert_example(text, tokenizer, label_list=label_map.values(),  max_seq_length=128, is_test=True)
        examples.append((input_ids, segment_ids))

    batchify_fn = lambda samples, fn=Tuple(Pad(axis=0, pad_val=tokenizer.pad_token_id), Pad(axis=0, pad_val=tokenizer.pad_token_id)): fn(samples)
    batches = []
    one_batch = []
    for example in examples:
        one_batch.append(example)
        if len(one_batch) == batch_size:
            batches.append(one_batch)
            one_batch = []
    if one_batch:
        batches.append(one_batch)

    results = []
    model.eval()
    for batch in batches:
        input_ids, segment_ids = batchify_fn(batch)
        input_ids = paddle.to_tensor(input_ids)
        segment_ids = paddle.to_tensor(segment_ids)
        logits = model(input_ids, segment_ids)
        probs = F.softmax(logits, axis=1)
        idx = paddle.argmax(probs, axis=1).numpy()
        idx = idx.tolist()
        labels = [label_map[i] for i in idx]
        results.extend(labels)
    return results
# Two example sentences to classify (in Chinese, matching the Chinese BERT model)
data = ['这个商品虽然看着样式挺好看的,但是不耐用。', '这个老师讲课水平挺高的。']
# Map numeric class IDs to human-readable sentiment labels
label_map = {0: 'negative', 1: 'positive'}

predictions = predict(model, data, tokenizer, label_map, batch_size=32)
for idx, text in enumerate(data):
    print('Text: {} \nPredicted sentiment: {}'.format(text, predictions[idx]))
Text: 这个商品虽然看着样式挺好看的,但是不耐用。 
Predicted sentiment: negative
Text: 这个老师讲课水平挺高的。 
Predicted sentiment: positive

The full open-source project is available at: https://aistudio.baidu.com/aistudio/projectdetail/1473504
