前言
安装环境
!pip install tokenizers==0.15.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
# 该案例在 mindnlp 0.3.1 版本完成适配,如果发现案例跑不通,可以指定mindnlp版本,执行`!pip install mindnlp==0.3.1`
!pip install mindnlp
数据加载
from mindnlp.utils import http_get
# download dataset
# NLPCC 2017 single-document summarization data: one JSON record per line
# with 'article' and 'summarization' fields (see process_dataset below).
url = 'https://download.mindspore.cn/toolkits/mindnlp/dataset/text_generation/nlpcc2017/train_with_summ.txt'
# http_get downloads the file into the current directory and returns the local path
path = http_get(url, './')
数据预处理
原始数据格式:
article: [CLS] article_context [SEP]
summary: [CLS] summary_context [SEP]
预处理后的数据格式:
[CLS] article_context [SEP] summary_context [SEP]
import json
import numpy as np
# preprocess dataset
# preprocess dataset
def process_dataset(dataset, tokenizer, batch_size=6, max_seq_len=1024, shuffle=False):
    """Tokenize, merge and batch the summarization dataset for causal-LM training.

    Each raw record is a JSON line with 'article' and 'summarization' fields.
    Article and summary are merged into ``[CLS] article [SEP] summary [SEP]``,
    padded to ``max_seq_len`` (only the article side is truncated).

    Args:
        dataset: source text dataset (one JSON record per line).
        tokenizer: tokenizer providing the HF-style ``__call__`` interface.
        batch_size: number of samples per batch.
        max_seq_len: fixed sequence length after padding/truncation.
        shuffle: whether to shuffle after batching.

    Returns:
        The mapped and batched dataset with columns ``input_ids`` and ``labels``.
    """
    def _parse_record(text):
        # Decode one JSON line into (article, summary) numpy scalars.
        record = json.loads(text.tobytes())
        return np.array(record['article']), np.array(record['summarization'])

    def _merge_and_pad(article, summary):
        # Tokenize the pair; pad to max_seq_len and truncate only the
        # article so the summary part is kept intact.
        encoded = tokenizer(
            text=article,
            text_pair=summary,
            padding='max_length',
            truncation='only_first',
            max_length=max_seq_len,
        )
        # Labels are a copy of input_ids: the training model shifts them
        # by one position itself to form next-token targets.
        return encoded['input_ids'], encoded['input_ids']

    dataset = dataset.map(_parse_record, 'text', ['article', 'summary'])
    # change column names to input_ids and labels for the following training
    dataset = dataset.map(_merge_and_pad, ['article', 'summary'], ['input_ids', 'labels'])
    dataset = dataset.batch(batch_size)
    if shuffle:
        # NOTE(review): shuffling happens after batching with a buffer equal to
        # batch_size, which shuffles very little — confirm this is intended
        # (shuffling before batch with a larger buffer is the usual pattern).
        dataset = dataset.shuffle(batch_size)
    return dataset
模型构建
from mindspore import ops
from mindnlp.transformers import GPT2LMHeadModel
class GPT2ForSummarization(GPT2LMHeadModel):
    """GPT-2 LM-head model whose forward pass returns the summarization loss.

    The parent ``construct`` is called without labels, so it only produces
    logits; the shifted cross-entropy loss is computed here instead.
    """

    def construct(
        self,
        input_ids = None,
        attention_mask = None,
        labels = None,
    ):
        # Parent forward returns logits (no loss, since labels are not passed).
        logits = super().construct(input_ids=input_ids, attention_mask=attention_mask).logits
        # Align logits at position t with the target token at position t+1.
        shift_logits = logits[..., :-1, :]
        shift_labels = labels[..., 1:]
        # Flatten the tokens.
        # NOTE(review): `tokenizer` is a module-level global defined elsewhere
        # in the notebook — confirm it is in scope before training.
        flat_logits = shift_logits.view(-1, shift_logits.shape[-1])
        flat_labels = shift_labels.view(-1)
        loss = ops.cross_entropy(flat_logits, flat_labels, ignore_index=tokenizer.pad_token_id)
        return loss
模型训练
模型推理
def process_test_dataset(dataset, tokenizer, batch_size=1, max_seq_len=1024, max_summary_len=100):
    """Tokenize and batch the test split for summary generation.

    Only the article is tokenized (no summary appended); its length is capped
    at ``max_seq_len - max_summary_len`` so generation has room to append up
    to ``max_summary_len`` summary tokens.

    Args:
        dataset: source text dataset (one JSON record per line).
        tokenizer: tokenizer providing the HF-style ``__call__`` interface.
        batch_size: number of samples per batch (default 1 for inference).
        max_seq_len: total sequence budget shared by article and summary.
        max_summary_len: token budget reserved for the generated summary.

    Returns:
        The mapped and batched dataset with an ``input_ids`` column.
    """
    def _parse_record(text):
        # Decode one JSON line into (article, summary) numpy scalars.
        record = json.loads(text.tobytes())
        return np.array(record['article']), np.array(record['summarization'])

    def _tokenize_article(article):
        # Truncate the article so the generated summary still fits the budget.
        encoded = tokenizer(text=article, truncation=True, max_length=max_seq_len - max_summary_len)
        return encoded['input_ids']

    dataset = dataset.map(_parse_record, 'text', ['article', 'summary'])
    dataset = dataset.map(_tokenize_article, 'article', ['input_ids'])
    dataset = dataset.batch(batch_size)
    return dataset
总结
使用mindnlp库实现GPT2模型进行文本摘要,采用BertTokenizer进行分词,使用线性预热和衰减的学习率策略进行模型训练。通过多种数据预处理和模型优化技术,训练并部署模型进行文本摘要推理。