#Study Check-in Day 23# Sentiment Classification with GPT on MindSpore
1. Dataset Construction
import os
import numpy as np
import mindspore
from mindspore.dataset import text, GeneratorDataset, transforms
from mindspore import nn
from mindnlp.dataset import load_dataset
from mindnlp.transformers import GPTTokenizer
def process_dataset(dataset, tokenizer, max_seq_len=512, batch_size=4, shuffle=False):
    is_ascend = mindspore.get_context('device_target') == 'Ascend'

    def tokenize(text):
        # On Ascend, pad every sample to max_seq_len so batches keep a static shape;
        # on GPU/CPU, leave samples variable-length and pad per batch below.
        if is_ascend:
            tokenized = tokenizer(text, padding='max_length', truncation=True, max_length=max_seq_len)
        else:
            tokenized = tokenizer(text, truncation=True, max_length=max_seq_len)
        return tokenized['input_ids'], tokenized['attention_mask']

    if shuffle:
        dataset = dataset.shuffle(batch_size)

    # map dataset: tokenize the text column and cast labels to int32
    dataset = dataset.map(operations=[tokenize], input_columns="text", output_columns=['input_ids', 'attention_mask'])
    dataset = dataset.map(operations=transforms.TypeCast(mindspore.int32), input_columns="label", output_columns="labels")
    # batch dataset
    if is_ascend:
        dataset = dataset.batch(batch_size)
    else:
        dataset = dataset.padded_batch(batch_size, pad_info={'input_ids': (None, tokenizer.pad_token_id),
                                                             'attention_mask': (None, 0)})
    return dataset
# tokenizer
gpt_tokenizer = GPTTokenizer.from_pretrained('openai-gpt')
# add special tokens: the pretrained GPT vocabulary has no <bos>/<eos>/<pad>
special_tokens_dict = {
    "bos_token": "<bos>",
    "eos_token": "<eos>",
    "pad_token": "<pad>",
}
num_added_toks = gpt_tokenizer.add_special_tokens(special_tokens_dict)
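As a quick sanity check (a minimal sketch; the sample sentence is purely illustrative), you can confirm that all three tokens were registered and that the pad token now maps to a valid id:

# illustrative sanity check: 3 special tokens should have been added
print(num_added_toks)  # expected: 3
print(gpt_tokenizer.pad_token, gpt_tokenizer.pad_token_id)
sample = gpt_tokenizer("this movie was great", truncation=True, max_length=16)
print(sample['input_ids'])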
imdb_ds = load_dataset('imdb', split=['train', 'test'])
imdb_train = imdb_ds['train']
imdb_test = imdb_ds['test']
# split train dataset into train and valid datasets
imdb_train, imdb_val = imdb_train.split([0.7, 0.3])
dataset_train = process_dataset(imdb_train, gpt_tokenizer, shuffle=True)
dataset_val = process_dataset(imdb_val, gpt_tokenizer)
dataset_test = process_dataset(imdb_test, gpt_tokenizer)
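Before wiring up training, it helps to peek at one processed batch to confirm the column names and shapes (a minimal sketch using MindSpore's standard create_dict_iterator):

# peek at a single batch; columns are input_ids, attention_mask, labels
for batch in dataset_train.create_dict_iterator():
    print(batch['input_ids'].shape, batch['attention_mask'].shape, batch['labels'].shape)
    break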
2. Model Construction
from mindnlp.transformers import GPTForSequenceClassification
# engine, callback and metric imports (paths as of mindnlp 0.3.x; newer releases may reorganize them)
from mindnlp._legacy.engine import Trainer, Evaluator
from mindnlp._legacy.engine.callbacks import CheckpointCallback, BestModelCallback
from mindnlp._legacy.metrics import Accuracy

# set GPT config and define parameters for training
model = GPTForSequenceClassification.from_pretrained('openai-gpt', num_labels=2)
model.config.pad_token_id = gpt_tokenizer.pad_token_id
# grow the embedding table to cover the 3 special tokens added above
model.resize_token_embeddings(model.config.vocab_size + 3)

optimizer = nn.Adam(model.trainable_params(), learning_rate=2e-5)
metric = Accuracy()

# define callbacks to save checkpoints
ckpoint_cb = CheckpointCallback(save_path='checkpoint', ckpt_name='gpt_imdb_finetune', epochs=1, keep_checkpoint_max=2)
best_model_cb = BestModelCallback(save_path='checkpoint', ckpt_name='gpt_imdb_finetune_best', auto_load=True)

# evaluate on the held-out validation split during training
trainer = Trainer(network=model, train_dataset=dataset_train,
                  eval_dataset=dataset_val, metrics=metric,
                  epochs=1, optimizer=optimizer, callbacks=[ckpoint_cb, best_model_cb],
                  jit=False)
trainer.run(tgt_columns="labels")
3. Model Evaluation
evaluator = Evaluator(network=model, eval_dataset=dataset_test, metrics=metric)
evaluator.run(tgt_columns="labels")
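After evaluation, the fine-tuned model can also be tried on a single review. The helper below is a minimal sketch, not part of the original tutorial: predict_sentiment, the sample sentence, and the assumption that the mindnlp model call returns an output object with a .logits field are all illustrative.

# hypothetical helper: classify one review with the fine-tuned model
def predict_sentiment(review):
    encoded = gpt_tokenizer(review, truncation=True, max_length=512)
    input_ids = mindspore.Tensor([encoded['input_ids']], mindspore.int32)
    logits = model(input_ids).logits  # assumes an output object with .logits
    label = int(logits.argmax(-1).asnumpy()[0])  # IMDB convention: 0 = negative, 1 = positive
    return 'positive' if label == 1 else 'negative'

print(predict_sentiment("One of the best films I have seen in years."))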
4. Learning Notes
Through pre-training on large-scale corpora, GPT learns rich language representations and contextual information. For sentiment classification, the pre-trained model can be fine-tuned to recognize and classify the sentiment expressed in text, such as positive, negative, or neutral. During fine-tuning, the model learns to extract sentiment-related features from the input text and map them to the predefined sentiment classes. The strength of this approach is that GPT captures complex semantics and contextual relationships in text, which improves both the accuracy and the robustness of sentiment classification.
In this exercise, the GPTForSequenceClassification model provided by MindNLP on top of MindSpore was fine-tuned as shown above, reaching 'Accuracy': 0.9248 on the test set.