I. Contents
- Code walkthrough
II. Implementation
1. Code walkthrough: Trainer-based implementation
transformers integrates DeepSpeed through the Trainer, so enabling DeepSpeed training only requires supplying a configuration file (a sketch of one is given below).
Fine-tuning workflow: define parameters -> preprocess data -> create model / define evaluation -> train with the Trainer framework.
Note: on V100 GPUs this example does not enable float16 training (flan-t5 is known to be unstable in fp16).
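The ds_config.json referenced below is not included in the original; the following is a minimal sketch of a ZeRO stage-3 configuration with CPU offload (the reason CPUAdam is pre-built in the code), assuming the standard Hugging Face integration where "auto" values are filled in from the training arguments. Treat the stage and offload choices as assumptions.

{
  "zero_optimization": {
    "stage": 3,
    "offload_optimizer": { "device": "cpu", "pin_memory": true },
    "offload_param": { "device": "cpu", "pin_memory": true },
    "overlap_comm": true,
    "stage3_gather_16bit_weights_on_model_save": true
  },
  "optimizer": {
    "type": "AdamW",
    "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" }
  },
  "gradient_accumulation_steps": "auto",
  "train_micro_batch_size_per_gpu": "auto",
  "train_batch_size": "auto"
}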
import deepspeed
# Pre-build and load the fused CPUAdam op up front; ZeRO optimizer CPU offload
# needs it, and compiling it lazily mid-training can fail on some setups.
deepspeed.ops.op_builder.CPUAdamBuilder().load()
import nltk
import torch
import evaluate
import datasets
import numpy as np
from nltk.tokenize import sent_tokenize
from torch.nn.utils.rnn import pad_sequence
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments
nltk.download("punkt")  # punkt sentence tokenizer, used by sent_tokenize in the metric sketch below
import gc
###################################### Define parameters ####################################
dataset_name = "samsum"  # dataset name
# model_name = "google/flan-t5-xxl"  # larger variant, needs considerably more memory
model_name = "google/flan-t5-xl"  # model name
max_input_length = 256
max_gen_length = 128
output_dir = "checkpoints"
num_train_epochs = 5
learning_rate = 5e-5
deepspeed_config = "ds_config.json"  # DeepSpeed configuration file (see the sketch above)
per_device_train_batch_size = 5  # per-GPU batch size; larger values cause OOM
per_device_eval_batch_size = 5
gradient_accumulation_steps = 10  # gradient accumulation enlarges the effective batch size: 5 * 10 = 50 per GPU
################################# Load the dataset and preprocess #########################################
tokenizer = AutoTokenizer.from_pretrained(model_name)
dataset = datasets.load_dataset(dataset_name)
print(dataset["train"][0])  # inspect one raw sample (fields: id, dialogue, summary)
def preprocess(examples):
    # prepend the T5 summarization prefix to each dialogue
    dialogues = ["summarize: " + dia for dia in examples["dialogue"]]
    model_inputs = tokenizer(dialogues, max_length=max_input_length, truncation=True)
    # tokenize the reference summaries as the targets
    labels = tokenizer(text_target=examples["summary"], max_length=max_gen_length, truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
tokenized_dataset = dataset.map(preprocess, batched=True, remove_columns=["dialogue", "summary", "id"])
# print(tokenized_dataset["train"]["input_ids"][0])  # inspect a sample after map
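The imports above (nltk, evaluate, sent_tokenize, numpy) point to a ROUGE-based metric function whose definition falls outside this excerpt. Below is a minimal sketch of the conventional setup for summarization, assuming the evaluate library's "rouge" metric; the name compute_metrics is an assumption.

metric = evaluate.load("rouge")

def compute_metrics(eval_preds):
    preds, labels = eval_preds
    if isinstance(preds, tuple):
        preds = preds[0]
    # -100 marks label positions ignored by the loss; restore pad tokens before decoding
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
    # rougeLsum expects sentences separated by newlines
    decoded_preds = ["\n".join(sent_tokenize(pred.strip())) for pred in decoded_preds]
    decoded_labels = ["\n".join(sent_tokenize(label.strip())) for label in decoded_labels]
    return metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)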
def collate_fn(features):
    # convert each example's token-id lists into tensors
    batch_input_ids = [torch.LongTensor(feature["input_ids"]) for feature in features]
    batch_attention_mask = [torch.LongTensor(feature["attention_mask"]) for feature in features]
    batch_labels = [torch.LongTensor(feature["labels"]) for feature in features]
    # pad to the longest sequence in the batch; -100 in labels is ignored by the loss
    batch_input_ids = pad_sequence(batch_input_ids, batch_first=True, padding_value=tokenizer.pad_token_id)
    batch_attention_mask = pad_sequence(batch_attention_mask, batch_first=True, padding_value=0)
    batch_labels = pad_sequence(batch_labels, batch_first=True, padding_value=-100)
    return {
        "input_ids": batch_input_ids,
        "attention_mask": batch_attention_mask,
        "labels": batch_labels,
    }