# Fine-tune the MiniCPM model with LoRA (Low-Rank Adaptation)
from datasets import load_dataset
from transformers import (AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer,
                          DataCollatorForLanguageModeling)
from peft import LoraConfig, TaskType, get_peft_model
import torch
# The "text" loader turns each line of corpus.txt into one example under the "text" column.
dataset = load_dataset("text", data_files="corpus.txt")
print("训练集大小:", len(dataset["train"]))
print("一个样本数据:", dataset["train"][0])
print("一个样本数据:", dataset["train"][100])
path = 'openbmb/MiniCPM-2B-sft-bf16'
tokenizer = AutoTokenizer.from_pretrained(path)
# The tokenizer has no pad token by default; reuse the EOS token so batches can be padded.
tokenizer.pad_token = tokenizer.eos_token
def preprocess_function(examples):
    # Tokenize each batch of lines, truncating long ones and padding to the longest in the batch.
    return tokenizer(examples["text"], truncation=True, padding=True)
# Map to input_ids / attention_mask and drop the raw "text" column.
tokenized_dataset = dataset.map(preprocess_function, batched=True, remove_columns=["text"])
train_dataset = tokenized_dataset['train']
peft_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",   # attention projections
                    "gate_proj", "up_proj", "down_proj"],     # MLP projections
    inference_mode=False,
    r=8, lora_alpha=32, lora_dropout=0.1,                     # rank, scaling, and dropout of the LoRA update
)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16, device_map='cuda',
                                             trust_remote_code=True)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
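# print_trainable_parameters() reports a line of the form
# "trainable params: ... || all params: ... || trainable%: ...";
# only the injected low-rank matrices are trainable, the base weights stay frozen.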
training_args = TrainingArguments(
    output_dir="MiniCPM-2B-sft-bf16/peft",
    learning_rate=1e-3,
    per_device_train_batch_size=1,
    num_train_epochs=2,
    weight_decay=0.01,
    save_strategy="epoch",
)
# mlm=False selects the causal LM objective: labels are a copy of input_ids (the model shifts them internally).
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=None,
    tokenizer=tokenizer,
    data_collator=data_collator,
)
trainer.train()
trainer.save_state()
# Because the model is a PeftModel, only the LoRA adapter weights are written here.
trainer.save_model(output_dir="MiniCPM-2B-sft-bf16/peft")
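# Optional follow-up (a sketch, not part of the original script; usually run as a separate
# process, since it loads a second copy of the model): reload the base model and attach the
# LoRA adapter that trainer.save_model() wrote to "MiniCPM-2B-sft-bf16/peft". The prompt
# below is only an illustrative placeholder.
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16,
                                                  device_map='cuda', trust_remote_code=True)
lora_model = PeftModel.from_pretrained(base_model, "MiniCPM-2B-sft-bf16/peft")
lora_model.eval()

inputs = tokenizer("Hello, what can MiniCPM do?", return_tensors="pt").to(lora_model.device)
with torch.no_grad():
    outputs = lora_model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))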