
P-Tuning: builds on Prompt-Tuning by adding an extra encoding step over the prompt, which speeds up convergence. Concretely, PEFT supports two encoders: an LSTM and an MLP. Unlike Prompt-Tuning, the prompt here comes only in soft form.
Idea: P-Tuning introduces a lightweight encoder (an LSTM or MLP) to generate the prompt embeddings. The encoder takes the trainable virtual-token embeddings as input and outputs their reparameterized continuous representations.
Advantage: Prompt-Tuning is static; it simply prepends a randomly initialized set of vectors to the user input. P-Tuning instead passes the virtual tokens through an LSTM or MLP, so their embeddings are modeled jointly rather than learned independently, which stabilizes optimization and speeds up convergence.
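To make the reparameterization concrete, here is a minimal sketch of an MLP prompt encoder in plain PyTorch (not the PEFT source; the class name and exact layer stack are illustrative, and token_dim is assumed to match the base model's hidden size):

import torch
import torch.nn as nn

class MLPPromptEncoder(nn.Module):
    """Reparameterize trainable virtual-token embeddings through an MLP (P-Tuning style)."""
    def __init__(self, num_virtual_tokens=10, token_dim=2048, encoder_hidden_size=1024):
        super().__init__()
        # Base trainable embeddings, one per virtual token
        self.embedding = nn.Embedding(num_virtual_tokens, token_dim)
        # MLP that re-encodes the embeddings before they are used as the prompt
        self.mlp = nn.Sequential(
            nn.Linear(token_dim, encoder_hidden_size),
            nn.ReLU(),
            nn.Linear(encoder_hidden_size, token_dim),
        )
        self.register_buffer("indices", torch.arange(num_virtual_tokens))

    def forward(self):
        # (num_virtual_tokens, token_dim): reparameterized prompt embeddings,
        # later prepended to the input embeddings of every batch
        return self.mlp(self.embedding(self.indices))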
from datasets import load_from_disk
from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForSeq2Seq
from transformers import TrainingArguments, Trainer
from peft import PromptEncoderConfig, TaskType, get_peft_model, PromptEncoderReparameterizationType
# Tokenizer
tokenizer = AutoTokenizer.from_pretrained("Langboat/bloom-1b4-zh")
# The instruction and response are tokenized separately inside the function so that
# the instruction part can be masked out of the loss, i.e.
# labels = [-100] * len(instruction["input_ids"]) + response["input_ids"]
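# Toy example with made-up token ids: if the instruction tokenizes to [5, 6, 7]
# and the response to [8, 9, 2], then labels = [-100, -100, -100, 8, 9, 2],
# so the cross-entropy loss is computed only over the response tokens.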
def process_func(example):
    MAX_LENGTH = 256
    # Prompt template: "Human: <instruction>\n<input>\n\nAssistant: <output><eos>"
    instruction = tokenizer("\n".join(["Human: " + example["instruction"], example["input"]]).strip() + "\n\nAssistant: ")
    response = tokenizer(example["output"] + tokenizer.eos_token)
    input_ids = instruction["input_ids"] + response["input_ids"]
    attention_mask = instruction["attention_mask"] + response["attention_mask"]
    # -100 masks the instruction part out of the loss
    labels = [-100] * len(instruction["input_ids"]) + response["input_ids"]
    # Truncate all three sequences together so they stay aligned
    if len(input_ids) > MAX_LENGTH:
        input_ids = input_ids[:MAX_LENGTH]
        attention_mask = attention_mask[:MAX_LENGTH]
        labels = labels[:MAX_LENGTH]
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels
    }
if __name__ == "__main__":
    # Load the dataset
    dataset = load_from_disk("/root/StudyLLM/prompt/03-PEFT/data/alpaca_data_zh")
    # Preprocess the data
    tokenized_ds = dataset.map(process_func, remove_columns=dataset.column_names)
    # Sanity checks:
    # print(tokenizer.decode(tokenized_ds[1]["input_ids"]))
    # print(tokenizer.decode(list(filter(lambda x: x != -100, tokenized_ds[1]["labels"]))))
    # Create the base model
    model = AutoModelForCausalLM.from_pretrained("Langboat/bloom-1b4-zh", low_cpu_mem_usage=True)
    # Configure P-Tuning: choose ONE of the two reparameterization encoders below
    # (a second assignment to `config` would simply override the first)
    # Option 1: MLP encoder
    # config = PromptEncoderConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=10,
    #                              encoder_reparameterization_type=PromptEncoderReparameterizationType.MLP,
    #                              encoder_hidden_size=1024)
    # Option 2: LSTM encoder
    config = PromptEncoderConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=10,
                                 encoder_reparameterization_type=PromptEncoderReparameterizationType.LSTM,
                                 encoder_dropout=0.1, encoder_num_layers=1, encoder_hidden_size=1024)
    model = get_peft_model(model, config)  # Wrap the base model with the P-Tuning adapter
    model.print_trainable_parameters()  # Prints directly and returns None, so no outer print() needed
    # Training arguments
    args = TrainingArguments(
        output_dir="/tmp_1203",
        per_device_train_batch_size=1,
        gradient_accumulation_steps=8,
        logging_steps=10,
        num_train_epochs=1
    )
    # Trainer; DataCollatorForSeq2Seq pads labels with -100 so padded positions are ignored by the loss
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=tokenized_ds,
        data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True)
    )
    # Train
    trainer.train()
    # Inference
    model = model.cuda()
    ipt = tokenizer("Human: {}\n{}".format("考试有哪些技巧?", "").strip() + "\n\nAssistant: ", return_tensors="pt").to(model.device)
    print(tokenizer.decode(model.generate(**ipt, max_length=128, do_sample=True)[0], skip_special_tokens=True))
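After training, only the prompt-encoder weights need to be persisted, not the full base model. A minimal sketch of saving and reloading the adapter with the standard PEFT API (the path "ptuning_adapter" is illustrative):

# Save only the small adapter weights (virtual-token embeddings + encoder)
model.save_pretrained("ptuning_adapter")

# Reload later by wrapping a fresh base model
from peft import PeftModel
base = AutoModelForCausalLM.from_pretrained("Langboat/bloom-1b4-zh")
model = PeftModel.from_pretrained(base, "ptuning_adapter")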