datasets 加载:上一篇博客中已经介绍了大模型与 tokenizer 的加载。本篇在此基础上,简述 dataset 的加载与预处理过程;我的数据是 JSON 格式。
from datasets import load_dataset

# A single JSON file produces only a "train" split, so requesting
# split=["train", "test"] raises (and a list-valued split would return a
# *list* of datasets, breaking the later dataset.map call). Load "train"
# and use dataset.train_test_split() afterwards if a held-out set is needed.
dataset = load_dataset("json", data_files="你的数据.json", split="train")
def Preprocess(example):
    """Tokenize one instruction/output pair into causal-LM training features.

    Concatenates the prompt and the response into ``input_ids``, masks the
    prompt portion of ``labels`` so the loss is computed only on the
    response tokens, and truncates everything to ``MAX_LEN``.

    Args:
        example: A dict with string fields ``"instruction"`` and ``"output"``.

    Returns:
        A dict with ``input_ids``, ``attention_mask`` and ``labels`` lists.
    """
    MAX_LEN = 128
    # NOTE(review): there is no separator between the instruction text and
    # "Assistant: " — confirm the intended prompt template (e.g. a "\n").
    instruction = tokenizer(f"User: {example['instruction']}Assistant: ", add_special_tokens=False)
    # Append eos so the model learns where the answer ends.
    response = tokenizer(example["output"] + tokenizer.eos_token, add_special_tokens=False)
    input_ids = instruction["input_ids"] + response["input_ids"]
    attention_mask = instruction["attention_mask"] + response["attention_mask"]
    # Use -100, not -1: PyTorch CrossEntropyLoss (and transformers model
    # heads) ignore label -100, so prompt tokens are excluded from the loss.
    # -1 is not ignored and would corrupt training.
    labels = [-100] * len(instruction["input_ids"]) + response["input_ids"]
    if len(input_ids) > MAX_LEN:
        # NOTE(review): hard truncation can cut off the eos token for long
        # examples — acceptable for a demo, but worth confirming.
        input_ids = input_ids[:MAX_LEN]
        attention_mask = attention_mask[:MAX_LEN]
        labels = labels[:MAX_LEN]
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels,
    }
# Tokenize every example, drop the raw text columns, then shuffle the rows.
processed = dataset.map(Preprocess, remove_columns=dataset.column_names)
dataset = processed.shuffle()
可供参考的 JSON 数据格式示例如下(即我使用的数据结构):
[
{
"instruction": "你的问题",
"output": "大模型的答案。"
},
{
"instruction": "你的问题第二个",
"output": "大模型对应的答案。"
}
]