# Import all functionality from the datasets library
from datasets import *
# Load an online dataset from the Hugging Face Hub
datasets = load_dataset("madao33/new-title-chinese")
datasets
# Load a single task from a dataset collection (benchmark)
boolq_dataset = load_dataset("super_glue", "boolq")
boolq_dataset
# Load according to dataset splits
dataset = load_dataset("madao33/new-title-chinese", split="train")
dataset
dataset = load_dataset("madao33/new-title-chinese", split="train[10:100]")
dataset
dataset = load_dataset("madao33/new-title-chinese", split="train[:50%]")
dataset
dataset = load_dataset("madao33/new-title-chinese", split=["train[:50%]", "train[50%:]"])
dataset
# Inspect the dataset
datasets = load_dataset("madao33/new-title-chinese")
datasets
datasets["train"][0]
datasets["train"][:2]
datasets["train"]["title"][:5]
datasets["train"].column_names
datasets["train"].features
# Dataset splitting
dataset = datasets["train"]
dataset.train_test_split(test_size=0.1)
dataset = boolq_dataset["train"]
dataset.train_test_split(test_size=0.1, stratify_by_column="label")
# Data selection and filtering
# Selection
datasets["train"].select([0, 1])
# Filtering
filter_dataset = datasets["train"].filter(lambda example: "中国" in example["title"])
filter_dataset["title"][:5]
# Data mapping
def add_prefix(example):
example["title"] = 'Prefix: ' + example["title"]
return example
prefix_dataset = datasets.map(add_prefix)
prefix_dataset["train"][:10]["title"]
# Preprocess with the transformers library
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese")
def preprocess_function(example, tokenizer=tokenizer):
model_inputs = tokenizer(example["content"], max_length=512, truncation=True)
labels = tokenizer(example["title"], max_length=32, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
processed_datasets = datasets.map(preprocess_function)
processed_datasets = datasets.map(preprocess_function, num_proc=4)
processed_datasets = datasets.map(preprocess_function, batched=True)
processed_datasets = datasets.map(preprocess_function, batched=True, remove_columns=datasets["train"].column_names)
# Saving and loading
processed_datasets.save_to_disk("./processed_data")
processed_datasets = load_from_disk("./processed_data")
processed_datasets
# Loading local datasets
# Load a file directly as a dataset
dataset = load_dataset("csv", data_files="./ChnSentiCorp_htl_all.csv", split="train")
dataset
# Load all files in a folder as one dataset
dataset = load_dataset("csv", data_files=["./all_data/ChnSentiCorp_htl_all.csv", "./all_data/ChnSentiCorp_htl_all copy.csv"], split='train')
dataset
# Convert other pre-loaded formats into a dataset
import pandas as pd
data = pd.read_csv("./ChnSentiCorp_htl_all.csv")
data.head()
dataset = Dataset.from_pandas(data)
dataset
# List-format data must wrap each row in a dict to declare the field names
data = [{"text": "abc"}, {"text": "def"}]
Dataset.from_list(data)
# Load a dataset through a custom loading script
load_dataset("json", data_files="./cmrc2018_trial.json", field="data")
dataset = load_dataset("./load_script.py", split="train")
dataset
dataset[0]
# Dataset with DataCollator
from transformers import DataCollatorWithPadding
dataset = datasets.load_dataset("glue", "mnli")
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)