from torchtext.data import Field, TabularDataset, Iterator, BucketIterator
'''
Declarative way of loading data with torchtext.
tokenize: the tokenization method; a custom function can be defined in advance and passed in.
'''
# A Field defines how the raw data is processed
TEXT = Field(sequential=True, tokenize=lambda x: x.split(), lower=True)
# Labels are plain numbers by default, not sequences, so no vocabulary is needed
LABEL = Field(sequential=False, use_vocab=False)
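# As mentioned in the docstring, the tokenizer can also be a named function defined
# in advance instead of a lambda. A minimal sketch; the spaCy-based variant is only
# an assumption and requires spaCy plus its English model to be installed:
def whitespace_tokenize(text):
    # split on whitespace, identical to the lambda used for TEXT above
    return text.split()

# TEXT = Field(sequential=True, tokenize=whitespace_tokenize, lower=True)
# TEXT = Field(sequential=True, tokenize="spacy", lower=True)  # assumes spaCy is available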
# Tell the dataset which Field handles which column ("id" is dropped by mapping it to None)
tv_datafields = [("id", None), ("comment_text", TEXT), ("toxic", LABEL),
                 ("severe_toxic", LABEL), ("threat", LABEL), ("obscene", LABEL),
                 ("insult", LABEL), ("identity_hate", LABEL)]
# The splits method can read the train, validation, and test sets in one call
trn, vld = TabularDataset.splits(path='/Users/yangyang/Desktop/practical-torchtext/data',
                                 train='train.csv', validation='valid.csv', format='csv',
                                 skip_header=True, fields=tv_datafields)
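# Quick sanity check of the constructed Dataset: each element is a torchtext Example
# whose attributes follow the field names defined above (an inspection sketch, not part
# of the original flow):
print(trn[0].__dict__.keys())   # e.g. dict_keys(['comment_text', 'toxic', ...])
print(trn[0].comment_text[:5])  # first few tokens of the first comment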
# After loading the data we can build the vocabulary; pretrained word vectors
# (e.g. GloVe) can be attached at this point:
# TEXT.build_vocab(trn, vectors="glove.6B.100d")
TEXT.build_vocab(trn)
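# The resulting vocabulary exposes token/index mappings and frequency counts
# (a brief inspection sketch):
print(len(TEXT.vocab))                   # vocabulary size, including <unk> and <pad>
print(TEXT.vocab.itos[:10])              # first few tokens
print(TEXT.vocab.freqs.most_common(5))   # most frequent tokens in the training set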
train_iter, val_iter = BucketIterator.splits((trn, vld), batch_sizes=(25, 25),
                                              sort_key=lambda x: len(x.comment_text),
                                              sort_within_batch=False, repeat=False)
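# The plain Iterator imported above is the usual choice for a test set, where sorting
# and shuffling are not wanted. A sketch, assuming a 'test.csv' with the same
# comment_text column exists in the data directory (left commented out for that reason):
# tst = TabularDataset(path='/Users/yangyang/Desktop/practical-torchtext/data/test.csv',
#                      format='csv', skip_header=True,
#                      fields=[("id", None), ("comment_text", TEXT)])
# test_iter = Iterator(tst, batch_size=64, train=False, sort=False,
#                      sort_within_batch=False, repeat=False)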
for idx, batch in enumerate(train_iter):
    comment_text, toxic = batch.comment_text, batch.toxic
    print(toxic)
    print(comment_text)
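# For multi-label training it is often convenient to stack the six label columns of a
# batch into a single (batch_size, 6) tensor. A minimal sketch; the helper name and the
# float conversion are choices made here, not part of the original code:
import torch

label_cols = ["toxic", "severe_toxic", "threat", "obscene", "insult", "identity_hate"]

def batch_labels(batch):
    # each label field is a 1-D tensor of length batch_size
    return torch.stack([getattr(batch, col) for col in label_cols], dim=1).float()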
Example of the original dataset:
Example of the constructed Dataset: