NLP data处理
主要总结一下,NLP数据的处理过程
NLP数据的处理主要有分词,然后记录每个词出现的次数,每个词对应的id,word2id,id2word。python有很多库如from collections import Counter,scipy,jieba等配合使用
pytorch dataset
file = open('train.txt','r')
i = 0
data,tag,sentence_lst = [],[],[]
word_to_idx,idx_to_word = {},{}
for line in file:
lst=line.strip().split('|||')
if len(lst)!=2: #确保上一步拆分成句子和类别的元素才进行下面的操作,保证成功
continue
sentence,label= lst[0],lst[1] #提取出句子和类别
words=sentence.split(' ')#在中文中,这一步可以使用结巴分词操作
for word in words:
if word not in word_to_idx:
word_to_idx[word] = i
idx_to_word[i] = word
i=i+1
sentence_lst.append(word_to_idx[word])
if (len(sentence_lst)>=50):
break
for j in range(50-len(sentence_lst)):
sentence_lst.append(0) #这之上的操作完成了两件事情,一个是构建词汇表,二就是将句子向量化
data.append(sentence_lst)
tag.append([int(label)])
sentence_lst = []
#将list转化为数组
data=np.array(data)
tag=np.array(tag) #这个步骤很重要,就是把我们生成的list转化为数组的形式
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
# Aliases consumed by the Dataset construction below.
Data, Label = data, tag
#创建子类
class subDataset(Dataset):
    """Wraps pre-vectorized samples and labels as a torch Dataset."""

    def __init__(self, Data, Label):
        # store the raw arrays; tensor conversion happens lazily per item
        self.Data = Data
        self.Label = Label

    def __len__(self):
        """Number of samples."""
        return len(self.Data)

    def __getitem__(self, index):
        """Return one (float sample tensor, int label tensor) pair."""
        sample = torch.Tensor(self.Data[index])
        target = torch.IntTensor(self.Label[index])
        return sample, target
dataset = subDataset(Data, Label)
print(dataset)
print('dataset大小为:', len(dataset))
print(dataset[0])
# num_workers > 0 spawns worker processes; use 0 when debugging on Windows.
dataloader = DataLoader(dataset, batch_size=100, shuffle=False, num_workers=4)
for i, (batch_data, batch_label) in enumerate(dataloader):
    batch_data = batch_data.long()
    # labels arrive as shape (batch, 1); squeeze ONCE down to (batch,)
    batch_label = batch_label.squeeze(1).long()
    # BUG FIX: the original called model(sentence.long()) -- `sentence` was a
    # leftover str from the preprocessing loop -- and then squeezed the
    # already-1-D label a second time, which raises IndexError.
    outputs = model(batch_data)
    loss = criterion(outputs, batch_label)
torchtext
torchtext包含以下组件:
Field :主要包含以下数据预处理的配置信息,比如指定分词方法,是否转成小写,起始字符,结束字符,补全字符以及词典等等
Dataset :继承自pytorch的Dataset,用于加载数据,提供了TabularDataset可以指定路径,格式,Field信息就可以方便的完成数据加载。同时torchtext还提供预先构建的常用数据集的Dataset对象,可以直接加载使用,splits方法可以同时加载训练集,验证集和测试集。
Iterator : 主要是数据输出的模型的迭代器,可以支持batch定制
Field包含以下的参数:
sequential: 是否把数据表示成序列,如果是False, 不能使用分词 默认值: True.
use_vocab: 是否使用词典对象. 如果是False 数据的类型必须已经是数值类型. 默认值: True.
init_token: 每一条数据的起始字符 默认值: None.
eos_token: 每条数据的结尾字符 默认值: None.
fix_length: 修改每条数据的长度为该值,不够的用pad_token补全. 默认值: None.
tensor_type: 把数据转换成的tensor类型 默认值: torch.LongTensor.
preprocessing:在分词之后和数值化之前使用的管道 默认值: None.
postprocessing: 数值化之后和转化成tensor之前使用的管道默认值: None.
lower: 是否把数据转化为小写 默认值: False.
tokenize: 分词函数. 默认值: str.split.
include_lengths: 是否返回一个已经补全的最小batch的元组和一个包含每条数据长度的列表. 默认值: False.
batch_first: Whether to produce tensors with the batch dimension first. 默认值: False.
pad_token: 用于补全的字符. 默认值: "&lt;pad&gt;".
unk_token: 不存在词典里的字符. 默认值: "&lt;unk&gt;".
pad_first: 是否补全第一个字符. 默认值: False.
重要的几个方法:
pad(minibatch): 在一个batch对齐每条数据
build_vocab(): 建立词典
numericalize(): 把文本数据数值化,返回tensor
import torchtext
from torchtext import data
from torchtext.vocab import Vectors
from torch.nn import init
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchtext.data import Iterator, BucketIterator
import pandas as pd  # BUG FIX: pd was used here before pandas was imported

# Notebook-style preview of the training csv (the value is discarded).
pd.read_csv("data/train_one_label.csv").head(2)

tokenize = lambda x: x.split()
# TEXT: sequential field, whitespace-tokenized, lower-cased, padded/cut to 200 tokens.
TEXT = data.Field(sequential=True, tokenize=tokenize, lower=True, fix_length=200)
# LABEL: already numeric, so no vocabulary is built for it.
LABEL = data.Field(sequential=False, use_vocab=False)

train_path = 'data/train_one_label.csv'
valid_path = "data/valid_one_label.csv"
test_path = "data/test.csv"
然后利用Dataset构建数据集
官方
# Load the raw csv splits into DataFrames.
train_data = pd.read_csv('data/train_one_label.csv')
valid_data = pd.read_csv('data/valid_one_label.csv')
test_data = pd.read_csv("data/test.csv")

# LABEL is already numeric (no vocab); TEXT is tokenized and lower-cased.
LABEL = data.Field(sequential=False, use_vocab=False)
TEXT = data.Field(sequential=True, tokenize=tokenize, lower=True)
# get_dataset builds and returns the examples and fields a Dataset needs.
def get_dataset(csv_data, text_field, label_field, test=False):
    """Turn a DataFrame into (examples, fields) for a torchtext Dataset."""
    # the id column is useless for training, so its field is None
    fields = [("id", None),
              ("comment_text", text_field),
              ("toxic", label_field)]
    examples = []
    if test:
        # test split carries no labels
        for text in tqdm(csv_data['comment_text']):
            examples.append(data.Example.fromlist([None, text, None], fields))
    else:
        for text, label in tqdm(zip(csv_data['comment_text'], csv_data['toxic'])):
            examples.append(data.Example.fromlist([None, text, label], fields))
    return examples, fields
# Build (examples, fields) for each split, then materialize the Datasets.
train_examples, train_fields = get_dataset(train_data, TEXT, LABEL)
valid_examples, valid_fields = get_dataset(valid_data, TEXT, LABEL)
test_examples, test_fields = get_dataset(test_data, TEXT, None, test=True)

train = data.Dataset(train_examples, train_fields)
valid = data.Dataset(valid_examples, valid_fields)
test = data.Dataset(test_examples, test_fields)
自定义
当构建简单的数据集时,可直接使用torchtext.data.Dataset来构建,当对原始数据集只进行简单的划分处理时,例如读取数据并划分训练集验证集等操作,也可以直接使用TabularDataset类和splits类方法来实现,该类支持读取csv,tsv等格式。但是当我们需要对数据进行更多的预处理时,例如shuffle,dropout等数据增强操作时,自定义Dataset会更灵活。
from torchtext import data
from torchtext.vocab import Vectors
from tqdm import tqdm
import pandas as pd
import numpy as np
import torch
import random
import os
# csv locations consumed by the custom Dataset below
train_path = 'data/train_one_label.csv'
valid_path = 'data/valid_one_label.csv'
test_path = 'data/test.csv'
# Custom torchtext Dataset with optional text augmentation.
class MyDataset(data.Dataset):
    """Reads a csv and builds Examples, optionally shuffling/dropping words."""

    def __init__(self, path, text_field, label_field, test=False, aug=False, **kwargs):
        # the id column is unused, so its field is None
        fields = [("id", None),
                  ("comment_text", text_field),
                  ("toxic", label_field)]
        examples = []
        csv_data = pd.read_csv(path)
        print('read data from {}'.format(path))
        if test:
            # test split: no labels to load
            for text in tqdm(csv_data['comment_text']):
                examples.append(data.Example.fromlist([None, text, None], fields))
        else:
            for text, label in tqdm(zip(csv_data['comment_text'], csv_data['toxic'])):
                if aug:
                    # augmentation: a coin flip picks word dropout or shuffling
                    if random.random() > 0.5:
                        text = self.dropout(text)
                    else:
                        text = self.shuffle(text)
                # Example stores each column of one sample as an attribute
                examples.append(data.Example.fromlist([None, text, label - 1], fields))
        # preprocessing done -- hand the examples to the parent Dataset
        super(MyDataset, self).__init__(examples, fields, **kwargs)

    def shuffle(self, text):
        """Return the text with its words in random order."""
        words = np.random.permutation(text.strip().split())
        return ' '.join(words)

    def dropout(self, text, p=0.5):
        """Blank out roughly a fraction p of the words (indices may repeat)."""
        words = text.strip().split()
        count = len(words)
        for idx in np.random.choice(count, int(count * p)):
            words[idx] = ''
        return ' '.join(words)
iteration
from torchtext.data import Iterator, BucketIterator
# For a train-only iterator one could use:
# train_iter = data.BucketIterator(dataset=train, batch_size=8, shuffle=True, sort_within_batch=False, repeat=False)

# Build iterators for train and validation in one call.
train_iter, val_iter = BucketIterator.splits(
    (train, valid),
    batch_sizes=(8, 8),
    device=-1,  # replace -1 with the GPU id when running on a GPU
    # tells BucketIterator how to measure length so it can group similar-length samples
    sort_key=lambda x: len(x.comment_text),
    sort_within_batch=False,
    repeat=False  # repeat=False because we wrap this iterator ourselves
)
# Test data keeps its original order, so a plain Iterator is enough.
test_iter = Iterator(test, batch_size=8, device=-1, sort=False, sort_within_batch=False, repeat=False)
BucketIterator相比Iterator的优势是会自动选取样本长度相似的数据来构建批数据。但是在测试集中一般不想改变样本顺序,因此测试集使用Iterator迭代器来构建。
iterator
import os
import torch
import numpy as np
import pickle as pkl
from tqdm import tqdm
import time
from datetime import timedelta
MAX_VOCAB_SIZE = 10000  # cap on vocabulary size (most frequent words kept)
UNK, PAD = '<UNK>', '<PAD>'  # out-of-vocabulary token and padding token
def build_vocab(file_path, tokenizer, max_size, min_freq):
    """Scan a tab-separated '<text>\\t<label>' file and return a word->id dict.

    Keeps at most max_size tokens whose frequency is >= min_freq, ordered by
    descending frequency, then appends UNK and PAD with the last two ids.
    """
    counts = {}
    with open(file_path, 'r', encoding='UTF-8') as f:
        for line in tqdm(f):
            lin = line.strip()
            if not lin:
                continue
            content = lin.split('\t')[0]  # the text sits before the first tab
            for word in tokenizer(content):
                counts[word] = counts.get(word, 0) + 1
    frequent = sorted((kv for kv in counts.items() if kv[1] >= min_freq),
                      key=lambda kv: kv[1], reverse=True)[:max_size]
    vocab = {word: idx for idx, (word, _cnt) in enumerate(frequent)}
    vocab.update({UNK: len(vocab), PAD: len(vocab) + 1})
    return vocab
def build_dataset(config, ues_word):
    """Load (or build and cache) the vocabulary, then vectorize all splits.

    Returns (vocab, train, dev, test); each split is a list of
    (word_ids, label, seq_len, bigram_hashes, trigram_hashes) tuples.
    """
    if ues_word:
        tokenizer = lambda x: x.split(' ')  # word level: space-separated tokens
    else:
        tokenizer = lambda x: [y for y in x]  # char level
    if os.path.exists(config.vocab_path):
        vocab = pkl.load(open(config.vocab_path, 'rb'))
    else:
        vocab = build_vocab(config.train_path, tokenizer=tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)
        pkl.dump(vocab, open(config.vocab_path, 'wb'))
    print(f"Vocab size: {len(vocab)}")

    def biGramHash(sequence, t, buckets):
        # hash of the id preceding position t (0 when out of range)
        t1 = sequence[t - 1] if t - 1 >= 0 else 0
        return (t1 * 14918087) % buckets

    def triGramHash(sequence, t, buckets):
        # hash combining the two ids preceding position t
        t1 = sequence[t - 1] if t - 1 >= 0 else 0
        t2 = sequence[t - 2] if t - 2 >= 0 else 0
        return (t2 * 14918087 * 18408749 + t1 * 14918087) % buckets

    def load_dataset(path, pad_size=32):
        # vectorize one '<text>\t<label>' file into fixed-length id sequences
        contents = []
        with open(path, 'r', encoding='UTF-8') as f:
            for line in tqdm(f):
                lin = line.strip()
                if not lin:
                    continue
                content, label = lin.split('\t')
                token = tokenizer(content)
                seq_len = len(token)
                if pad_size:
                    # pad short sequences, truncate long ones
                    if len(token) < pad_size:
                        token.extend([PAD] * (pad_size - len(token)))
                    else:
                        token = token[:pad_size]
                        seq_len = pad_size
                # word -> id, unknown words map to UNK
                words_line = [vocab.get(word, vocab.get(UNK)) for word in token]
                # fastText-style n-gram hash features
                buckets = config.n_gram_vocab
                bigram = [biGramHash(words_line, i, buckets) for i in range(pad_size)]
                trigram = [triGramHash(words_line, i, buckets) for i in range(pad_size)]
                contents.append((words_line, int(label), seq_len, bigram, trigram))
        return contents  # [([...], 0, ...), ([...], 1, ...), ...]

    train = load_dataset(config.train_path, config.pad_size)
    dev = load_dataset(config.dev_path, config.pad_size)
    test = load_dataset(config.test_path, config.pad_size)
    return vocab, train, dev, test
class DatasetIterater(object):
    """Batches pre-vectorized (words, label, seq_len, bigram, trigram) tuples.

    Yields ((x, seq_len, bigram, trigram), y) tensor tuples on the given
    device; a final partial batch is emitted when len(batches) is not a
    multiple of batch_size.
    """

    def __init__(self, batches, batch_size, device):
        self.batch_size = batch_size
        self.batches = batches
        self.n_batches = len(batches) // batch_size
        # BUG FIX: the original tested `len(batches) % self.n_batches`, which
        # crashes with ZeroDivisionError when len(batches) < batch_size and
        # silently drops the leftover samples whenever n_batches happens to
        # divide len(batches). The remainder must be taken w.r.t. batch_size.
        self.residue = len(batches) % batch_size != 0  # True -> final partial batch
        self.index = 0
        self.device = device

    def _to_tensor(self, datas):
        """Stack one batch of tuples into device tensors."""
        x = torch.LongTensor([item[0] for item in datas]).to(self.device)
        y = torch.LongTensor([item[1] for item in datas]).to(self.device)
        bigram = torch.LongTensor([item[3] for item in datas]).to(self.device)
        trigram = torch.LongTensor([item[4] for item in datas]).to(self.device)
        # seq_len is the pre-padding length (capped at pad_size upstream)
        seq_len = torch.LongTensor([item[2] for item in datas]).to(self.device)
        return (x, seq_len, bigram, trigram), y

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            # final partial batch
            batch = self.batches[self.index * self.batch_size: len(self.batches)]
            self.index += 1
            return self._to_tensor(batch)
        elif self.index >= self.n_batches:
            # exhausted: rewind so the iterator can be reused next epoch
            self.index = 0
            raise StopIteration
        else:
            batch = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]
            self.index += 1
            return self._to_tensor(batch)

    def __iter__(self):
        return self

    def __len__(self):
        return self.n_batches + 1 if self.residue else self.n_batches
def build_iterator(dataset, config):
    """Wrap a vectorized dataset in a DatasetIterater (batching + device placement)."""
    # FIX: the original bound the result to a local named `iter`,
    # shadowing the builtin; return directly instead.
    return DatasetIterater(dataset, config.batch_size, config.device)
def get_time_dif(start_time):
    """Return the wall-clock time elapsed since start_time, rounded to whole seconds."""
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))
if __name__ == "__main__":
    # Extract pretrained word vectors for the vocabulary and save them trimmed.
    vocab_dir = "./THUCNews/data/vocab.pkl"
    pretrain_dir = "./THUCNews/data/sgns.sogou.char"
    emb_dim = 300
    filename_trimmed_dir = "./THUCNews/data/vocab.embedding.sougou"
    # FIX: context managers instead of bare open() (the pickle handle was
    # never closed and the text handle leaked on error).
    with open(vocab_dir, 'rb') as vf:
        word_to_id = pkl.load(vf)
    # words missing from the pretrained file keep a random initialization
    embeddings = np.random.rand(len(word_to_id), emb_dim)
    with open(pretrain_dir, "r", encoding='UTF-8') as f:
        # FIX: stream line by line instead of readlines() (whole file in memory)
        for i, line in enumerate(f):
            # if i == 0:  # skip a header line here if the file has one
            #     continue
            lin = line.strip().split(" ")
            if lin[0] in word_to_id:
                idx = word_to_id[lin[0]]
                # FIX: slice by emb_dim instead of the hard-coded 301
                emb = [float(x) for x in lin[1:emb_dim + 1]]
                embeddings[idx] = np.asarray(emb, dtype='float32')
    np.savez_compressed(filename_trimmed_dir, embeddings=embeddings)
加载词向量
# Load pretrained GloVe vectors, caching the processed form locally.
cache = 'mycache'
if not os.path.exists(cache):
    os.mkdir(cache)
vectors = Vectors(name="/data/zhangyong/nlp/glove/glove.6B.50d.txt", cache=cache)
# Initialization used for tokens absent from the pretrained vectors.
vectors.unk_init = init.uniform_
TEXT.build_vocab(new_corpus, min_freq=5, vectors=vectors)

# Embedding layer created through plain pytorch.
embedding = nn.Embedding(2000, 256)
# Copy the pretrained matrix in as the initial weights.
weight_matrix = TEXT.vocab.vectors
embedding.weight.data.copy_(weight_matrix)
# To fine-tune the pretrained weights instead:
# embeddings.weight = nn.Parameter(embeddings, requires_grad=True)