PTB 数据集
PTB(Penn Treebank Dataset)文本数据集是目前语言模型学习中使用最为广泛的数据集。
数据的下载地址:http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
目前只需关心data文件夹下的三个文件:
- ptb.test.txt # 测试集数据文件
- ptb.train.txt # 训练集数据文件
- ptb.valid.txt # 验证集数据文件
这三个数据文件中的数据已经过预处理,相邻单词之间用空格隔开。数据集共包含了9998个不同的单词词汇,加上表示稀有词语的特殊符号 `<unk>` 和语句结束标记符 `<eos>` 在内,一共是10000个词汇。
为了将文本转化为模型可以读入的单词序列,需要将这10000个不同的词汇分别映射到0~9999之间的整数编号。下面的辅助程序首先按照词频顺序为每个词汇分配一个编号,然后将词汇表保存到一个独立的vocab文件中。
darts中rnn部分数据读取代码解读
论文:DARTS: Differentiable Architecture Search
源代码地址:https://github.com/quark0/darts
以下是文件rnn/data.py中的代码。此部分代码实现了如下功能:
- Dictionary实现词频统计、构建词表、word2idx、idx2word
- Corpus实现对全文的tokenize,构建一个全文长度的LongTensor
- SentCorpus实现对每一行的tokenize,并生成一个list。
- BatchSentLoader对一个SentCorpus产生的列表按句子长度排序,并返回一个迭代器,根据batch_size迭代数据
下面针对代码中重要的内容添加了注释
import os
import torch
from collections import Counter
class Dictionary(object):
    """Bidirectional mapping between words and integer ids, with counts.

    Attributes:
        word2idx: dict mapping word -> integer id.
        idx2word: list mapping integer id -> word.
        counter:  Counter of occurrences, keyed by token id.
        total:    total number of tokens added so far.
    """

    def __init__(self):
        self.word2idx = {}
        self.idx2word = []
        self.counter = Counter()
        self.total = 0

    def add_word(self, word):
        """Register one occurrence of *word* and return its integer id."""
        idx = self.word2idx.get(word)
        if idx is None:
            # First sighting: the next free slot in idx2word becomes its id.
            idx = len(self.idx2word)
            self.idx2word.append(word)
            self.word2idx[word] = idx
        self.counter[idx] += 1
        self.total += 1
        return idx

    def __len__(self):
        """Vocabulary size: number of distinct words seen."""
        return len(self.idx2word)
class Corpus(object):
    """Tokenize the train/valid/test files of a PTB-style corpus.

    Each split becomes ONE flat 1-D LongTensor of word ids covering the
    whole file (no per-line structure); every line is terminated with an
    '<eos>' token.
    """

    def __init__(self, path):
        self.dictionary = Dictionary()
        self.train = self.tokenize(os.path.join(path, 'train.txt'))
        self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
        self.test = self.tokenize(os.path.join(path, 'test.txt'))

    def tokenize(self, path):
        """Tokenizes a text file."""
        assert os.path.exists(path)
        # Pass 1: grow the dictionary with every word in the file
        # (plus one '<eos>' per line).
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                for word in line.split() + ['<eos>']:
                    self.dictionary.add_word(word)
        # Pass 2: map every word to its id and pack the whole file into
        # a single flat tensor.
        ids = []
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                ids.extend(self.dictionary.word2idx[word]
                           for word in line.split() + ['<eos>'])
        return torch.LongTensor(ids)
class SentCorpus(object):
    """Tokenize the train/valid/test files sentence by sentence.

    Unlike ``Corpus``, each split becomes a *list* of 1-D LongTensors,
    one per non-empty line; every line is terminated with an '<eos>'
    token, so the tensors have differing lengths.
    """

    def __init__(self, path):
        self.dictionary = Dictionary()
        self.train = self.tokenize(os.path.join(path, 'train.txt'))
        self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
        self.test = self.tokenize(os.path.join(path, 'test.txt'))

    def tokenize(self, path):
        """Tokenizes a text file into a list of per-sentence LongTensors."""
        assert os.path.exists(path)
        # Pass 1: grow the dictionary with every word in the file.
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                for word in line.split() + ['<eos>']:
                    self.dictionary.add_word(word)
        # Pass 2: one tensor per sentence (lengths differ line to line).
        sents = []
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                # BUG FIX: lines read from a file keep their trailing
                # '\n', so the original `if not line:` never skipped a
                # blank line; strip before testing for emptiness.
                if not line.strip():
                    continue
                words = line.split() + ['<eos>']
                sents.append(torch.LongTensor(
                    [self.dictionary.word2idx[word] for word in words]))
        return sents
class BatchSentLoader(object):
    """Iterate over a list of sentence tensors in length-sorted batches.

    Sentences are sorted ascending by length so each batch groups
    similar-length sentences; within a batch, shorter sentences are
    padded at the end with ``pad_id``.  Each step yields a LongTensor of
    shape (max_len_in_batch, batch_size), one sentence per column; the
    final batch may be narrower than ``batch_size``.

    Args:
        sents: list of 1-D LongTensors, one per sentence.
        batch_size: maximum number of sentences per batch.
        pad_id: id used to pad short sentences (default 0).
        cuda: if True, move each batch to the GPU before yielding it.
        volatile: kept for backward compatibility; unused here (torch's
            `volatile` flag has long been deprecated).
    """

    def __init__(self, sents, batch_size, pad_id=0, cuda=False, volatile=False):
        self.sents = sents
        self.batch_size = batch_size
        # Sort ascending by number of tokens in the sentence.
        self.sort_sents = sorted(sents, key=lambda x: x.size(0))
        self.cuda = cuda
        self.volatile = volatile
        self.pad_id = pad_id
        # BUG FIX: initialize the cursor here so calling next()/__next__()
        # without first calling __iter__() no longer raises AttributeError.
        self.idx = 0

    def __next__(self):
        """Return the next padded (max_len, batch_size) batch."""
        if self.idx >= len(self.sort_sents):
            raise StopIteration  # all sentences consumed
        batch_size = min(self.batch_size, len(self.sort_sents) - self.idx)
        batch = self.sort_sents[self.idx:self.idx + batch_size]
        max_len = max(s.size(0) for s in batch)
        # One column per sentence; rows past a sentence's end stay pad_id.
        tensor = torch.LongTensor(max_len, batch_size).fill_(self.pad_id)
        for i, s in enumerate(batch):
            tensor[:s.size(0), i].copy_(s)
        if self.cuda:
            tensor = tensor.cuda()
        self.idx += batch_size
        return tensor

    # Python-2 style alias so `loader.next()` also works.
    next = __next__

    def __iter__(self):
        """Reset the cursor and return self (iterator protocol)."""
        self.idx = 0
        return self
if __name__ == '__main__':
    # Demo: tokenize PTB sentence-by-sentence and batch the test split
    # (batch size 10), printing the padded shape of every batch.
    corpus = SentCorpus('/workspace/darts/data/penn')
    loader = BatchSentLoader(corpus.test, 10)
    # `for ... in loader` invokes __iter__() once, then __next__() per step.
    for step, batch in enumerate(loader):
        print(step, batch.size())
    # Because sentences are length-sorted, batches come out shortest-first:
    # early steps print torch.Size([3, 10]); the last (partial) batch is the
    # single longest sentence, e.g. torch.Size([78, 1]).