The code below is the full source of create_pretraining_data.py. The comments beginning with "#*" are my own explanations, interleaved with the code. I recommend starting from the main function; every function it relies on is explained in some detail along the way.
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create masked LM/next sentence masked_lm TF examples for BERT."""
# Identical to the code released with the original BERT paper.
# This file defines how plain text is converted into the tfrecord files used to pre-train a BERT model.
# It produces pre-training data for both the MLM and the NSP task.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import tokenization
import tensorflow as tf
import warnings
warnings.filterwarnings("ignore")
flags = tf.flags
FLAGS = flags.FLAGS
'''
input_file: path to the input file(s)
output_file: path to the output file(s)
vocab_file: the vocabulary file released by Google; the value is the path to that file
do_lower_case: if True, the input text is lower-cased (case is ignored)
max_seq_length: maximum combined length of one training example (the two sentences together)
max_predictions_per_seq: maximum number of masked positions per training example
random_seed: seed for the random number generator
dupe_factor: how many times each document is re-used to generate (differently randomized) training examples
masked_lm_prob: probability of masking a token; the actual number of masked positions per example is
    min(max_predictions_per_seq, round(sequence length * masked_lm_prob))
short_seq_prob: with this probability, examples shorter than max_seq_length are produced in order to
    reduce the mismatch between pre-training and fine-tuning
(An example invocation is shown right after the flag definitions below.)
'''
flags.DEFINE_string("input_file", None,
"Input raw text file (or comma-separated list of files).")
flags.DEFINE_string(
"output_file", None,
"Output TF example file (or comma-separated list of files).")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_bool(
"do_whole_word_mask", False,
"Whether to use whole word masking rather than per-WordPiece masking.")
flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.")
flags.DEFINE_integer("max_predictions_per_seq", 20,
"Maximum number of masked LM predictions per sequence.")
flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")
flags.DEFINE_integer(
"dupe_factor", 10,
"Number of times to duplicate the input data (with different masks).")
flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.")
flags.DEFINE_float(
"short_seq_prob", 0.1,
"Probability of creating sequences which are shorter than the "
"maximum length.")
class TrainingInstance(object):
"""A single training instance (sentence pair)."""
def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,
is_random_next):
self.tokens = tokens
self.segment_ids = segment_ids
self.is_random_next = is_random_next
self.masked_lm_positions = masked_lm_positions
self.masked_lm_labels = masked_lm_labels
def __str__(self):
s = ""
s += "tokens: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.tokens]))
s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids]))
s += "is_random_next: %s\n" % self.is_random_next
s += "masked_lm_positions: %s\n" % (" ".join(
[str(x) for x in self.masked_lm_positions]))
s += "masked_lm_labels: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.masked_lm_labels]))
s += "\n"
return s
def __repr__(self):
return self.__str__()
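#* To make the fields above concrete, here is a hypothetical example of what one printed
#* TrainingInstance could look like (the sentences and mask positions are made up for illustration):
#*   tokens: [CLS] the man went to [MASK] store [SEP] he bought a gallon [MASK] milk [SEP]
#*   segment_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1
#*   is_random_next: False
#*   masked_lm_positions: 5 12
#*   masked_lm_labels: the of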
def write_instance_to_example_files(instances, tokenizer, max_seq_length,
max_predictions_per_seq, output_files):
"""Create TF example files from `TrainingInstance`s."""
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
#* Make sure everything has been padded all the way up to max_seq_length.
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
#* If fewer than max_predictions_per_seq positions were masked, pad the mask-related lists with zeros.
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
'''
input_ids: the tokens converted into their vocabulary indices (ids)
input_mask: a 1-D list marking which positions hold real tokens (1) and which are padding (0)
segment_ids: marks which tokens belong to sentence A (0) and which to sentence B (1)
masked_lm_positions: the masked positions, stored as indices into tokens, e.g. [20, 32, ...]
masked_lm_ids: the original tokens at the masked positions, converted into vocabulary ids
masked_lm_weights: 1.0 for every real masked position, 0.0 for the padded entries
next_sentence_label: whether sentence B really follows sentence A (0) or was sampled at random (1)
'''
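#* A tiny worked example of the padding above (the ids are hypothetical except that 101/102/103 are
#* the usual [CLS]/[SEP]/[MASK] ids in the uncased vocabulary), assuming max_seq_length=10 and
#* max_predictions_per_seq=3 for an instance with 8 real tokens and 2 masked positions:
#*   input_ids:            [101, 1996, 103, 2253, 102, 1996, 103, 102, 0, 0]
#*   input_mask:           [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]
#*   segment_ids:          [0, 0, 0, 0, 0, 1, 1, 1, 0, 0]
#*   masked_lm_positions:  [2, 6, 0]
#*   masked_lm_ids:        [2158, 1997, 0]
#*   masked_lm_weights:    [1.0, 1.0, 0.0]
#*   next_sentence_labels: [0]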
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(input_mask)
features["segment_ids"] = create_int_feature(segment_ids)
features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
features["next_sentence_labels"] = create_int_feature([next_sentence_label])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
tf.logging.info("*** Example ***")
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.logging.info(
"%s: %s" % (feature_name, " ".join([str(x) for x in values])))
for writer in writers:
writer.close()
tf.logging.info("Wrote %d total instances", total_written)
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def create_float_feature(values):
feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return feature
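#* Not part of the original script: a minimal sketch (assuming TensorFlow 1.x) for reading the
#* generated TFRecord file back, handy for sanity-checking the features written above. The path
#* used here is only a placeholder.
def _peek_tfrecord(path="/tmp/tf_examples.tfrecord", num_records=2):
  """Prints the first few tf.train.Example records from a TFRecord file."""
  for i, record in enumerate(tf.python_io.tf_record_iterator(path)):
    if i >= num_records:
      break
    example = tf.train.Example()
    example.ParseFromString(record)
    print(example)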
def create_training_instances(input_files, tokenizer, max_seq_length,
dupe_factor, short_seq_prob, masked_lm_prob,
max_predictions_per_seq, rng):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
#* If several files are passed in, each containing many lines of sentences, all_documents becomes a nested list:
#* the first dimension is the document (documents are separated by blank lines), the second is the line within that document, and the third is the tokens of that line.
for input_file in input_files:
with tf.gfile.GFile(input_file, "r") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
#* Append this line's token list to the current document.
all_documents[-1].append(tokens)
# Remove empty documents
#* Drop the empty documents (empty lists).
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
#* Each document is processed dupe_factor times; thanks to the randomness, the repeated passes produce different training examples rather than duplicates.
#* The training instances themselves are generated by create_instances_from_document.
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng))
rng.shuffle(instances)
return instances
#* Generate training instances from a single document; each instance carries the following information:
'''
tokens: the tokens of sentence A and sentence B, including [MASK] tokens
segment_ids: sentence markers, 0 for the first sentence and 1 for the second
is_random_next: whether the second sentence was sampled at random or really follows the first
masked_lm_positions: the positions in tokens that were masked, e.g. [20, 32, ...]
masked_lm_labels: the original tokens at the masked positions
Return value: instances
'''
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
#* Account for [CLS], [SEP], [SEP]; max_num_tokens is the maximum number of tokens per instance (sentences A and B combined).
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
#* short_seq_prob narrows the gap between pre-training and fine-tuning: with this probability, examples shorter than max_seq_length are produced.
#* target_seq_length is only a rough target, while max_num_tokens is the hard upper bound; the chunking logic below compares against target_seq_length.
target_seq_length = max_num_tokens
#* Draw a random number; if it falls below short_seq_prob, the target length for this pass over the document becomes a random value in [2, max_num_tokens].
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
#* document is a list of lists: the first dimension is the line, the second dimension holds that line's tokens.
#* Loop over every line (sentence) of the document.
while i < len(document):
segment = document[i]
#* current_chunk is likewise a list of lists (line -> tokens of that line); it accumulates the lines processed so far (possibly more than one, as decided by the if statement below).
current_chunk.append(segment)
current_length += len(segment)
#* Once we reach the last line of the document, or the accumulated token count reaches the target length, split the chunk into sentence A and sentence B.
#* Note that i, a_end and friends are all *line* indices; each line still contains many tokens, and whole lines are moved into the A/B lists.
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
#* a_end is how many of the segments in current_chunk go into sentence A for the NSP task (again counted in lines, not tokens).
a_end = 1
#* If current_chunk holds at least two lines, choose the split point at random.
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
#* Collect all tokens of sentence A into a flat (1-D) list.
tokens_a = []
for j in range(a_end):
#* extend adds the elements of current_chunk[j] directly to tokens_a, so tokens_a remains a flat list.
tokens_a.extend(current_chunk[j])
#* Collect the tokens of sentence B: either the real continuation of sentence A, or a span of some length from another document, depending on is_random_next.
#* The chosen tokens go into tokens_b.
tokens_b = []
# Random next
is_random_next = False
#* Use a sentence from another document: always when current_chunk holds only one line, otherwise 50% of the time.
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
#* First work out how many tokens sentence B may use, target_b_length.
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
#* Pick a random document to draw sentence B from; try up to ten times until its index differs from the current document.
for _ in range(10):
random_document_index = rng.randint(0, len(all_documents) - 1)
if random_document_index != document_index:
break
#* Take sentence B from that document: the starting line is chosen at random, and lines are appended until target_b_length tokens have been collected.
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
#* A nice touch: since sentence B was sampled at random, the real continuation of sentence A was never used, so the index i is moved backwards rather than wasting that data.
#* The rewind is counted in lines: every segment of current_chunk after a_end would otherwise be discarded, so i is decreased by exactly that many lines.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
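#* A hypothetical illustration of the rewind: if current_chunk holds 3 segments and a_end is 1,
#* only the first segment went into sentence A; because B was sampled from another document,
#* num_unused_segments is 2 and i is moved back by 2, so those two segments are re-processed
#* in later iterations instead of being thrown away.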
# Actual next
else:
#* If B really follows A, append every line of current_chunk after a_end to tokens_b, line by line.
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
#* If the combined length of A and B exceeds max_num_tokens, drop tokens one by one from the front or the back of the longer of the two.
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
#* Build the full token sequence (with the special separators) and the matching segment_ids from sentences A and B.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
#* From tokens, produce the masked token sequence, the masked positions masked_lm_positions and the original tokens at those positions masked_lm_labels (all lists).
#* masked_lm_prob is the masking probability and max_predictions_per_seq the per-example cap; the number of masked positions is the smaller of round(token count * masking probability) and that cap.
(tokens, masked_lm_positions,
masked_lm_labels) = create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
#* Store the masked tokens, the A/B segment ids, whether B is the real or a random continuation, the masked token positions and their true labels in a TrainingInstance.
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
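#* One possible trace of the loop above (all numbers are hypothetical): a document has 4 sentences
#* with token counts [30, 40, 50, 20] and target_seq_length is 125. Sentences are accumulated into
#* current_chunk until the last line is reached or current_length >= 125, so the first instance is
#* built from all 4 sentences (140 tokens). If a_end is drawn as 2, tokens_a holds the first two
#* sentences (70 tokens); with is_random_next False, tokens_b holds the remaining two (70 tokens),
#* and truncate_seq_pair then trims the pair down to max_num_tokens.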
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
'''
For each input sequence, at most about 15% of the tokens are masked; of those, 80% are replaced by
[MASK], 10% keep the original word, and 10% are replaced by a random word from the vocabulary.
How is this implemented in code? First collect the index of every token, shuffle those indices,
and take roughly the first 15% of them for replacement. During replacement, a second random draw
realizes the 80% [MASK] / 10% keep / 10% random split; the use of the random functions here is quite elegant.
'''
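#* A quick worked example with the default flags: for an instance of 128 tokens,
#* round(128 * 0.15) = 19, so num_to_predict = min(max_predictions_per_seq=20, 19) = 19 positions
#* are masked. For each chosen position, rng.random() < 0.8 yields [MASK] with probability 0.8;
#* otherwise a second rng.random() < 0.5 keeps the original token (0.2 * 0.5 = 0.1) or substitutes
#* a random vocabulary word (0.2 * 0.5 = 0.1), which reproduces the 80/10/10 split.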
#* Inputs: the tokens of one example, the masking probability, the per-example cap on masked positions (the actual count is the smaller of token count * masking probability and this cap), the vocabulary words and an rng.
#* Outputs: the masked token list, the list of masked position indices masked_lm_positions, and the list of original tokens at those positions masked_lm_labels.
def create_masked_lm_predictions(tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
for (i, token) in enumerate(tokens):
#* Never mask the special tokens [CLS] and [SEP].
if token == "[CLS]" or token == "[SEP]":
continue
# Whole Word Masking means that if we mask all of the wordpieces
# corresponding to an original word. When a word has been split into
# WordPieces, the first token does not have any marker and any subsequence
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and
token.startswith("##")):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
#* cand_indexes holds the indices of every maskable token (excluding [CLS] and [SEP]). It is deliberately a list of lists: with whole word masking, all WordPieces of one word share an inner list so they can be masked together; without --do_whole_word_mask every inner list holds a single index.
#* Shuffle the candidate index sets.
rng.shuffle(cand_indexes)
#* output_tokens is the token list that is finally returned, with [MASK] substituted in where needed.
output_tokens = list(tokens)
#* Number of tokens to mask: the smaller of max_predictions_per_seq and sequence length * masking ratio (but at least 1).
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
masked_lms = []
#* covered_indexes records the token positions that have already been masked.
covered_indexes = set()
#* Because the candidate sets were shuffled, simply taking sets from the front until num_to_predict positions are masked is enough.
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
#* Check whether any index in this candidate set has already been masked; if so skip it, otherwise the new indices are added to covered_indexes below.
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
#* Perform the masking: 80% of the time replace the token with [MASK], 10% keep the original token, 10% replace it with a random word from the vocabulary.
#* Write the result into output_tokens, and record the original index and the true label of every masked token in masked_lms.
for index in index_set:
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if rng.random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
output_tokens[index] = masked_token
#* Remember the original position of the masked token together with its true label.
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
assert len(masked_lms) <= num_to_predict
#* Re-sort by index so the masked positions appear in their original order within the sentence.
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
#* Record the masked indices and their original labels in two parallel lists.
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels)
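#* Not part of the original script: a minimal sketch of calling create_masked_lm_predictions on a
#* toy input (the tiny vocabulary below is made up; in practice vocab_words comes from the real
#* vocab file). The function reads FLAGS.do_whole_word_mask, so only call this after the flags
#* have been parsed, e.g. from inside main().
def _demo_masking():
  demo_rng = random.Random(12345)
  toy_vocab = ["the", "dog", "is", "cute", "he", "likes", "playing", "[MASK]", "[CLS]", "[SEP]"]
  toy_tokens = ["[CLS]", "the", "dog", "is", "cute", "[SEP]", "he", "likes", "playing", "[SEP]"]
  out_tokens, positions, labels = create_masked_lm_predictions(
      toy_tokens, masked_lm_prob=0.15, max_predictions_per_seq=2,
      vocab_words=toy_vocab, rng=demo_rng)
  print(out_tokens)
  print(positions, labels)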
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
#* If the combined length of sentence A and sentence B exceeds the maximum, repeatedly drop the first or last token of the longer one.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
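#* A hypothetical example: with max_num_tokens = 125, a tokens_a of length 70 and a tokens_b of
#* length 70, the loop removes 15 tokens in total, each time dropping either the first or the last
#* token of whichever sequence is currently longer (ties go to tokens_b), choosing front or back at
#* random to avoid a positional bias.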
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
#* input_files is simply a list of file paths; several input files may be given.
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Reading from input files ***")
for input_file in input_files:
tf.logging.info(" %s", input_file)
rng = random.Random(FLAGS.random_seed)
#* Generate the training instances (masked tokens and related information) according to the flags, ready to be written out below.
instances = create_training_instances(
input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor,
FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq,
rng)
output_files = FLAGS.output_file.split(",")
tf.logging.info("*** Writing to output files ***")
for output_file in output_files:
tf.logging.info(" %s", output_file)
#* The meaning of every feature written to the TFRecord (input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, masked_lm_weights, next_sentence_labels) is explained inside write_instance_to_example_files above.
write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length,
FLAGS.max_predictions_per_seq, output_files)
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("output_file")
flags.mark_flag_as_required("vocab_file")
tf.app.run()
If you run into any questions while reading, please leave them in the comments so we can discuss! I am still a beginner in NLP myself and would be glad to exchange ideas with more experienced readers!