# coding=utf-8
# Annotated BERT fine-tuning script.
# Original author: Google; annotations: putdoor.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import random
import optimization  # optimizer
import tokenization  # tokenizer
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
# tf.flags lets parameters be passed on the command line,
# e.g.: python test_flags.py --model "My model"
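# A typical invocation, as a sketch (the script name and all paths below are
# placeholders for your own setup, not part of this file):
#
#   python run_classifier.py \
#     --task_name=sentiment \
#     --do_train=true \
#     --do_eval=true \
#     --data_dir=/path/to/corpus \
#     --vocab_file=chinese_L-12_H-768_A-12/vocab.txt \
#     --bert_config_file=chinese_L-12_H-768_A-12/bert_config.json \
#     --init_checkpoint=chinese_L-12_H-768_A-12/bert_model.ckpt \
#     --max_seq_length=128 \
#     --output_dir=/path/to/output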
flags.DEFINE_string("data_dir","/home/work/my_model/bert/corpus/sentiment",# 此sentiment下应有三个文件,名称分别为:train.tsv, eval.tsv, test.tsv"The input data dir. Should contain the .tsv files (or other data files) ""for the task.")
flags.DEFINE_string("bert_config_file", os.path.join(os.path.dirname(os.path.abspath(__file__)),'chinese_L-12_H-768_A-12/bert_config.json'),"The config json file corresponding to the pre-trained BERT model. ""This specifies the model architecture.")
flags.DEFINE_string("task_name","sentiment","The name of the task to train.")# 自己随意起一个便于标识processor类的任务名,后续需填入main函数的字典
flags.DEFINE_string("vocab_file", os.path.join(os.path.dirname(os.path.abspath(__file__)),'chinese_L-12_H-768_A-12/vocab.txt'),"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string("output_dir","/home/work/my_model/output","The output directory where the model checkpoints will be written.")# Other parameters
flags.DEFINE_string("init_checkpoint",None,"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool("do_lower_case",True,"Whether to lower case the input text. Should be True for uncased ""models and False for cased models.")
flags.DEFINE_integer("max_seq_length",128,"The maximum total input sequence length after WordPiece tokenization. ""Sequences longer than this will be truncated, and sequences shorter ""than this will be padded.")
flags.DEFINE_bool("do_train",False,"Whether to run training.")
flags.DEFINE_bool("do_eval",False,"Whether to run eval on the dev set.")
flags.DEFINE_bool("do_predict",False,"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size",32,"Total batch size for training.")# mini-batch方式的梯度下降,每批处理32个样本
flags.DEFINE_integer("eval_batch_size",8,"Total batch size for eval.")# batch为每批样本的数量,每批更新一次权重,使loss最小
flags.DEFINE_integer("predict_batch_size",8,"Total batch size for predict.")
flags.DEFINE_float("learning_rate",5e-5,"The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs",3.0,# 所有样本轮一次,即为一个epoch,增大此值,计算量增大,一个20000条数据的二分类问题,epochs=4大概要10分钟(16G 单GPU)"Total number of training epochs to perform.")
flags.DEFINE_float("warmup_proportion",0.1,# 预热训练中,线性地增加学习率,详见注意力机制部分"Proportion of training to perform linear learning rate warmup for. ""E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps",1000,# 保存检查点时的步数,达到1000时,保存一次模型"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop",1000,# 在每个estimator调用中执行多少步骤"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu",False,"Whether to use TPU or GPU/CPU.")# TPU config:
tf.flags.DEFINE_string("tpu_name",None,"The Cloud TPU to use for training. This should be either the name ""used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 ""url.")
tf.flags.DEFINE_string("tpu_zone",None,"[Optional] GCE zone where the Cloud TPU is located in. If not ""specified, we will attempt to automatically detect the GCE project from ""metadata.")
tf.flags.DEFINE_string("gcp_project",None,"[Optional] Project name for the Cloud TPU-enabled project. If not ""specified, we will attempt to automatically detect the GCE project from ""metadata.")
tf.flags.DEFINE_string("master",None,"[Optional] TensorFlow master URL.")
flags.DEFINE_integer("num_tpu_cores",8,"Only used if `use_tpu` is True. Total number of TPU cores to use.")classInputExample(object):# 每一行数据 to Inputexample对象"""A single training/test example for simple sequence classification."""def__init__(self, guid, text_a, text_b=None, label=None):"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
    self.guid = guid
    self.text_a = text_a
    self.text_b = text_b
    self.label = label
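
# As an illustration (values made up), one training row such as
# "1<TAB>服务很好" would be wrapped as:
#   InputExample(guid="train-0", text_a=u"服务很好", text_b=None, label="1")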


class PaddingInputExample(object):
  """Fake example used for padding.

  When training/evaluating on TPU, the number of eval and predict examples
  must be a multiple of the batch size; instances of this class pad the input
  up to that multiple.
  """


class InputFeatures(object):
  """A single set of features of data."""

  def __init__(self,
               input_ids,    # input: WordPiece token ids; the sequence starts with [CLS] and segments end with [SEP]
               input_mask,   # input: 1 for real tokens, 0 for padding (not a position embedding; BERT learns positions separately)
               segment_ids,  # input: segment embedding ids encoding the text_a / text_b sentence relation
               label_id,     # output: the label, i.e. Y
               is_real_example=True):
    self.input_ids = input_ids
    self.input_mask = input_mask
    self.segment_ids = segment_ids
    self.label_id = label_id
    self.is_real_example = is_real_example


class DataProcessor(object):
  """Base class for data converters for sequence classification data sets."""

  def get_train_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the train set."""
    raise NotImplementedError()

  def get_dev_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the dev set."""
    raise NotImplementedError()

  def get_test_examples(self, data_dir):
    """Gets a collection of `InputExample`s for prediction."""
    raise NotImplementedError()

  def get_labels(self):
    """Gets the list of labels for this data set."""
    raise NotImplementedError()

  @classmethod
  def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab separated value file."""
    with tf.gfile.Open(input_file, "r") as f:
      reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
      lines = []
      for line in reader:
        lines.append(line)
      return lines


class SentimentProcessor(DataProcessor):  # custom processor for a binary classification task
  """Processor for a binary sentiment data set (adapted from the CoLA processor)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]  # a binary task uses the labels "0" and "1"

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      # Unlike the original ColaProcessor, no header row is skipped here, so
      # the .tsv files are assumed to have no header.
      guid = "%s-%s" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[1])
      label = tokenization.convert_to_unicode(line[0])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples
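
# Expected .tsv layout, as a sketch (tab-separated, label in column 0, text in
# column 1, no header row; the sentences are made-up examples):
#
#   1<TAB>服务很好,下次还来
#   0<TAB>物流太慢,体验很差
#
# _read_tsv turns each row into a list like ["1", "服务很好,下次还来"], from
# which _create_examples reads line[0] as the label and line[1] as text_a.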


def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
  """Converts a single `InputExample` into a single `InputFeatures`."""

  if isinstance(example, PaddingInputExample):
    # The example is only a padding placeholder, so return all-zero features.
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        label_id=0,
        is_real_example=False)
  label_map = {}
  for (i, label) in enumerate(label_list):  # map each label string to its integer id
    label_map[label] = i

  tokens_a = tokenizer.tokenize(example.text_a)
  tokens_b = None
  if example.text_b:
    tokens_b = tokenizer.tokenize(example.text_b)

  if tokens_b:
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3".
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
  else:
    # Account for [CLS] and [SEP] with "- 2".
    if len(tokens_a) > max_seq_length - 2:
      tokens_a = tokens_a[0:(max_seq_length - 2)]

  # The convention in BERT is:
  # (a) For sequence pairs:
  #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  # (b) For single sequences:
  #  tokens:   [CLS] the dog is hairy . [SEP]
  #  type_ids: 0     0   0   0  0     0 0
  tokens = []
  segment_ids = []
  tokens.append("[CLS]")
  segment_ids.append(0)
  for token in tokens_a:
    tokens.append(token)
    segment_ids.append(0)
  tokens.append("[SEP]")
  segment_ids.append(0)

  if tokens_b:
    for token in tokens_b:
      tokens.append(token)
      segment_ids.append(1)
    tokens.append("[SEP]")
    segment_ids.append(1)

  # The tokenizer (provided by the BERT source) splits the sentence into
  # word pieces and maps each piece to its vocabulary id.
  input_ids = tokenizer.convert_tokens_to_ids(tokens)

  # The mask has 1 for real tokens and 0 for padding tokens; only real tokens
  # are attended to. (Token order is handled by BERT's position embeddings,
  # not by this mask.)
  input_mask = [1] * len(input_ids)

  # Zero-pad up to the sequence length.
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)

  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length

  label_id = label_map[example.label]
  if ex_index < 5:  # log the first five examples for inspection
    tf.logging.info("*** Example ***")
    tf.logging.info("guid: %s" % (example.guid))
    tf.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in tokens]))
    tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
  feature = InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_id,
      is_real_example=True)
  return feature
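
# Worked example, assuming text_a = "不错" and max_seq_length = 6 (101 and 102
# are the [CLS]/[SEP] ids in the released BERT vocab files; the other ids
# depend on vocab.txt):
#   tokens:      [CLS] 不 错 [SEP]
#   input_ids:   [101, id(不), id(错), 102, 0, 0]
#   input_mask:  [1,   1,     1,     1,   0, 0]
#   segment_ids: [0,   0,     0,     0,   0, 0]
#   label_id:    label_map[example.label], e.g. 1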


# Does two things: 1. calls convert_single_example to turn each InputExample
# into an InputFeatures; 2. writes the features in TFRecord format, whose
# binary encoding loads fast and suits large data sets.
def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file):
  """Convert a set of `InputExample`s to a TFRecord file."""
  # Writes e.g. 'train.tf_record' under output_dir. Two parts below:
  # 1. the TFRecordWriter; 2. building each tf.train.Example.

  writer = tf.python_io.TFRecordWriter(output_file)

  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      # Track conversion progress.
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
    # Convert the example to features with the single-example function above.
    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)

    # The tf.train.Example part.
    def create_int_feature(values):
      f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
      return f
    features = collections.OrderedDict()  # essentially a dict
    # Each call to create_int_feature fills one entry of the features dict.
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    features["label_ids"] = create_int_feature([feature.label_id])
    features["is_real_example"] = create_int_feature(
        [int(feature.is_real_example)])

    # tf.train.Example wraps a tf.train.Features instance, which wraps the
    # feature dict.
    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())
  writer.close()
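
# Sanity-check sketch for the written file, using only the TF 1.x API (the
# path is illustrative):
#
#   for record in tf.python_io.tf_record_iterator("/path/to/train.tf_record"):
#     print(tf.train.Example.FromString(record))  # dump the first example
#     break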


# This is a closure: the outer function returns a reference to the inner
# function, and the inner function uses the outer function's parameters.
def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""
  # input_file here is the TFRecord file written above.

  name_to_features = {
      # A parsing spec under the tf.Example protocol: the shape and dtype of
      # each serialized feature.
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.FixedLenFeature([], tf.int64),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
  }

  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t

    return example

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    # For training we want heavy parallel reading and shuffling; for eval we
    # don't need shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    d = d.apply(
        tf.contrib.data.map_and_batch(
            # _decode_record: 1. parses each TFRecord into an example;
            # 2. casts int64 to int32.
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))

    return d

  return input_fn
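
# Usage sketch for the closure (file name and sizes are illustrative):
#
#   input_fn = file_based_input_fn_builder(
#       "train.tf_record", seq_length=128, is_training=True, drop_remainder=True)
#   dataset = input_fn({"batch_size": 32})  # a batched tf.data.Dataset of feature dicts
#
# TPUEstimator itself calls input_fn with the params dict, supplying the
# batch_size it chose for the current mode.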


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
  """Truncates a sequence pair in place to the maximum length."""

  # This is a simple heuristic which will always truncate the longer sequence
  # one token at a time. This makes more sense than truncating an equal percent
  # of tokens from each, since if one sequence is very short then each token
  # that's truncated likely contains more information than a longer sequence.
  while True:
    total_length = len(tokens_a) + len(tokens_b)
    if total_length <= max_length:
      break
    if len(tokens_a) > len(tokens_b):
      tokens_a.pop()
    else:
      tokens_b.pop()
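
# Worked example: with max_length = 6, len(tokens_a) = 5 and len(tokens_b) = 4,
# the loop pops one token from the longer list per iteration:
#   (5, 4) -> (4, 4) -> (4, 3) -> (3, 3), stopping once the total length is 6.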


# Does two things: 1. builds the network with the BertModel class from
# modeling.py; 2. computes the cross-entropy loss.
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings):
  """Creates a classification model."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  # In the demo, we are doing a simple classification task on the entire
  # segment.
  #
  # If you want to use the token-level output, use model.get_sequence_output()
  # instead.
  output_layer = model.get_pooled_output()  # pooled output for the [CLS] token
  hidden_size = output_layer.shape[-1].value  # hidden size

  output_weights = tf.get_variable(  # output layer weights
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(  # output layer bias
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout: keep 90% of the activations and randomly drop 10%
      # to reduce overfitting.
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)  # W*x
    logits = tf.nn.bias_add(logits, output_bias)  # W*x + b
    probabilities = tf.nn.softmax(logits, axis=-1)  # normalize the logits into (0, 1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)  # the log of each softmax value, in (-inf, 0)

    # One-hot encode the labels, i.e. add a label dimension (sparse form).
    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    # Cross-entropy loss: the smaller it is, the closer the two distributions.
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, logits, probabilities)
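
# Numeric sketch of the loss with made-up logits: for logits [2.0, 0.5],
# softmax gives probabilities of roughly [0.82, 0.18]; if the true label is 0,
# the per-example cross entropy is -log(0.82) ~= 0.20, and `loss` is the mean
# of these values over the batch.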


def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator."""

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")for name insorted(features.keys()):
tf.logging.info(" name = %s, shape = %s"%(name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
    is_real_example = None
    if "is_real_example" in features:
      # tf.cast converts the dtype.
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      # A tensor of all ones.
      is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    # 1. build the BERT model; 2. compute the loss.
    (total_loss, per_example_loss, logits, probabilities) = create_model(
        bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
        num_labels, use_one_hot_embeddings)
    tvars = tf.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:

        def tpu_scaffold():
          # Creates/collects the pieces commonly needed to train a model.
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:  # log the trainable variable names
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)
    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,                # training mode
          loss=total_loss,          # the loss to minimize
          train_op=train_op,        # the training op
          scaffold_fn=scaffold_fn)  # pieces needed to train the model
    elif mode == tf.estimator.ModeKeys.EVAL:

      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)  # index of the largest logit
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,                  # eval mode
          loss=total_loss,
          eval_metrics=eval_metrics,  # contains eval_accuracy and eval_loss
          scaffold_fn=scaffold_fn)
    else:
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          # `probabilities` are the softmax outputs computed in create_model.
          predictions={"probabilities": probabilities},
          scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn


def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  processors = {
      # Register the class that processes your data here (required). The
      # original script also registered cola/mnli/mrpc/xnli processors, whose
      # classes are not defined in this annotated version.
      "sentiment": SentimentProcessor,
  }
  # Check that the casing flag matches the pre-trained checkpoint.
  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)

  # At least one of the three modes must be requested.
  if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
    raise ValueError(
        "At least one of `do_train`, `do_eval` or `do_predict' must be True.")

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  # max_seq_length (e.g. 128) must not exceed the sequence length BERT was
  # pre-trained with.
  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))
  tf.gfile.MakeDirs(FLAGS.output_dir)  # creates output_dir, so it need not be created by hand

  task_name = FLAGS.task_name.lower()  # e.g. the custom task name "Sentiment" becomes "sentiment"

  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))

  processor = processors[task_name]()  # class -> instance

  label_list = processor.get_labels()  # for this task, label_list is ["0", "1"]

  # Load the pre-trained (Chinese) vocabulary; do_lower_case controls whether
  # the input is lower-cased.
  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
  tpu_cluster_resolver = None  # TPU cluster handling
  if FLAGS.use_tpu and FLAGS.tpu_name:  # only resolve the TPU cluster when a TPU is used
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  # per_host: per-host input pipeline (cf. num_core_per_host in XLNet).
  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(  # the TPU run configuration
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,  # steps per estimator call (default 1000)
          num_shards=FLAGS.num_tpu_cores,  # number of TPU cores (default 8)
          per_host_input_for_training=is_per_host))
  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    # Each returned item is an InputExample object holding guid, text_a,
    # label, etc., one per row of train.tsv.
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    # Number of training steps, i.e. weight updates:
    # (num examples / batch size) * num epochs.
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    # During warmup the learning rate is increased linearly.
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
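    # Numeric sketch (made-up corpus size): 20,000 examples, batch size 32 and
    # 3 epochs give num_train_steps = int(20000 / 32 * 3) = 1875 weight
    # updates; with warmup_proportion = 0.1, num_warmup_steps =
    # int(1875 * 0.1) = 187.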

  model_fn = model_fn_builder(
      bert_config=bert_config,
      num_labels=len(label_list),
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(  # build the estimator
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)

  if FLAGS.do_train:
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    # Convert the InputExamples to InputFeatures and write them as a TFRecord
    # file.
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d",len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)# 调用此函数,完成:1.TFRecord to example 2.int64 to int32
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

  if FLAGS.do_eval:
    eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    num_actual_eval_examples = len(eval_examples)
    if FLAGS.use_tpu:
      # Pad with fake examples until the count is a multiple of the batch size.
      while len(eval_examples) % FLAGS.eval_batch_size != 0:
        eval_examples.append(PaddingInputExample())
    eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")# 数量 =(实际的数量,填充的数量)
tf.logging.info(" Num examples = %d (%d actual, %d padding)",len(eval_examples), num_actual_eval_examples,len(eval_examples)- num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)# This tells the estimator to run through the entire set.
eval_steps =None# 遍历整个集合# However, if running eval on the TPU, you will need to specify the# number of steps.if FLAGS.use_tpu:# 假定使用TPU时,前面整除处理已经成功,将得到eval_steps为整数值assertlen(eval_examples)% FLAGS.eval_batch_size ==0
eval_steps =int(len(eval_examples)// FLAGS.eval_batch_size)# 如果使用tpu的话,删除剩余的部分(可能是无法整除的部分)
    # On TPU, drop any remainder batch that cannot fill batch_size.
    eval_drop_remainder = True if FLAGS.use_tpu else False
    # The input_fn parses TFRecords back into examples (casting int64 to
    # int32) and feeds the estimator.
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)
    # Compute the eval metrics on the dev set.
    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
      # Pad until the count is a multiple of the batch size.
      while len(predict_examples) % FLAGS.predict_batch_size != 0:
        predict_examples.append(PaddingInputExample())
    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    # 1. convert_single_example turns each InputExample into an InputFeatures;
    # 2. the features are written in TFRecord format for large-scale processing.
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",len(predict_examples), num_actual_predict_examples,len(predict_examples)- num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
    predict_drop_remainder = True if FLAGS.use_tpu else False
    # The input_fn parses TFRecords back into examples (casting int64 to
    # int32) and feeds the estimator.
    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder)
    result = estimator.predict(input_fn=predict_input_fn)

    # Write the test-set prediction results to a file.
    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as writer:
      num_written_lines = 0
      tf.logging.info("***** Predict results *****")
      for (i, prediction) in enumerate(result):
        # Each prediction holds the class probabilities, e.g. for a binary
        # task one column for label "0" and one for label "1".
        probabilities = prediction["probabilities"]
        if i >= num_actual_predict_examples:
          break
        # e.g. probabilities (0.98, 0.02) become the line "0.98\t0.02".
        output_line = "\t".join(
            str(class_probability)
            for class_probability in probabilities) + "\n"
        writer.write(output_line)
        num_written_lines += 1
    assert num_written_lines == num_actual_predict_examples
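
    # Illustrative test_results.tsv contents for a binary task (one row per
    # real test example, tab-separated; column 0 = probability of label "0",
    # column 1 = probability of label "1"):
    #   0.98<TAB>0.02
    #   0.13<TAB>0.87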


if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()