之前把模型介绍了,也自己写了代码实现,但一直拖着没有把博客写出来。最近要做句子相似度相关的项目,想要自己写一个模型,想到了这个简单的模型,才惊觉博客一直没写,所以现在补课,也是一个复习TensorFlow实现模型的过程。本文主要参考了Denny Britz的文章和代码。
1.数据及预处理
实验所用的数据集是Movie Review data from Rotten Tomatoes,即MR电影评论数据,其中包含10662条评论,一半正面评论,一半负面。共包含18758个单词(vocab_size),最长的评论有56个单词(Padding,sequence_len),保存在data目录下的rt-polarity.neg和rt-polarity.pos文件中。这里我们使用10%作为验证集,剩下的作为训练集。数据预处理部分写在data_helpers.py文件中,其实代码很简单,这里仅对load_data_and_labels函数进行介绍:
def load_data_and_labels(positive_data_file, negative_data_file):
    """Load review sentences from the two data files and build one-hot labels.

    Args:
        positive_data_file: path to a file with one positive review per line.
        negative_data_file: path to a file with one negative review per line.

    Returns:
        [x_text, y]: x_text is a list of cleaned sentences (all positives
        first, then all negatives); y is a numpy array of one-hot labels
        aligned row-for-row with x_text ([0, 1] = positive, [1, 0] = negative).
    """
    # Read each file as a list of lines, stripping surrounding whitespace
    # (newlines, spaces) from every sentence. Use context managers so the
    # file handles are closed deterministically — the original code left
    # them open until garbage collection.
    with open(positive_data_file, "r", encoding="utf-8") as f:
        positive_examples = [s.strip() for s in f.readlines()]
    with open(negative_data_file, "r", encoding="utf-8") as f:
        negative_examples = [s.strip() for s in f.readlines()]
    # Merge the two lists; the positives-then-negatives order must match
    # the label order built below.
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    # One-hot labels: positive reviews get [0, 1], negative reviews [1, 0].
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    # y stacks the labels into a 2-D array so that y[i] labels x_text[i].
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y]
然后我们将数据载入,构建vocabulary,并且将每个句子转化为单词的索引列表。这部分代码在train.py中实现:
# Load data
print("loading data....")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
# Build vocabulary
# Longest sentence length (in words), used for padding; 56 for this dataset.
max_document_length = max([len(x.split(" ")) for x in x_text])
# VocabularyProcessor scans x_text and assigns each word an index in order of
# first appearance (first word seen -> 1, second -> 2, ...; 0 is the padding id).
# fit_transform then maps every sentence to a fixed-length list of word indices,
# producing a nested list like [[1,2,3,4...],...,[55,66,777...]].
# Each inner list is one sentence, so x is 10662 x 56.
# Note: the vocabulary object and x are two different things.
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
print("x::", x)
# Randomly shuffle data (fixed seed so the split is reproducible)
np.random.seed(10)
# np.arange builds an arithmetic range: start 0, stop len(y), step 1.
# permutation returns a new shuffled array without mutating the original.
shuffle_indices = np.random.permutation(np.arange(len(y)))
# After shuffling, rows of x and y still correspond one-to-one.
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
# Split train/dev sets: the last 10% is the dev set, the rest is for training.
# The negative index counts backwards from the end of the array.
dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
print(len(y))
print(dev_sample_index)
x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
del x, y, x_shuffled, y_shuffled
print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
2.构建CNN网络模型
代码在text_cnn.py中,大家可以参考上一篇模型介绍的博客。因为代码中的注释已经比较明确了,所以这里不做过多的介绍,大家进去看看注释就好~
class TextCNN(object):
    """A CNN for text classification.

    Architecture: an embedding layer, followed by parallel convolution +
    max-pooling branches (one per filter size), dropout, and a softmax
    output layer.
    """

    def __init__(
            # sequence_length: padded sentence length (56 here)
            # num_classes: number of output classes (2)
            # vocab_size: vocabulary size (18758)
            # embedding_size: word-vector dimension (128)
            # filter_sizes: words covered by one kernel, e.g. [3, 4, 5]
            # num_filters: number of kernels per filter size (128)
            # l2_reg_lambda: L2 regularization strength, defaults to 0
            self, sequence_length, num_classes, vocab_size,
            embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
        # Placeholders for the input batch, the labels and the dropout
        # keep-probability.
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Accumulator for the (optional) L2 regularization loss.
        l2_loss = tf.constant(0.0)

        # Embedding layer: look up a word vector for every word index in
        # input_x. The matrix is randomly initialized rather than loaded from
        # word2vec — the "non-static" setting from the paper.
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W")
            # input_x is [None, 56]; looking up all 56 words per sentence
            # yields embedded_chars of shape [None, 56, 128].
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
            # conv2d expects 4-D input (batch, height, width, channels), so
            # append a channel dimension of size 1:
            # embedded_chars_expanded is [None, 56, 128, 1].
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

        # Build one convolution + max-pool branch per filter size and merge
        # the pooled results into a single feature vector.
        pooled_outputs = []
        for filter_size in filter_sizes:
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Kernel shape [height, width, in_channels, out_channels]
                # = [3/4/5, 128, 1, 128].
                kernel_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(kernel_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                # VALID padding: the output height per sentence is
                # sequence_length - filter_size + 1.
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv"
                )
                # Non-linearity: relu(W * x + b).
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Max-pool over the full feature-map height, leaving one
                # value per kernel: output shape [None, 1, 1, num_filters].
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool"
                )
                # pooled_outputs ends up as a list of len(filter_sizes)
                # tensors, each of shape [None, 1, 1, 128].
                pooled_outputs.append(pooled)

        # Combine all the pooled features: concatenating on the channel axis
        # gives [None, 1, 1, 384], then flatten to [None, 384].
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        # Dropout on the flattened feature vector.
        with tf.name_scope("drop_out"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        # Final (unnormalized) scores and predictions: a fully connected
        # layer (W * h + b); the prediction is the argmax over the scores.
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer()
            )
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="prediction")

        # Mean softmax cross-entropy between scores and input_y, plus the
        # weighted L2 term.
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        # Accuracy: fraction of predictions that match the true label argmax.
        with tf.name_scope("accuracy"):
            # equal returns a boolean tensor per example.
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            # cast maps booleans to floats (True -> 1.0) before averaging.
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
对上述代码中使用的常用函数做个总结:
1,tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, data_format=None,
name=None):执行卷积操作
input是一个四维Tensor:[batch, in_height, in_width, in_channels]
filter是一个四维Tensor:[filter_height, filter_width, in_channels,
out_channels]
strides卷积步长四维Tensor:必须满足[1, stride, stride, 1]的格式,每一位代表在输入上每维移动的步长。
padding:“SAME”或者“VALID”,意味着宽卷积和窄卷积。
2,tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)输出服从均匀分布[minval, maxval)的随机初始化函数。shape为要输出结果尺寸
3,tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)输出服从截断正态随机分布的随机初始化函数。生成的值会遵循一个指定了平均值和标准差的正态分布,只保留两个标准差以内的值,超出的值会被弃掉重新生成。
3.训练代码
代码里的注释已经比较详细了,所以这里不做过多介绍
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
# Session configuration:
# allow_soft_placement: if the requested device does not exist, let TF
# fall back to an available one automatically.
# log_device_placement: whether to log which device each op is placed on.
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement
)
sess = tf.Session(config=session_conf)
with sess.as_default():
cnn = TextCNN(
sequence_length=x_train.shape[1],
num_classes=y_train.shape[1],
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
num_filters=FLAGS.num_filters,
l2_reg_lambda=FLAGS.l2_reg_lambda
)
# Define Training procedure
# trainable=False: global_step is a bookkeeping Variable, not a network
# parameter, so no gradient is computed or applied for it.
global_step = tf.Variable(0, name="global_step", trainable=False)
# Adam optimizer with learning rate 1e-3.
optimizer = tf.train.AdamOptimizer(1e-3)
# compute_gradients returns a list of (gradient, variable) pairs for the loss.
grads_and_vars = optimizer.compute_gradients(cnn.loss)
# apply_gradients performs the actual update, applying each gradient to its
# variable (the gradient-descent step), and increments global_step.
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Record network statistics we want to visualize in TensorBoard as summaries.
grad_summaries = []
for g, v in grads_and_vars:
if g is not None:
grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
# tf.nn.zero_fraction returns the fraction of zero entries in the tensor.
sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.summary.merge(grad_summaries)
# Output directory for models and summaries
timestamp = str(int(time.time()))
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
print("Writing to {}\n".format(out_dir))
# Summaries for loss and accuracy
loss_summary = tf.summary.scalar("loss", cnn.loss)
acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)
# Train summaries
train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Dev summaries
dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)
# checkpoint directory. Tensorflow assume this directory already exists so we need to create it
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoint"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)
# Write vocabulary
vocab_processor.save(os.path.join(out_dir, "vocab"))
# Initialize all variables
sess.run(tf.global_variables_initializer())
def train_step(x_batch, y_batch):
"""
a single training step
"""
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
}
# Run one optimization step and fetch the tensors used for logging.
_, step, summaries, loss, accuracy = sess.run(
[train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
feed_dict
)
time_str = datetime.datetime.now().isoformat()
print("{}: step{}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
train_summary_writer.add_summary(summaries, step)
# Evaluate the model on the dev (validation) set.
def dev_step(x_batch, y_batch, writer=None):
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: 1.0
}
step, summaries, loss, accuracy = sess.run(
[global_step, dev_summary_op, cnn.loss, cnn.accuracy],
feed_dict
)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
if writer:
writer.add_summary(summaries, step)
# Generate batches
# zip pairs each x_train row with its matching y_train label.
batches = data_helpers.batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
# Training loop. For each batch...
x_batch, y_batch = zip(*batch) # unpack the batch back into x and y tuples
for batch in batches:
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, global_step)
print("current_step:", current_step)
if current_step % FLAGS.evaluate_every == 0:
print("\nEvaluation:")
dev_step(x_dev, y_dev, writer=dev_summary_writer)
print("")
if current_step % FLAGS.checkpoint_every == 0:
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
print("Saved model checkpoint to {}\n".format(path))