TensorFlow 2.0 in Practice: RNN Sentiment Classification
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential, layers, datasets, optimizers, losses
import numpy as np
import os
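# Fix the random seeds for reproducibility and silence TensorFlow's C++ INFO/WARNING logs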
tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')
# Load the data
# Batch size
batchsize = 128
# Vocabulary size
total_words = 10000
# Maximum sentence length s: longer sentences are truncated, shorter ones are padded
max_review_len = 80
# Word embedding feature length n
embedding_len = 100
# Load the IMDB dataset; the data is integer-encoded, one integer per word
(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=total_words)
# Print the shapes of the inputs and the labels
# print(x_train.shape, len(x_train[0]), y_train.shape) # (25000,) 218 (25000,)
# print(x_test.shape, len(x_test[0]), y_test.shape) # (25000,) 68 (25000,)
# Word-to-integer encoding table
word_index = keras.datasets.imdb.get_word_index()
# Print each word in the table and its integer code
# for k, v in word_index.items():
# print(k, v)
# The first four IDs are reserved for special tokens
word_index = {k: (v + 3) for k, v in word_index.items()}
# Padding token
word_index["<PAD>"] = 0
# Start-of-sentence token
word_index["<START>"] = 1
# Unknown-word token
word_index["<UNK>"] = 2
# Unused-word token
word_index["<UNUSED>"] = 3
# Invert the encoding table: integer -> word
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
# Convert an integer-encoded sentence back to a readable string
def decode_review(text):
    return ' '.join([reverse_word_index.get(i, '?') for i in text])
# print(decode_review(x_train[0]))
# Truncate and pad the sentences to equal length; long sentences keep their tail, short ones are padded at the front
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len)
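# Note: pad_sequences defaults to padding='pre' and truncating='pre', so a long
# review keeps its last max_review_len words and a short one is left-padded with
# 0 (<PAD>). A minimal sketch on a hypothetical toy sequence:
# print(keras.preprocessing.sequence.pad_sequences([[1, 2, 3, 4, 5]], maxlen=3))  # [[3 4 5]]
# print(keras.preprocessing.sequence.pad_sequences([[7]], maxlen=3))  # [[0 0 7]]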
# Build the datasets: shuffle, batch, and drop the last batch if it is smaller than batchsize
db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.shuffle(1000).batch(batchsize, drop_remainder=True)
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.batch(batchsize, drop_remainder=True)
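# Each element of db_train is now an (x, y) pair with x of shape [128, 80]
# (batchsize x max_review_len) and y of shape [128]; a quick sanity check:
# x_b, y_b = next(iter(db_train))
# print(x_b.shape, y_b.shape)  # (128, 80) (128,)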
# Inspect dataset properties
# print('x_train shape:', x_train.shape, tf.reduce_max(y_train), tf.reduce_min(y_train))
# print('x_test shape:', x_test.shape)
# Network model
class MyRNN(keras.Model):
    # Build a multi-layer network out of RNN cells
    def __init__(self, units):
        super(MyRNN, self).__init__()
        # [b, 64], initial state vectors for the cells, reused every batch
        self.state0 = [tf.zeros([batchsize, units])]
        self.state1 = [tf.zeros([batchsize, units])]
        # Word embedding: [b, 80] => [b, 80, 100]
        self.embedding = layers.Embedding(total_words, embedding_len, input_length=max_review_len)
        # Build two cells; dropout is used to reduce overfitting
        self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.5)
        self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.5)
        # Classification head that maps the cell output features to the 2 classes
        # [b, 80, 100] => [b, 64] => [b, 1]
        self.outlayer = Sequential([
            layers.Dense(units),
            layers.Dropout(rate=0.5),
            layers.ReLU(),
            layers.Dense(1)])
    def call(self, inputs, training=None):
        # [b, 80]
        x = inputs
        # Look up the word embeddings: [b, 80] => [b, 80, 100]
        x = self.embedding(x)
        # Run the sequence through the 2 RNN cells: [b, 80, 100] => [b, 64]
        state0 = self.state0
        state1 = self.state1
        # word: [b, 100]
        for word in tf.unstack(x, axis=1):
            out0, state0 = self.rnn_cell0(word, state0, training=training)
            out1, state1 = self.rnn_cell1(out0, state1, training=training)
        # The last output of the top cell feeds the classification head: [b, 64] => [b, 1]
        x = self.outlayer(out1, training=training)
        # Squash to a probability, p(y is pos | x)
        prob = tf.sigmoid(x)
        return prob
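# For comparison, a minimal layer-level sketch (not used in this script) that
# replaces the manual per-timestep loop with layers.SimpleRNN; the first layer
# must return the full sequence so the second layer can consume it:
# alt_model = Sequential([
#     layers.Embedding(total_words, embedding_len, input_length=max_review_len),
#     layers.SimpleRNN(64, dropout=0.5, return_sequences=True),
#     layers.SimpleRNN(64, dropout=0.5),
#     layers.Dense(1, activation='sigmoid')])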
def main():
    # RNN state vector size n
    units = 64
    # Number of training epochs
    epochs = 50
    # Build the model
    model = MyRNN(units)
    # Compile; experimental_run_tf_function=False runs the train step eagerly,
    # which this cell-based model needs in early TF 2.x (the flag was later removed)
    model.compile(optimizer=optimizers.Adam(1e-3), loss=losses.BinaryCrossentropy(), metrics=['accuracy'],
                  experimental_run_tf_function=False)
    # Train and validate
    model.fit(db_train, epochs=epochs, validation_data=db_test)
    # Test
    model.evaluate(db_test)
if __name__ == '__main__':
    main()
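# Note: layers.LSTMCell or layers.GRUCell could be swapped in for SimpleRNNCell
# above; an LSTMCell carries two state tensors, so each initial state would be
# [tf.zeros([batchsize, units]), tf.zeros([batchsize, units])].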