# 待补全…… (to be completed)
'''
RNN 实现文本分类
文本生成
'''
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras
# IMDB dataset: movie reviews labeled as positive or negative
imdb = keras.datasets.imdb
vocab_size = 10000  # keep only the 10k most frequent words
index_from = 3      # word ids in the data are offset by this amount, reserving low ids for special tokens
# Load the train/test split, restricted to the vocab_size most common words
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(
    num_words=vocab_size, index_from=index_from)
# Load the word -> id vocabulary
word_index = imdb.get_word_index()
# Shift every id up by index_from (was a hard-coded 3) so ids 0..2 stay free
# for the special tokens defined below
word_index = {k: (v + index_from) for k, v in word_index.items()}
# Special tokens occupying the reserved low ids
word_index['<PAD>'] = 0
word_index['START'] = 1
word_index['UNK'] = 2
word_index['END'] = 3
# Reverse mapping (id -> word) used to decode reviews back to text
reverse_word_index = dict([(value, key) for key, value in word_index.items()])
#解码看一下文本是什么:构建词表索引
def decode_review(text_ids):
    """Decode a sequence of word ids back into a readable review string.

    Ids missing from the reverse vocabulary fall back to the literal "UNK".
    Words are joined with spaces (the original ''.join concatenated all
    words with no separator, producing unreadable output).
    """
    return ' '.join([reverse_word_index.get(word_id, "UNK") for word_id in text_ids])

decode_review(train_data[0])  # decode and inspect the first training review
# Pad every review to a fixed length
max_length = 500  # reviews shorter than 500 tokens get padded, longer ones truncated
pad_id = word_index['<PAD>']

train_data = keras.preprocessing.sequence.pad_sequences(
    train_data,          # list of lists of word ids
    value=pad_id,
    padding='post',      # 'post' appends padding after the review; 'pre' would prepend it
    maxlen=max_length,
)
test_data = keras.preprocessing.sequence.pad_sequences(
    test_data,           # list of lists of word ids
    value=pad_id,
    padding='post',
    maxlen=max_length,
)

# Show the first (now padded) training sample
print(train_data[0])
# Build the model: embedding -> bidirectional RNN -> dense classifier
embedding_dim = 16  # each word id is embedded into a 16-dim vector
batch_size = 128

double_rnn_model = keras.models.Sequential()
# Embedding layer maps word ids to dense vectors
double_rnn_model.add(
    keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length))
# For stacked RNNs every layer but the last would use return_sequences=True;
# this single bidirectional SimpleRNN returns only its final state
double_rnn_model.add(
    keras.layers.Bidirectional(keras.layers.SimpleRNN(units=32, return_sequences=False)))
double_rnn_model.add(keras.layers.Dense(32, activation="relu"))
# Sigmoid output for binary (positive/negative) classification
double_rnn_model.add(keras.layers.Dense(1, activation="sigmoid"))

double_rnn_model.summary()
double_rnn_model.compile(optimizer="adam", loss="binary_crossentropy", metrics=['accuracy'])
# Train with mini-batches, holding out 20% of the training set for validation
history_double_rnn = double_rnn_model.fit(
    train_data, train_labels,
    epochs=30,
    batch_size=batch_size,
    validation_split=0.2,
)
def plot_learning_curves(history, label, epochs, min_value, max_value):
    """Plot a training metric and its validation counterpart over epochs.

    `history` is the object returned by `model.fit`; `label` is the metric
    name (e.g. 'accuracy' or 'loss'). The x axis spans [0, epochs] and the
    y axis spans [min_value, max_value].
    """
    curves = {key: history.history[key] for key in (label, 'val_' + label)}
    pd.DataFrame(curves).plot(figsize=(8, 5))
    plt.grid(True)
    plt.axis([0, epochs, min_value, max_value])
    plt.show()
# Plot both training curves, then score the model on the held-out test set
for metric in ('accuracy', 'loss'):
    plot_learning_curves(history_double_rnn, metric, 30, 0, 1)
double_rnn_model.evaluate(test_data, test_labels, batch_size=batch_size)
# 运行结果 (output):