"""IMDB sentiment classification with a bidirectional LSTM (tf.keras).

Notebook export: cell magics and printed outputs are kept as comments so the
file is valid plain Python.
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline  # IPython magic — only works inside a Jupyter notebook
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras

print(tf.__version__)
# Notebook output: 2.0.0-alpha0
# Load the IMDB review dataset, keeping only the most frequent words.
imdb = keras.datasets.imdb
vocab_size = 10000  # num_words: cap on the vocabulary size
index_from = 3      # real word ids start at this offset (defaults to 3)
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(
    num_words=vocab_size, index_from=index_from)
word_index = imdb.get_word_index()
print(len(word_index))
# Notebook output: 88584

# Shift every id by 3 to make room for the special tokens below.
word_index = {k: (v + 3) for k, v in word_index.items()}
word_index['<PAD>'] = 0
word_index['<START>'] = 1
word_index['<UNK>'] = 2
word_index['<END>'] = 3
# id -> word lookup, used for decoding reviews back to text.
reverse_word_index = dict([(value, key) for key, value in word_index.items()])
def decode_review(text_ids):
    """Map a sequence of word ids back to a space-separated review string."""
    words = (reverse_word_index.get(word_id, "<UNK>") for word_id in text_ids)
    return ' '.join(words)
max_length = 500  # pad/truncate every review to this many tokens

# pad_sequences: reviews are variable-length, so pad them to a fixed length.
train_data = keras.preprocessing.sequence.pad_sequences(
    train_data,                     # list of lists of word ids
    value=word_index['<PAD>'],      # fill value used for padding
    padding='post',                 # 'post' pads at the end; 'pre' at the start
    maxlen=max_length)
test_data = keras.preprocessing.sequence.pad_sequences(
    test_data,                      # list of lists of word ids
    value=word_index['<PAD>'],
    padding='post',                 # 'post', 'pre'
    maxlen=max_length)
embedding_dim = 16  # dimensionality of the learned word embeddings
batch_size = 512

# Embedding -> bidirectional LSTM -> dense head for binary sentiment.
bi_rnn_model = keras.models.Sequential([
    keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    # return_sequences=False: only the final state feeds the classifier.
    keras.layers.Bidirectional(
        keras.layers.LSTM(units=32, return_sequences=False)),
    keras.layers.Dense(32, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid'),  # probability of positive review
])
bi_rnn_model.summary()