A demo of text classification with an embedding layer:
- Dataset: keras.datasets.imdb, movie reviews labeled <good/bad>, binary classification.
- Each review is stored as a sequence of word ids (text_ids). With max_length = 500, every review is padded or truncated to the same length, so the dataset becomes a set of fixed-length id vectors.
- To recover a review's original text, look up each text_id ---> word in the vocabulary.
- Model: embedding_dim = 16, i.e. every word is embedded as a vector of length 16.
- max_length is the length of one sentence and embedding_dim the length of one word vector, so one embedded sentence is a max_length * embedding_dim matrix.
  GlobalAveragePooling1D is there to remove the max_length axis: averaging over it collapses max_length * embedding_dim into a single embedding_dim vector (see the shape check right after this list).
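As a quick check of that last shape claim, here is a minimal sketch (it assumes TensorFlow 2.x is available; the tensor values are random, only the shapes matter):

import tensorflow as tf
x = tf.random.uniform((2, 5, 4))  # batch_size=2, max_length=5, embedding_dim=4
pooled = tf.keras.layers.GlobalAveragePooling1D()(x)
print(pooled.shape)  # (2, 4): the max_length axis is averaged away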
The full code:
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import pandas as pd
import os
import time
import sys
from tensorflow import keras
# Movie reviews <good/bad> - binary classification
imdb = keras.datasets.imdb
# Load the data
vocab_size = 10000
index_from = 3  # offset added to every word id, freeing ids 0..2 for special tokens
# Keep the 10,000 most frequent words; all rarer words map to a single special id
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(
    num_words = vocab_size, index_from = index_from)
print(train_data[0], train_labels[0])  # first training sample
print(train_data.shape, train_labels.shape)
print(len(train_data[0]), len(train_data[1]))  # reviews are still variable-length here
word_index = imdb.get_word_index()  # load the word -> id vocabulary
print(len(word_index))
print(word_index)
# Shift every id by index_from = 3 so that ids 0..3 can hold the special tokens
word_index = {k: (v + 3) for k, v in word_index.items()}
word_index['<PAD>'] = 0
word_index['<START>'] = 1
word_index['<UNK>'] = 2
word_index['<END>'] = 3
reverse_word_index = dict([(value, key) for key, value in word_index.items()])
def decode_review(text_ids):
    return ' '.join(
        [reverse_word_index.get(word_id, "<UNK>") for word_id in text_ids])
print(decode_review(train_data[0]))
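# Sanity check (added sketch, not in the original demo): load_data prepends a
# start token, so every encoded review begins with id 1, which the shifted
# vocabulary maps back to '<START>'.
print(train_data[0][0])                      # 1
print(reverse_word_index[train_data[0][0]])  # '<START>'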
# Pad/truncate every review to max_length so all samples share one shape
max_length = 500
train_data = keras.preprocessing.sequence.pad_sequences(
    train_data, value = word_index['<PAD>'], padding = 'post', maxlen = max_length)
test_data = keras.preprocessing.sequence.pad_sequences(
    test_data, value = word_index['<PAD>'], padding = 'post', maxlen = max_length)

# Build the model
embedding_dim = 16  # embed every word as a vector of length 16
batch_size = 128
# max_length is the sentence length, embedding_dim the per-word vector length,
# so one embedded sentence is a max_length * embedding_dim matrix.
# GlobalAveragePooling1D removes the max_length axis:
# max_length * embedding_dim ---> embedding_dim
model = keras.models.Sequential([
    # 1. define the embedding matrix: [vocab_size, embedding_dim]
    # 2. one sample [id1, id2, id3, ...] -> max_length * embedding_dim
    # 3. whole batch: batch_size * max_length * embedding_dim
    keras.layers.Embedding(vocab_size, embedding_dim,
                           input_length = max_length),
    # batch_size * max_length * embedding_dim
    # ---> batch_size * embedding_dim
    keras.layers.GlobalAveragePooling1D(),  # pool away the max_length axis by averaging
    keras.layers.Dense(64, activation = 'relu'),
    keras.layers.Dense(1, activation = 'sigmoid'),
])
model.summary()
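# Expected parameter counts (arithmetic check, added note): the Embedding layer
# has vocab_size * embedding_dim = 10000 * 16 = 160,000 weights; the first Dense
# layer has 16 * 64 + 64 = 1,088; the output Dense layer has 64 * 1 + 1 = 65.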
model.compile(loss = "binary_crossentropy",
              optimizer = "adam",
              metrics = ['accuracy'])
history = model.fit(train_data, train_labels,
                    epochs = 20,
                    batch_size = batch_size,
                    validation_split = 0.2)
def plot_learning_curves(history, label, epochs, min_value, max_value):
    data = {}
    data[label] = history.history[label]
    data['val_' + label] = history.history['val_' + label]
    pd.DataFrame(data).plot(figsize = (8, 5))
    plt.grid(True)
    plt.axis([0, epochs, min_value, max_value])
    plt.show()
plot_learning_curves(history, 'accuracy', 20, 0.8, 1.2)
plot_learning_curves(history, 'loss', 20, 0, 1)
model.evaluate(test_data, test_labels,
               batch_size = batch_size)
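Finally, a hedged usage sketch (encode_review is a hypothetical helper invented here, not part of the original demo): encode a brand-new review with the same vocabulary, pad it, and score it with the trained model.

def encode_review(text):
    # unknown or out-of-vocabulary words fall back to the <UNK> id
    ids = [word_index['<START>']]
    for w in text.lower().split():
        idx = word_index.get(w, word_index['<UNK>'])
        ids.append(idx if idx < vocab_size else word_index['<UNK>'])
    return ids

sample = keras.preprocessing.sequence.pad_sequences(
    [encode_review("this movie was great fun")],
    value = word_index['<PAD>'], padding = 'post', maxlen = max_length)
print(model.predict(sample))  # sigmoid output in [0, 1]; closer to 1 means "good"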