Keras (24) NLP Text Classification: Embedding, Padding, and Building and Training a Fully Connected Model


This article covers:

  • Text classification: embedding
  • Text classification: padding
  • Text classification: model construction and training

I. Load the IMDB movie review data from keras.datasets and inspect it

1. Load the movie review data from the Keras IMDB dataset
# 1. Load the movie review data from the Keras IMDB dataset
imdb = keras.datasets.imdb
vocab_size = 10000  # keep only the 10,000 most frequent words; less frequent words map to a special token
index_from = 3      # shift every word index up by 3 to make room for special tokens
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(
    num_words = vocab_size, index_from = index_from)    # load the data
2. Inspect the training data and labels
# 2. Inspect the training data and labels
print(train_data[0], train_labels[0])
print(train_data.shape, train_labels.shape)
print(len(train_data[0]), len(train_data[1]))
3. Inspect the test data
# 3. Inspect the test data
print(test_data, test_labels)
print(test_data.shape, test_labels.shape)

II. Get the word-to-index mapping and preprocess it

1. Get the word-to-index mapping
# 1. Get the word-to-index mapping
word_index = imdb.get_word_index()
print(len(word_index))
2. Shift every index in the mapping up by 3
# 2. Shift every index in the mapping up by 3
word_index = {k:(v+index_from) for k, v in word_index.items()}
3. Fill the freed slots (indices 0-3) with special tokens
# 3. Fill the freed slots (indices 0-3) with special tokens
word_index['<PAD>'] = 0
word_index['<START>'] = 1
word_index['<UNK>'] = 2
word_index['<END>'] = 3
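
A quick sanity check of the shift, using the variables defined above (a minimal sketch, assuming the standard Keras IMDB convention that 'the' has raw index 1 and that load_data already applies the index_from offset to the data):

# 'the' had raw index 1, so after adding index_from it becomes 4,
# matching the ids stored in train_data; id 1 in the data is <START>.
print(word_index['the'])     # expected: 4
print(train_data[0][:5])     # the first id should be 1, i.e. <START>
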
4. Invert the mapping to get index-to-word
# 4. Invert the mapping to get index-to-word
reverse_word_index = dict(
    [(value, key) for key, value in word_index.items()])
5. Decode one sample to check the result
# 5. Decode one sample to check the result
def decode_review(text_ids):
    return ' '.join(
        [reverse_word_index.get(word_id, "<UNK>") for word_id in text_ids])

decode_review(train_data[1])

III. Preprocess the training and test sets

1. Pad the training data
# 1. Pad the training data
max_length = 500    # maximum paragraph length
train_data = keras.preprocessing.sequence.pad_sequences(
    train_data, # list of lists
    value = word_index['<PAD>'],    # value used to fill sequences shorter than maxlen
    padding = 'post',   # post: pad at the end; pre: pad at the beginning
    maxlen = max_length)    # maximum length - longer sequences are truncated, shorter ones are padded
2. Pad the test data
# 2. Pad the test data
test_data = keras.preprocessing.sequence.pad_sequences(
    test_data, # list of lists
    value = word_index['<PAD>'],    # value used to fill sequences shorter than maxlen
    padding = 'post',   # post: pad at the end; pre: pad at the beginning
    maxlen = max_length)
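
To make the padding and truncation behavior concrete, here is a minimal sketch on toy sequences (the values and maxlen below are hypothetical):

from tensorflow import keras

toy = [[11, 12, 13], [21, 22, 23, 24, 25, 26]]   # hypothetical sequences of different lengths
padded = keras.preprocessing.sequence.pad_sequences(
    toy,
    value = 0,          # pad with the <PAD> index
    padding = 'post',   # append padding at the end
    maxlen = 5)         # shorter sequences are padded, longer ones are truncated
print(padded)
# [[11 12 13  0  0]
#  [22 23 24 25 26]]   <- truncation removes elements from the front by default (truncating='pre')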

IV. Define the model

embedding_dim = 16  # each word is embedded as a vector of length 16
batch_size = 128

model = keras.models.Sequential([
    # 1. define a matrix of shape [vocab_size, embedding_dim]
    # 2. each input sequence [1,2,3,4..] is mapped to a max_length * embedding_dim matrix
    # 3. output shape: batch_size * max_length * embedding_dim
    keras.layers.Embedding(vocab_size, embedding_dim, trainable=True,  # set trainable=False if the embedding is pre-trained
                           input_length = max_length),
    # batch_size * max_length * embedding_dim
    #   -> batch_size * embedding_dim
    keras.layers.GlobalAveragePooling1D(),
    # keras.layers.Flatten(),  # alternatively flatten, then feed into dense layers
    keras.layers.Dense(64, activation = 'relu', kernel_regularizer = regularizers.l2(0.01)),
    keras.layers.Dense(1, activation = 'sigmoid'),
])

model.summary()
model.compile(optimizer = 'adam', loss = 'binary_crossentropy',
              metrics = ['accuracy'])
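
The comments above describe the shape transformation performed by GlobalAveragePooling1D; a minimal sketch with a toy tensor (hypothetical sizes) shows the time dimension being averaged away:

import tensorflow as tf

x = tf.random.uniform((2, 4, 3))                     # batch_size=2, max_length=4, embedding_dim=3
pooled = tf.keras.layers.GlobalAveragePooling1D()(x)
print(pooled.shape)                                  # (2, 3): one averaged vector per sequence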

Note:
When using an embedding layer that has already been pre-trained, you can set trainable = False so its weights are not updated during training.
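
A minimal sketch of that setup, assuming a pre-trained embedding matrix (random numbers are used here as a stand-in) aligned with the vocabulary and variables defined above:

import numpy as np
from tensorflow import keras

# Hypothetical pre-trained matrix of shape [vocab_size, embedding_dim],
# e.g. built from GloVe vectors and aligned to word_index.
pretrained_matrix = np.random.uniform(-1, 1, size=(vocab_size, embedding_dim))

frozen_embedding = keras.layers.Embedding(
    vocab_size, embedding_dim,
    weights = [pretrained_matrix],   # initialize with the pre-trained vectors
    input_length = max_length,
    trainable = False)               # freeze the layer during training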

V. Train the model

history = model.fit(train_data, train_labels,
                    epochs = 30,
                    batch_size = batch_size,
                    validation_split = 0.2)

VI. Plot the training curves

def plot_learning_curves(history, label, epochs, min_value, max_value):
    data = {}
    data[label] = history.history[label]
    data['val_'+label] = history.history['val_'+label]
    pd.DataFrame(data).plot(figsize=(8, 5))
    plt.grid(True)
    plt.axis([0, epochs, min_value, max_value])
    plt.show()
    
plot_learning_curves(history, 'accuracy', 30, 0, 1)
plot_learning_curves(history, 'loss', 30, 0, 1)

VII. Evaluate accuracy on the test set

# VII. Evaluate accuracy on the test set
test_loss, test_acc = model.evaluate(
    test_data, test_labels,
    batch_size = batch_size,
    verbose = 0)
print('Test loss:', test_loss, 'Test accuracy:', test_acc)

VIII. Complete code

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import regularizers

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)


# I. Load the IMDB movie review data from keras.datasets and inspect it
# 1. Load the movie review data from the Keras IMDB dataset
imdb = keras.datasets.imdb
vocab_size = 10000  # keep only the 10,000 most frequent words; less frequent words map to a special token
index_from = 3      # shift every word index up by 3 to make room for special tokens
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(
    num_words = vocab_size, index_from = index_from)    # load the data

# 2. Inspect the training data and labels
print(train_data[0], train_labels[0])
print(train_data.shape, train_labels.shape)
print(len(train_data[0]), len(train_data[1]))

# 3. Inspect the test data
print(test_data, test_labels)
print(test_data.shape, test_labels.shape)

# II. Get the word-to-index mapping and preprocess it
# 1. Get the word-to-index mapping
word_index = imdb.get_word_index()
print(len(word_index))

# 2. Shift every index in the mapping up by 3
word_index = {k:(v+index_from) for k, v in word_index.items()}

# 3. Fill the freed slots (indices 0-3) with special tokens
word_index['<PAD>'] = 0
word_index['<START>'] = 1
word_index['<UNK>'] = 2
word_index['<END>'] = 3

# 4. Invert the mapping to get index-to-word
reverse_word_index = dict(
    [(value, key) for key, value in word_index.items()])

# 5. Decode one sample to check the result
def decode_review(text_ids):
    return ' '.join(
        [reverse_word_index.get(word_id, "<UNK>") for word_id in text_ids])

decode_review(train_data[1])

# III. Preprocess the training and test sets
max_length = 500    # maximum paragraph length

# 1. Pad the training data
train_data = keras.preprocessing.sequence.pad_sequences(
    train_data, # list of lists
    value = word_index['<PAD>'],    # value used to fill sequences shorter than maxlen
    padding = 'post',   # post: pad at the end; pre: pad at the beginning
    maxlen = max_length)    # maximum length - longer sequences are truncated, shorter ones are padded

# 2. Pad the test data
test_data = keras.preprocessing.sequence.pad_sequences(
    test_data, # list of lists
    value = word_index['<PAD>'],    # value used to fill sequences shorter than maxlen
    padding = 'post',   # post: pad at the end; pre: pad at the beginning
    maxlen = max_length)

# 3. Print the processed data
print(train_data[0])

# IV. Define the model
embedding_dim = 16  # each word is embedded as a vector of length 16
batch_size = 128

model = keras.models.Sequential([
    # 1. define a matrix of shape [vocab_size, embedding_dim]
    # 2. each input sequence [1,2,3,4..] is mapped to a max_length * embedding_dim matrix
    # 3. output shape: batch_size * max_length * embedding_dim
    keras.layers.Embedding(vocab_size, embedding_dim,
                           input_length = max_length),
    # batch_size * max_length * embedding_dim
    #   -> batch_size * embedding_dim
    keras.layers.GlobalAveragePooling1D(),
    # keras.layers.Flatten(),  # alternatively flatten, then feed into dense layers
    keras.layers.Dense(64, activation = 'relu', kernel_regularizer = regularizers.l2(0.01)),
    keras.layers.Dense(1, activation = 'sigmoid'),
])

model.summary()
model.compile(optimizer = 'adam', loss = 'binary_crossentropy',
              metrics = ['accuracy'])

# V. Train the model
history = model.fit(train_data, train_labels,
                    epochs = 30,
                    batch_size = batch_size,
                    validation_split = 0.2)


# VI. Plot the training curves
def plot_learning_curves(history, label, epochs, min_value, max_value):
    data = {}
    data[label] = history.history[label]
    data['val_'+label] = history.history['val_'+label]
    pd.DataFrame(data).plot(figsize=(8, 5))
    plt.grid(True)
    plt.axis([0, epochs, min_value, max_value])
    plt.show()
    
plot_learning_curves(history, 'accuracy', 30, 0, 1)
plot_learning_curves(history, 'loss', 30, 0, 1)

# VII. Evaluate accuracy on the test set
test_loss, test_acc = model.evaluate(
    test_data, test_labels,
    batch_size = batch_size,
    verbose = 0)
print('Test loss:', test_loss, 'Test accuracy:', test_acc)