TextCNN Keras Implementation

This post shows how to build a TextCNN text classifier in Keras on top of pretrained Word2Vec embeddings, covering the embedding layer, the parallel convolution and pooling branches, and training and evaluation of the resulting multi-class model on a held-out test set.

Reference: https://zhuanlan.zhihu.com/p/77634533

from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

import re

import gensim
import jieba
import numpy as np
import pandas as pd
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
from keras.layers import (Conv1D, Dense, Dropout, Embedding, Flatten, Input,
                          MaxPooling1D, concatenate)
from keras.models import Model, load_model
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from sklearn import metrics

# Run on CPU only: device_count={'GPU': 0} hides all GPUs from TensorFlow 1.x
KTF.set_session(tf.Session(config=tf.ConfigProto(device_count={'GPU': 0})))
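On TensorFlow 2.x, `tf.Session` and the `keras.backend.tensorflow_backend` module no longer exist; a minimal CPU-only equivalent (a sketch, assuming TensorFlow >= 2.1) is:

import tensorflow as tf

# Hide all GPUs from TensorFlow before any op is placed on a device
tf.config.set_visible_devices([], 'GPU')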


# Build the TextCNN model: embedding -> 3 parallel conv + pooling branches -> concatenate -> flatten -> dropout -> dense output
def TextCNN_model(maxlen, max_features, embedding_dims, class_num, embedding_weights, activation):
    main_input = Input(shape=(maxlen,), dtype='float64')

    # Embedding layer initialized with the pretrained word vectors
    # (add trainable=False here to freeze them during training)
    embedder = Embedding(max_features + 1, embedding_dims, input_length=maxlen, weights=[embedding_weights])
    embed = embedder(main_input)

    # Three parallel branches with kernel (word-window) sizes 3, 4 and 5
    cnn1 = Conv1D(256, 3, padding='same', strides=1, activation='relu')(embed)
    cnn1 = MaxPooling1D()(cnn1)
    cnn2 = Conv1D(256, 4, padding='same', strides=1, activation='relu')(embed)
    cnn2 = MaxPooling1D()(cnn2)
    cnn3 = Conv1D(256, 5, padding='same', strides=1, activation='relu')(embed)
    cnn3 = MaxPooling1D()(cnn3)

    # Concatenate the outputs of the three branches
    cnn = concatenate([cnn1, cnn2, cnn3], axis=-1)
    flat = Flatten()(cnn)
    drop = Dropout(0.2)(flat)
    main_output = Dense(class_num, activation=activation)(drop)

    model = Model(inputs=main_input, outputs=main_output)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    return model
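Note that `MaxPooling1D()` defaults to `pool_size=2`, so each branch only halves the time dimension instead of collapsing it to a single vector per filter, which is what `GlobalMaxPooling1D` (the more common choice in TextCNN) would do. A minimal shape check of one branch, in isolation:

from keras import backend as K
from keras.layers import Conv1D, Input, MaxPooling1D

x = Input(shape=(256, 100))                    # (timesteps, embedding_dims)
y = MaxPooling1D()(Conv1D(256, 3, padding='same')(x))
print(K.int_shape(y))                          # (None, 128, 256): length halved, not reduced to 1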

word2vec = gensim.models.Word2Vec.load("../model/word2vec.model")
# syn1 is the output-layer weight matrix (it exists only for models trained with
# hierarchical softmax, hs=1); its shape is (vocab_size, embedding_dims)
max_features = word2vec.syn1.shape[0]
embedding_dims = word2vec.syn1.shape[1]

# We add an additional row of zeros to the embeddings matrix to represent unseen words and the NULL token.
embedding_weights = np.zeros((word2vec.syn1.shape[0] + 1, word2vec.syn1.shape[1]), dtype="float32")
embedding_weights[:word2vec.syn1.shape[0]] = word2vec.syn1
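Using `syn1` as the embedding matrix is unusual: it is the output-side weight matrix, while the input-side word vectors normally used for embeddings live in `word2vec.wv`. A sketch of the more common variant (assuming a recent gensim where the attribute is `wv.vectors`; older versions call it `wv.syn0`):

vectors = word2vec.wv.vectors  # input-side embedding matrix, shape (vocab_size, embedding_dims)
embedding_weights = np.zeros((vectors.shape[0] + 1, vectors.shape[1]), dtype="float32")
embedding_weights[:vectors.shape[0]] = vectors  # the extra last row stays zero for OOV/padding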

label_dict = np.load('../data/label_dict.npy', allow_pickle=True).tolist()
label_dict = dict(zip(label_dict.values(), label_dict.keys()))  # invert the mapping (swap keys and values)
jieba.load_userdict('../data/OTA_words.txt')

st = re.compile(r'\[.*?\]')              # bracketed segments such as emoticon tags
rule = re.compile(u"[^\u4e00-\u9fa5]")   # every character that is not a Chinese character


def handle_query(line):
    """Strip bracketed segments and non-Chinese characters, then tokenize with jieba."""
    line = st.sub('', line)
    line = rule.sub('', line)
    return list(jieba.cut(line))


dataset = pd.read_csv('../data/all_data_final.csv')
dataset['words'] = dataset['query'].apply(handle_query)

# Map each word to its Word2Vec vocabulary index; out-of-vocabulary words get
# index max_features, which points at the extra all-zero embedding row
dataset['token'] = dataset['words'].apply(
    lambda words: [word2vec.wv.vocab[token].index if token in word2vec.wv.vocab else max_features
                   for token in words])
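A self-contained sketch of this mapping with a toy, purely illustrative vocabulary:

toy_vocab = {'酒店': 0, '房间': 1}   # hypothetical word -> index map
oov_index = len(toy_vocab)           # plays the role of max_features
tokens = [toy_vocab.get(w, oov_index) for w in ['酒店', '干净', '房间']]
print(tokens)                        # [0, 2, 1]: '干净' is out of vocabulary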


batch_size = 1000
epochs = 10
class_num = 339          # number of target classes
activation = 'softmax'
maxlen = 256             # fixed sequence length after padding/truncation

# Simple 90/10 split into train and test (rows are taken in file order, not shuffled)
train_df = dataset[0: int(dataset.shape[0] * 0.9)]
test_df = dataset[int(dataset.shape[0] * 0.9):]

x_train = train_df['token'].tolist()
x_train = pad_sequences(x_train, maxlen=maxlen)  # truncate sequences longer than maxlen; left-pad shorter ones with 0
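By default `pad_sequences` both pads and truncates at the front of the sequence, which is easy to verify in isolation:

print(pad_sequences([[1, 2, 3]], maxlen=5))           # [[0 0 1 2 3]]  (pre-padded)
print(pad_sequences([[1, 2, 3, 4, 5, 6]], maxlen=5))  # [[2 3 4 5 6]]  (pre-truncated)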

y_train = to_categorical(train_df['label_id'].tolist(), num_classes=class_num)  # one-hot encode the labels

model = TextCNN_model(maxlen, max_features, embedding_dims, class_num, embedding_weights, activation)

print('Train...')
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)

# Save the trained model
model.save('./model/textcnn_model.h5')
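The saved HDF5 file can later be restored for inference, for example:

restored_model = load_model('./model/textcnn_model.h5')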

x_test = test_df['token'].tolist()
result = model.predict(pad_sequences(x_test, maxlen=maxlen))
y_predict = np.argmax(result, axis=1)  # index of the highest-probability class

print('Accuracy:', metrics.accuracy_score(test_df['label_id'].tolist(), y_predict))
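Because `label_dict` was inverted above, the numeric predictions can also be mapped back to their original labels (a sketch, assuming the predicted ids appear as keys of the inverted dict):

predicted_labels = [label_dict[i] for i in y_predict]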

Pretrained Word2Vec model dimensions: [figure omitted]

Model structure: [figure omitted]

Training process: [figure omitted]

Test-set accuracy: [figure omitted]
