How to use "pre-trained word vectors" for text classification

For a long time I had no idea how to use pre-trained word vectors; now I finally know!

The code below can be run as-is.

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from keras.layers import Embedding, LSTM, GRU, Dropout, Dense, Input
from keras.models import Model, Sequential, load_model
from keras.preprocessing import sequence
from keras.datasets import imdb
import gensim
from gensim.models.word2vec import Word2Vec


'''
Take an LSTM as the example: the LSTM unrolls for MAX_SEQ_LEN steps, and each cell
takes one word as input, represented as a one-hot vector.
The word-vector matrix is embedMatrix, which stores the vector of every word in the
vocabulary; a word's idx is its row number in embedMatrix.
Multiplying "the word's one-hot vector" by "embedMatrix" gives "the word's vector
representation".

For example, the vocabulary holds 5 words plus a stop-word slot, i.e.
word2idx = {_stopWord: 0, love: 1, I: 2, my: 3, you: 4, friend: 5},
and each word is mapped to 2 dimensions.
Input sentence: "I love my pen"  # "pen" is a stop word, so its idx is set to 0

                     [0,     0  ]
[0, 0, 1, 0, 0, 0]   [0.3,   0.1]   [-0.4, -0.5]
[0, 1, 0, 0, 0, 0] · [-0.4, -0.5] = [0.3,   0.1]
[0, 0, 0, 1, 0, 0]   [0.5,   0.2]   [0.5,   0.2]
[1, 0, 0, 0, 0, 0]   [-0.7,  0.6]   [0,     0  ]
                     [-0.3, -0.8]
'''
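# A minimal sketch of the lookup described above (toy numbers, hypothetical
# _demo_* names, not from a real model): multiplying a one-hot row by
# embedMatrix simply selects the matching row, which is what the Keras
# Embedding layer does internally when fed integer indices.
_demo_embedMatrix = np.array([[0.0, 0.0],      # row 0: _stopWord
                              [0.3, 0.1],      # row 1: love
                              [-0.4, -0.5]])   # row 2: I
_demo_one_hot_I = np.array([0.0, 0.0, 1.0])    # one-hot vector for "I" (idx 2)
assert (_demo_one_hot_I @ _demo_embedMatrix == _demo_embedMatrix[2]).all()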
MAX_SEQ_LEN = 250
inPath = '../data/'


def train_W2V(sentenList, embedSize=300, epoch_num=1):
    w2vModel = Word2Vec(sentences=sentenList, hs=0, negative=5, min_count=5, window=5, iter=epoch_num, size=embedSize)
    w2vModel.save(inPath + 'w2vModel')
    return w2vModel
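# Note: the call above uses the gensim 3.x API. With gensim >= 4.0 the same
# training would be written as (a sketch, assuming gensim 4.x is installed):
#   w2vModel = Word2Vec(sentences=sentenList, hs=0, negative=5, min_count=5,
#                       window=5, epochs=epoch_num, vector_size=embedSize)
# (gensim 4 renamed iter -> epochs and size -> vector_size; the
# w2vModel.wv.vocab lookups below would likewise become w2vModel.wv.key_to_index.)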


def build_word2idx_embedMatrix(w2vModel):
    word2idx = {"_stopWord": 0}  # the extra row 0 is reserved here for stop words / out-of-vocabulary words
    vocab_list = [(w, w2vModel.wv[w]) for w, v in w2vModel.wv.vocab.items()]
    embedMatrix = np.zeros((len(w2vModel.wv.vocab.items()) + 1, w2vModel.vector_size))
    for i in range(0, len(vocab_list)):
        word = vocab_list[i][0]
        word2idx[word] = i + 1
        embedMatrix[i + 1] = vocab_list[i][1]
    return word2idx, embedMatrix


def make_deepLearn_data(sentenList, word2idx):
    X_train_idx = [[word2idx.get(w, 0) for w in sen] for sen in sentenList]
    X_train_idx = np.array(sequence.pad_sequences(X_train_idx, maxlen=MAX_SEQ_LEN))  # must be an np.array
    return X_train_idx
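# A quick sketch of what make_deepLearn_data produces, with a hypothetical
# word2idx of {"_stopWord": 0, "love": 1, "I": 2, "my": 3}:
#   make_deepLearn_data([["I", "love", "my", "pen"]], word2idx)
# returns one row of length MAX_SEQ_LEN, left-padded with zeros and ending in
# [..., 2, 1, 3, 0]: "pen" is out of vocabulary, so word2idx.get falls back to
# index 0, and pad_sequences pads at the front by default.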


def Lstm_model(embedMatrix):  # careful with naming: calling this function LSTM() once shadowed the Keras layer and caused a nasty bug!!
    input_layer = Input(shape=(MAX_SEQ_LEN,), dtype='int32')
    embedding_layer = Embedding(input_dim=len(embedMatrix),
                                output_dim=len(embedMatrix[0]),
                                weights=[embedMatrix],  # use the pre-trained word vectors directly
                                trainable=False)(input_layer)  # False means the word vectors are not fine-tuned
    Lstm_layer = LSTM(units=20, return_sequences=False)(embedding_layer)
    drop_layer = Dropout(0.5)(Lstm_layer)
    dense_layer = Dense(units=1, activation="sigmoid")(drop_layer)
    model = Model(inputs=[input_layer], outputs=[dense_layer])
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())
    return model


if __name__ == '__main__':
    (X_train, y_train), (X_test, y_test) = imdb.load_data()
    X_all = (list(X_train) + list(X_test))[0: 1000]
    y_all = (list(y_train) + list(y_test))[0: 1000]
    print(len(X_all), len(y_all))

    imdb_word2idx = imdb.get_word_index()
    imdb_idx2word = dict((idx, word) for (word, idx) in imdb_word2idx.items())
    # imdb indices are offset by 3 (0 = padding, 1 = start, 2 = unknown); [1:] drops the leading start token
    X_all = [[imdb_idx2word.get(idx - 3, '?') for idx in sen][1:] for sen in X_all]
    # print(y_all[0: 1], X_all[0: 1])

    w2vModel = train_W2V(X_all, embedSize=300, epoch_num=2)
    word2idx, embedMatrix = build_word2idx_embedMatrix(w2vModel)  # build word2idx and embedMatrix
    X_all_idx = make_deepLearn_data(X_all, word2idx)  # build padded index sequences for the network
    y_all_idx = np.array(y_all)  # note: X_all and y_all must be np.array, otherwise Keras raises an error
    # print(y_all_idx[0: 1], X_all_idx[0: 1])

    X_tra_idx, X_val_idx, y_tra_idx, y_val_idx = train_test_split(X_all_idx, y_all_idx, test_size=0.2,
                                                                  random_state=0, stratify=y_all_idx)

    print('———————— model training and prediction ————————')
    model = Lstm_model(embedMatrix)
    model.fit(X_tra_idx, y_tra_idx, validation_data=(X_val_idx, y_val_idx),
              epochs=1, batch_size=100, verbose=1)
    y_pred = model.predict(X_val_idx)
    y_pred_idx = [1 if prob[0] > 0.5 else 0 for prob in y_pred]
    print(f1_score(y_val_idx, y_pred_idx))
    print(confusion_matrix(y_val_idx, y_pred_idx))
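# To persist the trained model, the load_model import at the top can be put to
# use (a sketch; the .h5 filename is arbitrary and saving requires h5py):
#   model.save(inPath + 'lstm_imdb.h5')
#   model = load_model(inPath + 'lstm_imdb.h5')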

 

Reposted from: https://www.cnblogs.com/liguangchuang/p/10074075.html
