Using word2vec with Keras for word embeddings on the IMDB dataset: building the embedding matrix


Data loading

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from keras.layers import Embedding, LSTM, GRU, Dropout, Dense, Input
from keras.models import Model, Sequential, load_model
from keras.preprocessing import sequence
from keras.datasets import imdb
import gensim
from gensim.models.word2vec import Word2Vec
(X_train, y_train), (X_test, y_test) = imdb.load_data()
X_all = (list(X_train) + list(X_test))[0: 1000]
y_all = (list(y_train) + list(y_test))[0: 1000]
print(len(X_all), len(y_all))
# imdb_word2idx here is different from the word2idx built later; it is only used to decode the IMDB data
imdb_word2idx = imdb.get_word_index()  # dict {word: word_index}
imdb_idx2word = dict((idx, word) for (word, idx) in imdb_word2idx.items())  # dict {word_index: word}
# imdb.load_data reserves indices 0, 1 and 2 for padding, start-of-sequence and unknown tokens (index_from=3),
# so we subtract 3 to get the real word index; [1:] drops the start-of-sequence marker
X_all = [[imdb_idx2word.get(idx-3, '?') for idx in sen][1:] for sen in X_all]
print(X_all[1: 2])  # X_all is a list of tokenized reviews: each row is one review, and each element in a row is one word
Output:
[['big', 'hair', 'big', 'boobs', 'bad', 'music', 'and', 'a', 'giant', 'safety', 'pin', 'these', 'are', 'the', 'words', 'to', 'best', 'describe', 'this', 'terrible', 'movie', 'i', 'love', 'cheesy', 'horror', 'movies', 'and', "i've", 'seen', 'hundreds', 'but', 'this', 'had', 'got', 'to', 'be', 'on', 'of', 'the', 'worst', 'ever', 'made', 'the', 'plot', 'is', 'paper', 'thin', 'and', 'ridiculous', 'the', 'acting', 'is', 'an', 'abomination', 'the', 'script', 'is', 'completely', 'laughable', 'the', 'best', 'is', 'the', 'end', 'showdown', 'with', 'the', 'cop', 'and', 'how', 'he', 'worked', 'out', 'who', 'the', 'killer', 'is', "it's", 'just', 'so', 'damn', 'terribly', 'written', 'the', 'clothes', 'are', 'sickening', 'and', 'funny', 'in', 'equal', 'measures', 'the', 'hair', 'is', 'big', 'lots', 'of', 'boobs', 'bounce', 'men', 'wear', 'those', 'cut', 'tee', 'shirts', 'that', 'show', 'off', 'their', 'stomachs', 'sickening', 'that', 'men', 'actually', 'wore', 'them', 'and', 'the', 'music', 'is', 'just', 'synthesiser', 'trash', 'that', 'plays', 'over', 'and', 'over', 'again', 'in', 'almost', 'every', 'scene', 'there', 'is', 'trashy', 'music', 'boobs', 'and', 'paramedics', 'taking', 'away', 'bodies', 'and', 'the', 'gym', 'still', "doesn't", 'close', 'for', 'bereavement', 'all', 'joking', 'aside', 'this', 'is', 'a', 'truly', 'bad', 'film', 'whose', 'only', 'charm', 'is', 'to', 'look', 'back', 'on', 'the', 'disaster', 'that', 'was', 'the', "80's", 'and', 'have', 'a', 'good', 'old', 'laugh', 'at', 'how', 'bad', 'everything', 'was', 'back', 'then']]
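The offset of 3 above comes from the defaults of imdb.load_data (start_char=1, oov_char=2, index_from=3). As a minimal sketch, a review can be decoded directly with those defaults (the helper name decode_review is just for illustration, not part of the original code):

def decode_review(encoded_review, index_from=3):
    # indices 0, 1 and 2 are reserved by imdb.load_data for padding, start-of-sequence and unknown tokens,
    # so the real word index is idx - index_from
    return [imdb_idx2word.get(idx - index_from, '?') for idx in encoded_review]

print(' '.join(decode_review(list(X_train)[0])[1:]))  # [1:] drops the start-of-sequence marker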

Building the embedding matrix with word2vec

def train_W2V(sentenList, embedSize=300, epoch_num=1):
    # gensim 3.x parameter names (iter/size); in gensim 4.x these became epochs/vector_size
    w2vModel = Word2Vec(sentences=sentenList, hs=0, negative=5, min_count=5, window=5, iter=epoch_num, size=embedSize)
    #w2vModel.save(inPath + 'w2vModel/')
    return w2vModel

def build_word2idx_embedMatrix(w2vModel):
    word2idx = {"_stopWord": 0}  # reserve index 0 as a placeholder; stop words / unknown words will map to it
    vocab_list = [(w, w2vModel.wv[w]) for w, v in w2vModel.wv.vocab.items()]
    embedMatrix = np.zeros((len(w2vModel.wv.vocab.items()) + 1, w2vModel.vector_size))  # +1 for the reserved index 0
    for i in range(0, len(vocab_list)):
        word = vocab_list[i][0]  # vocab_list[i][0] is the word itself
        word2idx[word] = i + 1  # every index is shifted by 1 because index 0 is reserved
        embedMatrix[i + 1] = vocab_list[i][1]  # vocab_list[i][1] is the vector that fills this word's row of embedMatrix
    return word2idx, embedMatrix
w2vModel = train_W2V(X_all, embedSize=300, epoch_num=2)
word2idx, embedMatrix = build_word2idx_embedMatrix(w2vModel)  # build word2idx and embedMatrix
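A quick sanity check that word2idx and embedMatrix agree with the trained model (a sketch; it assumes the word 'movie' survived the min_count=5 filter):

w = 'movie'  # any word kept in the word2vec vocabulary
assert np.allclose(embedMatrix[word2idx[w]], w2vModel.wv[w])
print(embedMatrix.shape)  # (vocabulary size + 1, 300)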

Data preparation

MAX_SEQ_LEN = 200  # maximum padded sequence length; the original post never defines it, 200 is an illustrative choice

def make_deepLearn_data(sentenList, word2idx):
    X_train_idx = [[word2idx.get(w, 0) for w in sen] for sen in sentenList]  # everything so far was word-based; word2idx maps each word to its id (0 for unknown)
    X_train_idx = np.array(sequence.pad_sequences(X_train_idx, maxlen=MAX_SEQ_LEN))  # pad/truncate to the same length
    return X_train_idx

X_all_idx = make_deepLearn_data(X_all, word2idx)  # build the arrays the deep-learning model expects
y_all_idx = np.array(y_all)  # note: X_all and y_all must be np.array(), otherwise an error is raised

X_tra_idx, X_val_idx, y_tra_idx, y_val_idx = train_test_split(X_all_idx, y_all_idx, test_size=0.2,
                                                              random_state=0, stratify=y_all_idx)
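A quick look at the resulting shapes (a sketch; the exact numbers depend on the MAX_SEQ_LEN value assumed above and on the 1000-review subset):

print(X_tra_idx.shape, X_val_idx.shape)  # e.g. (800, 200) and (200, 200)
print(y_tra_idx.shape, y_val_idx.shape)  # e.g. (800,) and (200,)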

Building and training the model

def Lstm_model(embedMatrix):  # note: don't reuse a library name; an earlier version named this LSTM() and that caused serious errors!!
    input_layer = Input(shape=(MAX_SEQ_LEN,), dtype='int32')
    embedding_layer = Embedding(input_dim=len(embedMatrix), output_dim=len(embedMatrix[0]),
                                weights=[embedMatrix],  # initialize directly with the pre-trained word vectors
                                trainable=False)(input_layer)  # False means the word vectors are not fine-tuned
    Lstm_layer = LSTM(units=20, return_sequences=False)(embedding_layer)
    drop_layer = Dropout(0.5)(Lstm_layer)
    dense_layer = Dense(units=1, activation="sigmoid")(drop_layer)
    model = Model(inputs=[input_layer], outputs=[dense_layer])
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())
    return model
    
model = Lstm_model(embedMatrix)
model.fit(X_tra_idx, y_tra_idx, validation_data=(X_val_idx, y_val_idx),
          epochs=1, batch_size=100, verbose=1)
y_pred = model.predict(X_val_idx)
y_pred_idx = [1 if prob[0] > 0.5 else 0 for prob in y_pred]

print(f1_score(y_val_idx, y_pred_idx))
print(confusion_matrix(y_val_idx, y_pred_idx))
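accuracy_score, precision_score and recall_score were imported above but never used; a short sketch that reports them on the same validation predictions:

print('accuracy :', accuracy_score(y_val_idx, y_pred_idx))
print('precision:', precision_score(y_val_idx, y_pred_idx))
print('recall   :', recall_score(y_val_idx, y_pred_idx))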