[Deep Learning Framework Keras] Using Common Pretrained Word Vectors: GloVe, FastText, Word2Vec, Paragram

Notes

1. Environment: Kaggle kernel
2. Data source: Kaggle datasets (must be added to the kernel manually)
import pandas as pd
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Embedding, Dropout, CuDNNGRU
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from sklearn.model_selection import train_test_split
from gensim.models import KeyedVectors
from tqdm import tqdm
import gc
import time
tqdm.pandas()
Using TensorFlow backend.

Parameters

max_features = 10000  # vocabulary size: keep only the 10,000 most frequent words
maxlen = 200          # pad/truncate every review to 200 tokens
embed_size = 300      # dimensionality of the word vectors (all sources here are 300-d)
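Note how max_features interacts with the Tokenizer below: only the 10,000 most frequent words keep their indices, and texts_to_sequences silently drops the rest. A toy illustration (hypothetical mini-corpus, not from the kernel):

# With num_words=3, only indices 1 and 2 survive, so 'c' (index 3) is dropped
t = Tokenizer(num_words=3)
t.fit_on_texts(["a a a b b c"])
print(t.texts_to_sequences(["a b c"]))  # [[1, 2]]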

Data processing

# Load the labeled IMDB training data
train = pd.read_csv("../input/imdb-dataset/labeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
# Build the tokenizer on the review texts
tokenizer = Tokenizer(num_words=max_features, lower=True)
tokenizer.fit_on_texts(list(train['review']))
# Convert each review to a sequence of word indices, then pad/truncate to maxlen
x_train = tokenizer.texts_to_sequences(list(train['review']))
x_train = pad_sequences(x_train, maxlen=maxlen)
y_train = list(train['sentiment'])
# Split into training and validation sets
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.3, random_state=0)

del train
gc.collect()
time.sleep(10)
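A quick sanity check on the shapes (a small sketch; the counts match the Keras logs further down):

print(x_train.shape)              # (17500, 200) -- 70% of the 25,000 reviews
print(x_val.shape)                # (7500, 200)
print(len(tokenizer.word_index))  # 88582 distinct words in the corpus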

1. Without Pretrained Word Vectors

def build_model(embedding_matrix=None):
    inp = Input(shape=(maxlen,))
    if embedding_matrix is None:
        # Randomly initialized embeddings, learned from scratch
        x = Embedding(max_features, embed_size)(inp)
    else:
        # Embeddings initialized from the pretrained matrix (still trainable)
        x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)
    x = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)
    x = GlobalMaxPool1D()(x)  # max over the time dimension -> fixed-size vector
    x = Dense(16, activation="relu")(x)
    x = Dropout(0.1)(x)
    x = Dense(1, activation="sigmoid")(x)  # binary sentiment output
    model = Model(inputs=inp, outputs=x)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
model = build_model()
history = model.fit(x_train, y_train, batch_size=512, epochs=5, validation_data=(x_val, y_val))
Train on 17500 samples, validate on 7500 samples
Epoch 1/5
17500/17500 [==============================] - 6s 356us/step - loss: 0.6426 - acc: 0.7134 - val_loss: 0.5268 - val_acc: 0.8199
Epoch 2/5
17500/17500 [==============================] - 2s 130us/step - loss: 0.3931 - acc: 0.8451 - val_loss: 0.3175 - val_acc: 0.8671
Epoch 3/5
17500/17500 [==============================] - 2s 130us/step - loss: 0.2254 - acc: 0.9124 - val_loss: 0.2764 - val_acc: 0.8864
Epoch 4/5
17500/17500 [==============================] - 2s 130us/step - loss: 0.1434 - acc: 0.9497 - val_loss: 0.3107 - val_acc: 0.8836
Epoch 5/5
17500/17500 [==============================] - 2s 132us/step - loss: 0.0883 - acc: 0.9730 - val_loss: 0.3606 - val_acc: 0.8732
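A common variant (not used in this notebook; the embedding layer above stays trainable) is to freeze pretrained vectors so the relatively small IMDB training set cannot distort them. A minimal sketch of the changed line in build_model:

# Hedged variant: trainable=False freezes the pretrained vectors, so only
# the GRU and Dense layers are updated during training.
x = Embedding(max_features, embed_size,
              weights=[embedding_matrix],
              trainable=False)(inp)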

2. Using GloVe

def build_matrix(embeddings_index, word_index):
    embedding_matrix = np.zeros((max_features, embed_size))
    for word, i in tqdm(word_index.items()):
        if i >= max_features: continue
        try:
            # Vector for this word
            embedding_vector = embeddings_index[word]
        except KeyError:
            # OOV word: fall back to the vector of the token "unknown"
            embedding_vector = embeddings_index["unknown"]
        # Row i of embedding_matrix stays aligned with the word's index in word_index
        embedding_matrix[i] = embedding_vector
    return embedding_matrix
EMBEDDING_FILE = '../input/glove840b300dtxt/glove.840B.300d.txt'
def get_coefs(word, *arr): return word, np.asarray(arr, dtype='float32')
# Build a dict mapping each word (key) to its 300-d vector (value)
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE))
glove_embedding_matrix = build_matrix(embeddings_index, tokenizer.word_index)
100%|██████████| 88582/88582 [00:00<00:00, 739018.03it/s]
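Before training, it is worth measuring how much of the usable vocabulary the pretrained index covers. This check_coverage helper is not part of the original kernel, just a small diagnostic sketch:

def check_coverage(word_index, embeddings_index):
    # Words the model can actually use have index < max_features
    in_vocab = [w for w, i in word_index.items() if i < max_features]
    known = sum(w in embeddings_index for w in in_vocab)
    print('coverage: %d / %d = %.2f%%' % (known, len(in_vocab), 100.0 * known / len(in_vocab)))

check_coverage(tokenizer.word_index, embeddings_index)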
model = build_model(glove_embedding_matrix)
history = model.fit(x_train, y_train, batch_size=512, epochs=5, validation_data=(x_val, y_val))
Train on 17500 samples, validate on 7500 samples
Epoch 1/5
17500/17500 [==============================] - 4s 233us/step - loss: 0.6197 - acc: 0.6819 - val_loss: 0.4859 - val_acc: 0.8264
Epoch 2/5
17500/17500 [==============================] - 2s 129us/step - loss: 0.4026 - acc: 0.8311 - val_loss: 0.3220 - val_acc: 0.8619
Epoch 3/5
17500/17500 [==============================] - 2s 130us/step - loss: 0.2987 - acc: 0.8774 - val_loss: 0.2973 - val_acc: 0.8741
Epoch 4/5
17500/17500 [==============================] - 2s 130us/step - loss: 0.2343 - acc: 0.9101 - val_loss: 0.2640 - val_acc: 0.8909
Epoch 5/5
17500/17500 [==============================] - 2s 129us/step - loss: 0.1833 - acc: 0.9345 - val_loss: 0.2606 - val_acc: 0.8951
# Free memory before loading the next embedding file
del embeddings_index, glove_embedding_matrix, history, model
gc.collect()
time.sleep(10)

3. Using FastText

EMBEDDING_FILE = '../input/fasttext-crawl-300d-2m/crawl-300d-2M.vec'
def get_coefs(word, *arr): return word, np.asarray(arr, dtype='float32')
# len(o) > 100 skips the short header line ("2000000 300") at the top of the .vec file
embeddings_index = dict(get_coefs(*o.strip().split(" ")) for o in open(EMBEDDING_FILE, encoding='utf8') if len(o) > 100)
fasttext_embedding_matrix = build_matrix(embeddings_index, tokenizer.word_index)
100%|██████████| 88582/88582 [00:00<00:00, 852801.06it/s]
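An alternative to hand-parsing the .vec file is to let gensim do it (a sketch, not what the kernel runs): the .vec format is plain word2vec text, so load_word2vec_format reads it directly and handles the header line that the len(o) > 100 filter above skips.

# Sketch: equivalent load via gensim's KeyedVectors (gensim 3.x attributes)
ft = KeyedVectors.load_word2vec_format(EMBEDDING_FILE)
embeddings_index = dict(zip(ft.index2word, ft.vectors))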
model = build_model(fasttext_embedding_matrix)
history = model.fit(x_train, y_train, batch_size=512, epochs=5, validation_data=(x_val, y_val))
Train on 17500 samples, validate on 7500 samples
Epoch 1/5
17500/17500 [==============================] - 4s 227us/step - loss: 0.6177 - acc: 0.6759 - val_loss: 0.4807 - val_acc: 0.8299
Epoch 2/5
17500/17500 [==============================] - 2s 129us/step - loss: 0.3718 - acc: 0.8497 - val_loss: 0.2942 - val_acc: 0.8800
Epoch 3/5
17500/17500 [==============================] - 2s 130us/step - loss: 0.2571 - acc: 0.8997 - val_loss: 0.2653 - val_acc: 0.8904
Epoch 4/5
17500/17500 [==============================] - 2s 130us/step - loss: 0.1878 - acc: 0.9308 - val_loss: 0.2907 - val_acc: 0.8853
Epoch 5/5
17500/17500 [==============================] - 2s 129us/step - loss: 0.1403 - acc: 0.9522 - val_loss: 0.2757 - val_acc: 0.8909
del embeddings_index, fasttext_embedding_matrix, history, model
gc.collect()
time.sleep(10)

4. Using Paragram

EMBEDDING_FILE = '../input/paragram-300-sl999/paragram_300_sl999/paragram_300_sl999/paragram_300_sl999.txt'
def get_coefs(word, *arr): return word, np.asarray(arr, dtype='float32')
# errors='ignore' drops the occasional malformed byte sequence in the Paragram file
embeddings_index = dict(get_coefs(*o.strip().split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore') if len(o) > 100)
paragram_embedding_matrix = build_matrix(embeddings_index, tokenizer.word_index)
100%|██████████| 88582/88582 [00:00<00:00, 1095548.21it/s]
model = build_model(paragram_embedding_matrix)
history = model.fit(x_train, y_train, batch_size=512, epochs=5, validation_data=(x_val, y_val))
Train on 17500 samples, validate on 7500 samples
Epoch 1/5
17500/17500 [==============================] - 5s 272us/step - loss: 0.6693 - acc: 0.6208 - val_loss: 0.5894 - val_acc: 0.8075
Epoch 2/5
17500/17500 [==============================] - 2s 130us/step - loss: 0.5209 - acc: 0.7810 - val_loss: 0.4233 - val_acc: 0.8328
Epoch 3/5
17500/17500 [==============================] - 2s 131us/step - loss: 0.3823 - acc: 0.8337 - val_loss: 0.3358 - val_acc: 0.8587
Epoch 4/5
17500/17500 [==============================] - 2s 130us/step - loss: 0.2885 - acc: 0.8866 - val_loss: 0.2852 - val_acc: 0.8788
Epoch 5/5
17500/17500 [==============================] - 2s 130us/step - loss: 0.2255 - acc: 0.9122 - val_loss: 0.2698 - val_acc: 0.8876
del embeddings_index, paragram_embedding_matrix, history, model
gc.collect()
time.sleep(10)

5. Using GoogleNews Word2Vec

EMBEDDING_FILE = '../input/googlenewsvectorsnegative300/GoogleNews-vectors-negative300.bin.gz'
word2vec = KeyedVectors.load_word2vec_format(EMBEDDING_FILE, binary=True)
# load_word2vec_format already returns a KeyedVectors object, so use
# .vectors / .index2word directly; going through .wv only raises a
# DeprecationWarning in gensim 3.x
embeddings_index = {}
for i, vec in tqdm(enumerate(word2vec.vectors)):
    embeddings_index[word2vec.index2word[i]] = vec
word2vec_embedding_matrix = build_matrix(embeddings_index, tokenizer.word_index)
del word2vec
3000000it [00:11, 259680.85it/s]
100%|██████████| 88582/88582 [00:00<00:00, 1166240.83it/s]
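Copying all three million vectors into a Python dict is memory-hungry; a leaner sketch queries the KeyedVectors object directly (it would replace the dict-building loop above and run before del word2vec). Unlike build_matrix, OOV rows simply stay zero here:

# Sketch: build the matrix straight from KeyedVectors, no intermediate dict
word2vec_embedding_matrix = np.zeros((max_features, embed_size))
for word, i in tokenizer.word_index.items():
    if i >= max_features:
        continue
    if word in word2vec.vocab:  # gensim 3.x vocabulary dict
        word2vec_embedding_matrix[i] = word2vec.word_vec(word)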
model = build_model(word2vec_embedding_matrix)
history = model.fit(x_train, y_train, batch_size=512, epochs=5, validation_data=(x_val, y_val))
Train on 17500 samples, validate on 7500 samples
Epoch 1/5
17500/17500 [==============================] - 4s 224us/step - loss: 0.6141 - acc: 0.7146 - val_loss: 0.4578 - val_acc: 0.8395
Epoch 2/5
17500/17500 [==============================] - 2s 130us/step - loss: 0.3509 - acc: 0.8546 - val_loss: 0.3101 - val_acc: 0.8701
Epoch 3/5
17500/17500 [==============================] - 2s 131us/step - loss: 0.2441 - acc: 0.9044 - val_loss: 0.2747 - val_acc: 0.8856
Epoch 4/5
17500/17500 [==============================] - 2s 131us/step - loss: 0.1865 - acc: 0.9315 - val_loss: 0.2774 - val_acc: 0.8899
Epoch 5/5
17500/17500 [==============================] - 2s 130us/step - loss: 0.1395 - acc: 0.9526 - val_loss: 0.2914 - val_acc: 0.8897
del embeddings_index, word2vec_embedding_matrix, history, model
gc.collect()
time.sleep(10)