Understanding the Keras Embedding Layer, with Small Sentiment Analysis Examples

 

1. For an explanation of word embeddings and a code implementation, this Jianshu article is a good reference: http://www.jianshu.com/p/0124ac7d72b8

2. Keras provides an Embedding layer whose meaning is hard to grasp from the official documentation alone; this article is recommended: https://yq.aliyun.com/articles/221681
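
The short version (my own summary, not from the linked article): an Embedding layer is a trainable lookup table that maps integer word indices of shape (batch, sequence_length) to dense vectors of shape (batch, sequence_length, output_dim). A minimal sketch:

import numpy as np
from keras.models import Sequential
from keras.layers import Embedding

# a lookup table with 50 rows (one per word index), each a 4-dimensional vector
model = Sequential()
model.add(Embedding(input_dim=50, output_dim=4, input_length=3))

# two "sentences" of three word indices each -> two 3x4 matrices of vectors
out = model.predict(np.array([[1, 2, 3], [4, 5, 6]]))
print(out.shape)   # (2, 3, 4)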

Here is the code:

# -*- coding: utf-8 -*-
"""
Created on Tue Nov 21 22:26:20 2017

@author: www
"""
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding


# define documents
docs = ['Well done!',
		'Good work',
		'Great effort',
		'nice work',
		'Excellent!',
		'Weak',
		'Poor effort!',
		'not good',
		'poor work',
		'Could have done better.']
  

# define class labels
labels = array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])   # 1 = positive, 0 = negative

# integer encode the documents
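# (note: despite its name, one_hot does not build one-hot vectors -- it hashes
# each word to an integer in [1, vocab_size), so distinct words may collide)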
vocab_size = 50
encoded_docs = [one_hot(d, vocab_size) for d in docs]
print(encoded_docs)

# pad documents to a max length of 4 words
max_length = 4
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
print(padded_docs)

# define the model
model = Sequential()
model.add(Embedding(vocab_size, 8, input_length=max_length))
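# Embedding output shape is (None, 4, 8); Flatten reshapes it to (None, 32)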
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
# summarize the model
print(model.summary())

# fit the model
model.fit(padded_docs, labels, epochs=50, verbose=0)
# evaluate the model
loss, accuracy = model.evaluate(padded_docs, labels, verbose=0)
print('Accuracy: %f' % (accuracy*100))
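
As a quick check (my addition, not in the original post), the learned embedding matrix can be read back out of the first layer after training; it has one 8-dimensional row per vocabulary index:

# inspect the learned embedding weights
embedding_weights = model.layers[0].get_weights()[0]
print(embedding_weights.shape)   # (50, 8)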

 

An example using pretrained GloVe embeddings:

from numpy import asarray
from numpy import zeros
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Embedding
# define documents
docs = ['Well done!',
		'Good work',
		'Great effort',
		'nice work',
		'Excellent!',
		'Weak',
		'Poor effort!',
		'not good',
		'poor work',
		'Could have done better.']
# define class labels
labels = asarray([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# prepare tokenizer
t = Tokenizer()
t.fit_on_texts(docs)
vocab_size = len(t.word_index) + 1
# integer encode the documents
encoded_docs = t.texts_to_sequences(docs)
print(encoded_docs)
# pad documents to a max length of 4 words
max_length = 4
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
print(padded_docs)
# load the whole embedding into memory
embeddings_index = dict()
f = open('../glove_data/glove.6B/glove.6B.100d.txt', encoding='utf-8')
for line in f:
	values = line.split()
	word = values[0]
	coefs = asarray(values[1:], dtype='float32')
	embeddings_index[word] = coefs
f.close()
print('Loaded %s word vectors.' % len(embeddings_index))
# create a weight matrix for words in training docs
embedding_matrix = zeros((vocab_size, 100))
for word, i in t.word_index.items():
	embedding_vector = embeddings_index.get(word)
	if embedding_vector is not None:
		embedding_matrix[i] = embedding_vector
# define model
model = Sequential()
e = Embedding(vocab_size, 100, weights=[embedding_matrix], input_length=max_length, trainable=False)
model.add(e)
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
# summarize the model
print(model.summary())
# fit the model
model.fit(padded_docs, labels, epochs=50, verbose=0)
# evaluate the model
loss, accuracy = model.evaluate(padded_docs, labels, verbose=0)
print('Accuracy: %f' % (accuracy*100))
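
One extra check worth doing (my addition): vocabulary words that are missing from GloVe keep the all-zero rows of the initialized matrix, so it is useful to see which ones they are:

# list vocabulary words that received no pretrained vector
missing = [w for w in t.word_index if w not in embeddings_index]
print('Words without a pretrained vector:', missing)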

3. Sentiment analysis with Keras, using the IMDB dataset that ships with the library.

Each word in the dataset has already been assigned an integer index, so every review is a sequence of numbers (a decoding sketch follows the code below).

Load the data and take a look:

from keras.datasets import imdb
import numpy as np


(X_train, y_train), (X_test, y_test) = imdb.load_data()

# look at how many words each review contains:
review_lens = list(map(len, X_train))
print(np.mean(review_lens))
# the average review length is about 238.7 words

# plot a histogram of the review lengths for a direct view:
import matplotlib.pyplot as plt
plt.hist(review_lens, bins=range(min(review_lens), max(review_lens) + 50, 50))
plt.show()
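
To see what these integer sequences mean (my addition, not from the original post), the indices can be mapped back to words with imdb.get_word_index(); note that load_data shifts every index up by 3, reserving 0, 1 and 2 for the padding, start-of-sequence and unknown markers:

# decode the first training review back into words
word_index = imdb.get_word_index()
index_word = {i + 3: w for w, i in word_index.items()}
print(' '.join(index_word.get(i, '?') for i in X_train[0]))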


Now for the sentiment analysis itself:

from keras.models import Sequential
from keras.layers import Dense,Flatten
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
import numpy as np
from keras.datasets import imdb


(X_train,y_train),(X_test,y_test) = imdb.load_data()

# Keras's pad_sequences pads every sequence to the same length;
# maxlen caps the number of words, and by default longer sequences
# are truncated from the front (truncating='pre')
max_word = 400
X_train = sequence.pad_sequences(X_train, maxlen=max_word)
X_test = sequence.pad_sequences(X_test, maxlen=max_word)

vocab_size = np.max([np.max(X_train[i]) for i in range(X_train.shape[0])]) + 1

# Keras provides the Embedding layer; its arguments are the vocabulary size,
# the dimension of the word vectors, and the length of each input text
model = Sequential()
model.add(Embedding(vocab_size, 64, input_length=max_word))
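# Embedding output shape is (None, 400, 64); Flatten reshapes it to (None, 25600)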
model.add(Flatten())
model.add(Dense(2000, activation='relu'))
model.add(Dense(500, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())

model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size=100, verbose=1)
score = model.evaluate(X_test, y_test)
print('Test accuracy: %f' % score[1])
# the test accuracy comes out at roughly 85%

 

The convolutional model is built as follows (continuing the script above; a training sketch follows the block):

from keras.layers import Conv1D
from keras.layers import MaxPool1D
from keras.layers import Dropout

model = Sequential()
model.add(Embedding(vocab_size, 64, input_length=max_word))

model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPool1D(pool_size=2))
model.add(Dropout(0.25))

model.add(Conv1D(filters=128, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPool1D(pool_size=2))
model.add(Dropout(0.25))

# flatten the (timesteps, filters) feature map so that the Dense stack
# produces a single prediction per review instead of one per timestep
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
print(model.summary())
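
The original post stops at the model definition here; as a sketch (assuming the padded X_train/X_test from the section above are still in scope), the CNN can be trained and evaluated the same way as the dense model:

# train and evaluate the CNN on the padded IMDB data
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=5, batch_size=100, verbose=1)
score = model.evaluate(X_test, y_test)
print('Test accuracy: %f' % score[1])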

 

The LSTM network is built (and trained the same way) as follows:

from keras.layers import LSTM

model = Sequential()
model.add(Embedding(vocab_size, 64, input_length=max_word))
model.add(LSTM(128, return_sequences=True))
model.add(Dropout(0.2))

model.add(LSTM(64, return_sequences=True))
model.add(Dropout(0.2))

# the final LSTM returns only its last output (return_sequences defaults to
# False), so Dense(1) yields a single prediction per review
model.add(LSTM(32))
model.add(Dropout(0.2))

model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
print(model.summary())


These are all simple network structures; the strength of Keras is its ease of use, which lets you focus your attention on model design and optimization.

 

Of the models above, the LSTM achieves slightly higher accuracy.

 
