Kaggle Movie Review Sentiment Classification Competition, Tuned Version

I have been working on the Kaggle sentiment classification competition for quite a while. Starting from a brute-force first version, I kept iterating until the accuracy finally looks halfway decent, so I am recording the process here and will keep tuning it later:

import pandas as pd
from keras.layers import Dense,LSTM,Bidirectional,Embedding,Conv1D,MaxPooling1D,GlobalMaxPooling1D,Dropout,SpatialDropout1D,GRU
from keras.models import Sequential
import keras.preprocessing as preprocessing
from nltk.corpus import stopwords
from keras.utils.np_utils import to_categorical
import nltk
import matplotlib.pyplot as plt
import os
import re
import numpy as np
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from nltk import WordNetLemmatizer,word_tokenize
from keras import Model,Input,layers
stoplist = stopwords.words('english')
data_train = pd.read_csv(r'../input/movie-review-sentiment-analysis-kernels-only/train.tsv',sep='\t')
data_test = pd.read_csv(r'../input/movie-review-sentiment-analysis-kernels-only/test.tsv',sep='\t')
sub = pd.read_csv(r'../input/movie-review-sentiment-analysis-kernels-only/sampleSubmission.csv')
data_train_X = data_train.Phrase.values
X_test = list(data_test.Phrase.values)
data_train_Y = list(data_train.Sentiment.values)
lemmat = WordNetLemmatizer()
def clean(data):
    data = [re.sub('[^a-zA-Z]', ' ', phrase) for phrase in data]  # keep English letters only
    data_x = []
    for i in data:
        data_word = word_tokenize(i)  # tokenize the phrase
        # remove stopwords, but fall back to the original tokens if the phrase
        # would otherwise become empty (many short phrases are pure stopwords)
        data_word_result = [word for word in data_word if word not in stoplist]
        if data_word_result:
            data_word = data_word_result
        data_word1 = [lemmat.lemmatize(word.lower()) for word in data_word]  # lemmatize (normalize word forms)
        data_word1 = ' '.join(data_word1)
        data_x.append(data_word1)
    return data_x
data_train_X = clean(data_train_X)
# data_train_Y = [data_train_Y[i] for i in range(len(data_train_Y)) if i not in data_train_X_emp]
data_train_Y = to_categorical(data_train_Y)
X_train,X_val,Y_train,Y_val = train_test_split(data_train_X,data_train_Y,stratify=data_train_Y,test_size=0.2,random_state=123) # stratified train/validation split
def count_word(data_word):
    word_set = set()
    for j in data_word:
        for k in word_tokenize(j):
            word_set.add(k)
    word_count = len(word_set)  # vocabulary size
    return word_count
X_train_count = count_word(X_train)
# print(X_train_count)
# X_val,X_val_count = clean(X_val)
# # Y_train = [Y_train[i] for i in range(len(Y_train)) if i not in X_emp] # drop labels whose phrases became empty
# # Y_val = [Y_val[i] for i in range(len(Y_val)) if i not in X_val_emp]
# # Y_train = to_categorical(Y_train)
# # # Y_val = to_categorical(Y_val)
def word_maxlen(data_x): # longest phrase length, in tokens
    len_list = []
    for i in data_x:
        i = word_tokenize(i)
        len_list.append(len(i))
    maxlen = max(len_list)
    return maxlen
X_train_maxlen = word_maxlen(X_train)
tokenizer = Tokenizer(num_words=X_train_count) # build the tokenizer, capped at the vocabulary size
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_train = preprocessing.sequence.pad_sequences(X_train,maxlen=X_train_maxlen) # pad / truncate to a fixed length
X_val = tokenizer.texts_to_sequences(X_val)
X_val = preprocessing.sequence.pad_sequences(X_val,maxlen=X_train_maxlen)

word_index = tokenizer.word_index # word-to-index mapping
glove_base = '../input/mydata123'
glove_dir = os.path.join(glove_base,'glove.6B.100d.txt') # pre-trained GloVe word vectors
emb_vec_dict = {}
with open(glove_dir,'r',encoding='utf8') as f:
    for line in f:
        line = line.split()
        word = line[0]
        vec = np.asarray(line[1:], dtype='float32')  # the remaining fields are the embedding vector
        emb_vec_dict[word] = vec
emd_dim = 100

emb_num = np.stack(emb_vec_dict.values()) # stack all GloVe vectors into one matrix
emb_mat = np.random.normal(emb_num.mean(),emb_num.std(),(X_train_count,emd_dim)) # initialize the embedding matrix with the GloVe mean and std
for word,i in word_index.items():
    if i < X_train_count:
        emb_vec = emb_vec_dict.get(word)
        if emb_vec is not None:
            emb_mat[i] = emb_vec
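# Optional sanity check (my addition, not in the original script): report how many
# vocabulary words actually received a pre-trained GloVe vector.
covered = sum(1 for word, i in word_index.items()
              if i < X_train_count and word in emb_vec_dict)
print('words covered by GloVe:', covered, '/', min(len(word_index), X_train_count - 1))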
X_test = clean(X_test)
X_test = tokenizer.texts_to_sequences(X_test)
X_test = preprocessing.sequence.pad_sequences(X_test,maxlen=X_train_maxlen)
# print(emb_mat)
model = Sequential()
model.add(Embedding(X_train_count,emd_dim,input_length=X_train_maxlen,weights=[emb_mat],trainable=True)) # embedding layer initialized with the (X_train_count, emd_dim) GloVe matrix, left trainable so it is fine-tuned
model.add(Conv1D(127,7,padding='same'))
model.add(MaxPooling1D(5))
model.add(SpatialDropout1D(0.5))
model.add(Bidirectional(GRU(128,return_sequences=True)))
model.add(Bidirectional(GRU(64)))
model.add(Dropout(0.8))
model.add(Dense(5,activation='softmax'))
# model.layers[0].set_weights([emb_mat]) # load the pre-trained embeddings into the embedding layer
# model.layers[0].trainable = False # freeze the embedding layer
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['acc'])
history = model.fit(X_train,Y_train,batch_size=1024,epochs=8,validation_data=(X_val,Y_val),verbose=1)
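# Optional extra (my addition): matplotlib is already imported and the fit history
# is captured above, so the training curves can be inspected. With metrics=['acc']
# the history keys should be 'acc' and 'val_acc' on this Keras version.
plt.plot(history.history['acc'], label='train acc')
plt.plot(history.history['val_acc'], label='val acc')
plt.xlabel('epoch')
plt.legend()
plt.savefig('history_acc.png')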
y_pred=model.predict_classes(X_test, verbose=1)
sub.Sentiment=y_pred
sub.to_csv('re2.csv',index=False)
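
One compatibility note: predict_classes only exists on Sequential models in older Keras releases and was removed in later TensorFlow/Keras versions. On a newer setup, a rough equivalent (my substitution, not part of the original run) would be:

y_pred = np.argmax(model.predict(X_test, verbose=1), axis=1)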

 
