A classification task based on word2vec


# Dependencies: bs4, nltk, gensim
import os
import re
import numpy as np
import pandas as pd

from bs4 import BeautifulSoup

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
import nltk
from nltk.corpus import stopwords

Load the training data with pandas

df = pd.read_csv('../data/labeledTrainData.tsv', sep='\t', escapechar='\\')
print('Number of reviews: {}'.format(len(df)))
df.head()
Number of reviews: 25000
   id      sentiment  review
0  5814_8  1          With all this stuff going down at the moment w...
1  2381_9  1          "The Classic War of the Worlds" by Timothy Hin...
2  7759_3  0          The film starts with a manager (Nicholas Bell)...
3  3630_4  0          It must be assumed that those who praised this...
4  9495_8  1          Superbly trashy and wondrously unpretentious 8...

Preprocess the review data, roughly in the following steps:

  1. Strip the HTML tags
  2. Remove punctuation
  3. Split into words/tokens
  4. Remove stopwords
  5. Rejoin the tokens into a new "sentence"
df['review'][1000]
"I watched this movie really late last night and usually if it's late then I'm pretty forgiving of movies. Although I tried, I just could not stand this movie at all, it kept 
getting worse and worse as the movie went on. Although I know it's suppose to be a comedy but I didn't find it very funny. It was also an especially unrealistic, and 
jaded portrayal of rural life. In case this is what any of you think country life is like, it's definitely not. I do have to agree that some of the guy cast members were cute, but the french guy was really fake. I do have to agree that it tried to have a good lesson in the story, but overall my recommendation is that no one over 8 
watch it, it's just too annoying."
# Remove the HTML tags
example = BeautifulSoup(df['review'][1000], 'html.parser').get_text()
example
"I watched this movie really late last night and usually if it's late then I'm pretty forgiving of movies. Although I tried, I just could not stand this movie at all, it kept 
getting worse and worse as the movie went on. Although I know it's suppose to be a comedy but I didn't find it very funny. It was also an especially unrealistic, and jaded portrayal of rural life. In case this is what any of you think country life is like, it's definitely not. I do have to agree that some of the guy cast members were 
cute, but the french guy was really fake. I do have to agree that it tried to have a good lesson in the story, but overall my recommendation is that no one over 8
 watch it, it's just too annoying."
# Remove punctuation
example_letters = re.sub(r'[^a-zA-Z]', ' ', example)
example_letters
'I watched this movie really late last night and usually if it s late then I m pretty forgiving of movies  Although I tried  I just could not stand this movie at all  it kept 
getting worse and worse as the movie went on  Although I know it s suppose to be a comedy but I didn t find it very funny  It was also an especially unrealistic  
and jaded portrayal of rural life  In case this is what any of you think country life is like  it s definitely not  I do have to agree that some of the guy cast members 
were cute  but the french guy was really fake  I do have to agree that it tried to have a good lesson in the story  but overall my recommendation is that no one 
over   watch it  it s just too annoying '

words = example_letters.lower().split()
words
['i',
 'watched',
 'this',
 'movie',
 'really',
 'late',
 'last',
 'night',
 'and',
 'usually',
 'if',
 'it',
 's',
 'late',
 'then',
 'i',
 'm',
 'pretty',
 'forgiving',
 'of',
 'movies',
 'although',
 'i',
 'tried',
 'i',
 'just',
 'could',
 'not',
 'stand',
 'this',
 'movie',
 'at',
 'all',
 'it',
 'kept',
 'getting',
 'worse',
 'and',
 'worse',
 'as',
 'the',
 'movie',
 'went',
 'on',
 'although',
 'i',
 'know',
 'it',
 's',
 'suppose',
 'to',
 'be',
 'a',
 'comedy',
 'but',
 'i',
 'didn',
 't',
 'find',
 'it',
 'very',
 'funny',
 'it',
 'was',
 'also',
 'an',
 'especially',
 'unrealistic',
 'and',
 'jaded',
 'portrayal',
 'of',
 'rural',
 'life',
 'in',
 'case',
 'this',
 'is',
 'what',
 'any',
 'of',
 'you',
 'think',
 'country',
 'life',
 'is',
 'like',
 'it',
 's',
 'definitely',
 'not',
 'i',
 'do',
 'have',
 'to',
 'agree',
 'that',
 'some',
 'of',
 'the',
 'guy',
 'cast',
 'members',
 'were',
 'cute',
 'but',
 'the',
 'french',
 'guy',
 'was',
 'really',
 'fake',
 'i',
 'do',
 'have',
 'to',
 'agree',
 'that',
 'it',
 'tried',
 'to',
 'have',
 'a',
 'good',
 'lesson',
 'in',
 'the',
 'story',
 'but',
 'overall',
 'my',
 'recommendation',
 'is',
 'that',
 'no',
 'one',
 'over',
 'watch',
 'it',
 'it',
 's',
 'just',
 'too',
 'annoying']


# Remove stopwords (this custom list rebinds the name `stopwords`
# imported from nltk.corpus above)
stopwords = {}.fromkeys([line.rstrip() for line in open('../stopwords.txt')])
words_nostop = [w for w in words if w not in stopwords]
words_nostop
['watched',
 'movie',
 'late',
 'night',
 'late',
 'pretty',
 'forgiving',
 'movies',
 'stand',
 'movie',
 'worse',
 'worse',
 'movie',
 'suppose',
 'comedy',
 'didn',
 'funny',
 'unrealistic',
 'jaded',
 'portrayal',
 'rural',
 'life',
 'country',
 'life',
 'agree',
 'guy',
 'cast',
 'cute',
 'french',
 'guy',
 'fake',
 'agree',
 'lesson',
 'story',
 'recommendation',
 'watch',
 'annoying']
eng_stopwords = set(stopwords)

def clean_text(text):
    text = BeautifulSoup(text, 'html.parser').get_text()
    text = re.sub(r'[^a-zA-Z]', ' ', text)
    words = text.lower().split()
    words = [w for w in words if w not in eng_stopwords]
    return ' '.join(words)
df['review'][1000]
"I watched this movie really late last night and usually if it's late then I'm pretty forgiving of movies. Although I tried, I just could not stand this movie at all, it kept
 getting worse and worse as the movie went on. Although I know it's suppose to be a comedy but I didn't find it very funny. It was also an especially unrealistic, 
 and jaded portrayal of rural life. In case this is what any of you think country life is like, it's definitely not. I do have to agree that some of the guy cast members 
 were cute, but the french guy was really fake. I do have to agree that it tried to have a good lesson in the story, but overall my recommendation is that no one 
 over 8 watch it, it's just too annoying."
clean_text(df['review'][1000])
'watched movie late night late pretty forgiving movies stand movie worse worse movie suppose comedy didn funny unrealistic jaded portrayal rural life country 
life agree guy cast cute french guy fake agree lesson story recommendation watch annoying'

Add the cleaned data to the dataframe

df['clean_review'] = df.review.apply(clean_text)
df.head()
   id      sentiment  review                                             clean_review
0  5814_8  1          With all this stuff going down at the moment w...  stuff moment mj ve started listening music wat...
1  2381_9  1          "The Classic War of the Worlds" by Timothy Hin...  classic war worlds timothy hines entertaining ...
2  7759_3  0          The film starts with a manager (Nicholas Bell)...  film starts manager nicholas bell investors ro...
3  3630_4  0          It must be assumed that those who praised this...  assumed praised film filmed opera didn read do...
4  9495_8  1          Superbly trashy and wondrously unpretentious 8...  superbly trashy wondrously unpretentious explo...

Extract bag-of-words features (using sklearn's CountVectorizer)

vectorizer = CountVectorizer(max_features = 5000) 
train_data_features = vectorizer.fit_transform(df.clean_review).toarray()
train_data_features.shape
(25000, 5000)
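As a quick sanity check on the bag-of-words features, you can peek at the learned vocabulary. A minimal sketch; on the older scikit-learn releases this notebook targets the accessor is get_feature_names(), renamed to get_feature_names_out() in scikit-learn 1.0:

# Inspect the 5000-word vocabulary learned by CountVectorizer
vocab = vectorizer.get_feature_names()  # get_feature_names_out() on sklearn >= 1.0
print(len(vocab))    # 5000
print(vocab[:10])    # first few words, alphabetically ordered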
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(train_data_features,df.sentiment,test_size = 0.2, random_state = 0)
import matplotlib.pyplot as plt
import itertools
def plot_confusion_matrix(cm, classes,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=0)
    plt.yticks(tick_marks, classes)

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')

Train a classifier

LR_model = LogisticRegression()
LR_model = LR_model.fit(X_train, y_train)
y_pred = LR_model.predict(X_test)
cnf_matrix = confusion_matrix(y_test,y_pred)

print("Recall metric in the testing dataset: ", cnf_matrix[1,1]/(cnf_matrix[1,0]+cnf_matrix[1,1]))

print("accuracy metric in the testing dataset: ", (cnf_matrix[1,1]+cnf_matrix[0,0])/(cnf_matrix[0,0]+cnf_matrix[1,1]+cnf_matrix[1,0]+cnf_matrix[0,1]))

# Plot non-normalized confusion matrix
class_names = [0,1]
plt.figure()
plot_confusion_matrix(cnf_matrix
                      , classes=class_names
                      , title='Confusion matrix')
plt.show()
Recall metric in the testing dataset:  0.853181076672
accuracy metric in the testing dataset:  0.8454

[Figure: confusion matrix for the bag-of-words model]
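The recall and accuracy computed by hand from the confusion matrix above can also be read directly from sklearn's metrics; a small equivalent sketch:

# Equivalent metrics computed by sklearn instead of by hand
from sklearn.metrics import accuracy_score, recall_score, classification_report

print(accuracy_score(y_test, y_pred))         # matches the manual accuracy
print(recall_score(y_test, y_pred))           # recall of the positive class (sentiment = 1)
print(classification_report(y_test, y_pred))  # per-class precision/recall/F1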

df = pd.read_csv('../data/unlabeledTrainData.tsv', sep='\t', escapechar='\\')
print('Number of reviews: {}'.format(len(df)))
df.head()
Number of reviews: 50000
   id       review
0  9999_0   Watching Time Chasers, it obvious that it was ...
1  45057_0  I saw this film about 20 years ago and remembe...
2  15561_0  Minor Spoilers<br /><br />In New York, Joan Ba...
3  7161_0   I went to see this film with a great deal of e...
4  43971_0  Yes, I agree with everyone on this site this m...
df['clean_review'] = df.review.apply(clean_text)
df.head()
   id       review                                             clean_review
0  9999_0   Watching Time Chasers, it obvious that it was ...  watching time chasers obvious bunch friends si...
1  45057_0  I saw this film about 20 years ago and remembe...  film ago remember nasty based true incident br...
2  15561_0  Minor Spoilers<br /><br />In New York, Joan Ba...  minor spoilersin york joan barnard elvire audr...
3  7161_0   I went to see this film with a great deal of e...  film deal excitement school director friend mi...
4  43971_0  Yes, I agree with everyone on this site this m...  agree site movie bad call movie insult movies ...
review_part = df['clean_review']
review_part.shape
(50000,)
import warnings
warnings.filterwarnings("ignore")

tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')


def split_sentences(review):
    raw_sentences = tokenizer.tokenize(review.strip())
    sentences = [clean_text(s) for s in raw_sentences if s]
    return sentences
sentences = sum(review_part.apply(split_sentences), [])
print('{} reviews -> {} sentences'.format(len(review_part), len(sentences)))
50000 reviews -> 50000 sentences

Each review maps to exactly one "sentence" here because clean_review already has all punctuation stripped, so the punkt tokenizer finds no sentence boundaries; applying split_sentences to the raw review column instead would yield a genuine sentence-level split.
sentences[0]
'watching time chasers obvious bunch friends sitting day film school hey pool money bad movie bad movie dull story bad script lame acting poor cinematography 
bottom barrel stock music corners cut prevented film release life'
sentences_list = []
for line in sentences:
    sentences_list.append(nltk.word_tokenize(line))
The main parameters of gensim's Word2Vec (a sketch of the gensim 4.x equivalents follows this list):

  • sentences: the training corpus; can be a list of tokenized sentences.

  • sg: the training algorithm. The default 0 selects CBOW; sg=1 selects skip-gram.

  • size: the dimensionality of the word vectors, default 100. Larger sizes need more training data but give better results; values from the tens to the hundreds are typical.

  • window: the maximum distance between the current word and the predicted word within a sentence.

  • alpha: the learning rate.

  • seed: seeds the random number generator used to initialize the word vectors.

  • min_count: truncates the vocabulary; words occurring fewer than min_count times are dropped. The default is 5.

  • max_vocab_size: a RAM cap while building the vocabulary; if the number of unique words exceeds it, the least frequent are pruned. Roughly 1 GB of RAM per 10 million words. None means no limit.

  • workers: the number of parallel training threads.

  • hs: if 1, hierarchical softmax is used; if 0 (the default), negative sampling is used.

  • negative: if > 0, negative sampling is used, drawing this many noise words.

  • iter: the number of training iterations (epochs), default 5.
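The names above follow the pre-4.0 gensim API that this notebook uses. As a hedged sketch, the same training call under gensim 4.x would use the renamed arguments (size → vector_size, iter → epochs), and queries move to the model.wv attribute:

# Hypothetical gensim 4.x equivalent of the training cell below
from gensim.models.word2vec import Word2Vec

model = Word2Vec(
    sentences_list,      # list of tokenized sentences (list of lists of str)
    vector_size=300,     # called `size` in gensim < 4.0
    window=10,           # context window size
    min_count=40,        # drop words seen fewer than 40 times
    workers=4,           # parallel training threads
    sg=0,                # 0 = CBOW (default), 1 = skip-gram
    epochs=5,            # called `iter` in gensim < 4.0
)
# In 4.x, similarity queries go through model.wv, e.g.:
# model.wv.most_similar('boy'); model.wv.doesnt_match(['man', 'woman', 'child', 'kitchen'])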

# Parameters for training the word vectors
num_features = 300    # Word vector dimensionality
min_word_count = 40   # Minimum word count
num_workers = 4       # Number of threads to run in parallel
context = 10          # Context window size
model_name = '{}features_{}minwords_{}context.model'.format(num_features, min_word_count, context)
from gensim.models.word2vec import Word2Vec
model = Word2Vec(sentences_list, workers=num_workers, \
            size=num_features, min_count = min_word_count, \
            window = context)

# If you don't plan to train the model any further, calling 
# init_sims will make the model much more memory-efficient.
model.init_sims(replace=True)

# It can be helpful to create a meaningful model name and 
# save the model for later use. You can load it later using Word2Vec.load()
model.save(os.path.join('..', 'models', model_name))
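As the comment above says, the saved model can be restored later with Word2Vec.load(); for example:

# Reload the trained model from disk (same path as the save above)
model = Word2Vec.load(os.path.join('..', 'models', model_name))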
print(model.doesnt_match(['man','woman','child','kitchen']))
#print(model.doesnt_match('france england germany berlin'.split()))
kitchen
model.most_similar("boy")
[('girl', 0.7018299698829651),
 ('astro', 0.6647905707359314),
 ('teenage', 0.6317306160926819),
 ('frat', 0.60948246717453),
 ('dad', 0.6011481285095215),
 ('yr', 0.6010577082633972),
 ('teenager', 0.5974895358085632),
 ('brat', 0.5941195487976074),
 ('joshua', 0.5832049250602722),
 ('father', 0.5825375914573669)]
model.most_similar("bad")
[('worse', 0.7071679830551147),
 ('horrible', 0.7065873742103577),
 ('terrible', 0.6872220635414124),
 ('sucks', 0.6666240692138672),
 ('crappy', 0.6634873747825623),
 ('lousy', 0.6494461297988892),
 ('horrendous', 0.6371070742607117),
 ('atrocious', 0.62550288438797),
 ('suck', 0.6224384307861328),
 ('awful', 0.619296669960022)]
df = pd.read_csv('../data/labeledTrainData.tsv', sep='\t', escapechar='\\')
df.head()
   id      sentiment  review
0  5814_8  1          With all this stuff going down at the moment w...
1  2381_9  1          "The Classic War of the Worlds" by Timothy Hin...
2  7759_3  0          The film starts with a manager (Nicholas Bell)...
3  3630_4  0          It must be assumed that those who praised this...
4  9495_8  1          Superbly trashy and wondrously unpretentious 8...
from nltk.corpus import stopwords
eng_stopwords = set(stopwords.words('english'))

def clean_text(text, remove_stopwords=False):
    text = BeautifulSoup(text, 'html.parser').get_text()
    text = re.sub(r'[^a-zA-Z]', ' ', text)
    words = text.lower().split()
    if remove_stopwords:
        words = [w for w in words if w not in eng_stopwords]
    return words

def to_review_vector(review):
    # Sum the word2vec vectors of all in-vocabulary words in the review.
    # Note: word_vec has shape (1, 300), so mean(axis=0) returns the summed
    # vector itself rather than an average over words.
    words = clean_text(review, remove_stopwords=True)
    word_vec = np.zeros((1, 300))
    for word in words:
        if word in model:  # membership test on the model (pre-4.0 gensim API)
            word_vec += np.array([model[word]])
    return pd.Series(word_vec.mean(axis=0))
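Because word_vec has shape (1, 300), mean(axis=0) returns the plain sum of the word vectors, not their average. A hedged variant that averages over the in-vocabulary words, a more common document embedding, might look like this (to_review_vector_mean is an illustrative name, not from the original):

# Hypothetical averaged variant of to_review_vector
def to_review_vector_mean(review):
    words = clean_text(review, remove_stopwords=True)
    vecs = [model[word] for word in words if word in model]  # pre-4.0 gensim API
    if not vecs:
        return pd.Series(np.zeros(300))   # no in-vocabulary words
    return pd.Series(np.mean(vecs, axis=0))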

train_data_features = df.review.apply(to_review_vector)
train_data_features.head()
(feature matrix preview: 5 rows × 300 columns, one column per word2vec dimension)

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(train_data_features,df.sentiment,test_size = 0.2, random_state = 0)
LR_model = LogisticRegression()
LR_model = LR_model.fit(X_train, y_train)
y_pred = LR_model.predict(X_test)
cnf_matrix = confusion_matrix(y_test,y_pred)

print("Recall metric in the testing dataset: ", cnf_matrix[1,1]/(cnf_matrix[1,0]+cnf_matrix[1,1]))

print("accuracy metric in the testing dataset: ", (cnf_matrix[1,1]+cnf_matrix[0,0])/(cnf_matrix[0,0]+cnf_matrix[1,1]+cnf_matrix[1,0]+cnf_matrix[0,1]))

# Plot non-normalized confusion matrix
class_names = [0,1]
plt.figure()
plot_confusion_matrix(cnf_matrix
                      , classes=class_names
                      , title='Confusion matrix')
plt.show()
Recall metric in the testing dataset:  0.87969004894
accuracy metric in the testing dataset:  0.865

Compared with the bag-of-words features (recall 0.853, accuracy 0.845), the summed word2vec features improve recall to 0.880 and accuracy to 0.865.

[Figure: confusion matrix for the word2vec-feature model]
