(1) First, prepare the data; here it is in unlabeled form.
(2) Prepare the stopword list.
Then design the code around the data.
Training word vectors with word2vec
import os
import re
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import nltk.data
#nltk.download()
#from nltk.corpus import stopwords
from gensim.models.word2vec import Word2Vec
# Load the data
def load_dataset(name, nrows=None):
    datasets = {
        'unlabeled_train': 'unlabeledTrainData.tsv',
        'labeled_train': 'labeledTrainData.tsv',
        'test': 'testData.tsv'
    }
    if name not in datasets:
        raise ValueError(name)
    data_file = os.path.join('..', 'data', datasets[name])
    df = pd.read_csv(data_file, sep='\t', escapechar='\\', nrows=nrows)
    print('Number of reviews: {}'.format(len(df)))
    return df
df = load_dataset('unlabeled_train')
df.head()
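Assuming this is the Kaggle "Bag of Words Meets Bags of Popcorn" data (an assumption; the file names above match it), the unlabeled set has just two columns, id and review. A quick check:
print(df.columns.tolist())  # expected ['id', 'review'] for the Kaggle data
print(df.review[0][:80])    # first 80 characters of the raw, HTML-laden review text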
Data preprocessing
# Keep both options open: stopwords can be removed or kept.
#eng_stopwords = set(stopwords.words('english'))
eng_stopwords = set(line.rstrip() for line in open('../stopwords.txt'))
# Define the text-cleaning function.
def clean_text(text, remove_stopwords=False):
    text = BeautifulSoup(text, 'html.parser').get_text()
    text = re.sub(r'[^a-zA-Z]', ' ', text)
    words = text.lower().split()
    if remove_stopwords:
        words = [w for w in words if w not in eng_stopwords]
    return words
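A quick sanity check of clean_text on a made-up snippet: HTML tags are stripped, punctuation is dropped, and the text is lowercased and tokenized.
print(clean_text('<b>This movie is great!</b>'))
# ['this', 'movie', 'is', 'great']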
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# Log progress: report every 1000th call to split_sentences.
def print_call_counts(f):
    n = 0
    def wrapped(*args, **kwargs):
        nonlocal n
        n += 1
        if n % 1000 == 1:
            print('method {} called {} times'.format(f.__name__, n))
        return f(*args, **kwargs)
    return wrapped
@print_call_counts
def split_sentences(review):
    raw_sentences = tokenizer.tokenize(review.strip())
    sentences = [clean_text(s) for s in raw_sentences if s]
    return sentences
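For intuition, one review becomes a list of token lists, one per sentence (the input here is invented; note the decorator also logs its first call):
print(split_sentences('I loved it. The acting was superb!'))
# [['i', 'loved', 'it'], ['the', 'acting', 'was', 'superb']]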
%time sentences = sum(df.review.apply(split_sentences), [])  # %time is IPython magic; flattens the per-review sentence lists into one list
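A side note on that flattening step: sum(lists, []) re-copies the accumulator on every addition, so it is quadratic in the number of reviews; itertools.chain is a linear-time equivalent:
import itertools
sentences = list(itertools.chain.from_iterable(df.review.apply(split_sentences)))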
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# Set the word2vec training parameters
num_features = 300    # Word vector dimensionality; 300 or 500 is a reasonable choice
min_word_count = 40   # Minimum word count: a word must appear at least 40 times
num_workers = 4       # Number of threads to run in parallel
context = 10          # Context window size
downsampling = 1e-3   # Downsampling rate for frequent words (gensim's `sample` parameter)
# NNLM recap: a projection layer and a 4-word window; the first 3 words predict the 4th.
# With a 100K-word vocabulary, each of the 3 context words is a 1 x 100K one-hot vector.
# The projection matrix C (300 x 100K) is initialized as dense weights; multiplying C by
# a one-hot vector yields a dense 300 x 1 vector, and concatenating the three gives 900 x 1.
# Hidden layer: 900 x 500. The 900 x 1 input becomes 500 x 1 via Wx + b, and the output
# layer maps back to the 100K-dim vocabulary.
#
# CBOW (continuous bag of words):
# e.g. the sentence "I like machine learning" tokenizes into four word vectors;
# the context vectors ("I", "like", "machine") are summed directly to predict
# the probability of the center word ("learning").
# `window` is the sliding window: slide it over the text to extract every window of that size.
# min_word_count = 40: a word must appear at least 40 times to be kept.
#
# Skip-gram (the other Mikolov architecture) is the reverse:
# use the center word ("learning") to predict its context ("I like machine").
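To make the dimension bookkeeping above concrete, here is a minimal NumPy sketch of the NNLM forward pass. The sizes follow the notes (300-dim projection, 3 context words, 500 hidden units), except the vocabulary is scaled down from 100K to 10K to keep the demo light; all indices are made up.
import numpy as np

V, D, H, CTX = 10_000, 300, 500, 3            # vocab (scaled down from 100K), embedding dim, hidden units, context words
C = np.random.randn(D, V) * 0.01              # projection matrix (300 x V), dense, randomly initialized
W = np.random.randn(H, D * CTX) * 0.01        # hidden-layer weights (500 x 900)
b = np.zeros(H)                               # hidden-layer bias
U = np.random.randn(V, H) * 0.01              # output layer, back to the vocabulary

def one_hot(idx):
    v = np.zeros(V)
    v[idx] = 1.0
    return v

context_ids = [12, 345, 6789]                 # three made-up context word indices
proj = [C @ one_hot(i) for i in context_ids]  # each product is a dense 300-vector (a column lookup in C)
x = np.concatenate(proj)                      # concatenated 900-vector, as in the notes
h = np.tanh(W @ x + b)                        # hidden layer: Wx + b plus a nonlinearity, gives a 500-vector
scores = U @ h                                # one score per vocabulary word
probs = np.exp(scores - scores.max())
probs /= probs.sum()                          # softmax over the vocabulary
print(x.shape, h.shape, probs.shape)          # (900,) (500,) (10000,)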
model_name = '{}features_{}minwords_{}context.model'.format(num_features, min_word_count, context)
print('Training model...')
# Initialize and train the model
model = Word2Vec(sentences, workers=num_workers,
                 size=num_features,        # note: `size` was renamed `vector_size` in gensim 4.x
                 min_count=min_word_count,
                 window=context, sample=downsampling)
# If you don't plan to train the model any further, calling init_sims
# will make the model much more memory-efficient.
model.init_sims(replace=True)
# It can be helpful to create a meaningful model name and
# save the model for later use. You can load it later using Word2Vec.load()
model.save(os.path.join('..', 'models', model_name))
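As the comment above says, the saved model can be reloaded later with Word2Vec.load(). A minimal sketch, reusing the paths from this script ('movie' is just a hypothetical query word):
loaded = Word2Vec.load(os.path.join('..', 'models', model_name))
print(loaded.most_similar('movie', topn=5))  # hypothetical query; returns the 5 nearest words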
print(model.doesnt_match("man woman child kitchen".split()))
print(model.doesnt_match('france england germany berlin'.split()))
print(model.most_similar("man"))#关联度最高的。
Training finally produces a model file of a bit over 40 MB.
The output of the run: