首先需要一份比较大的中文语料数据,可以考虑中文的维基百科(也可以试试搜狗的新闻语料库)。中文维基百科的打包文件地址为
https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2
中文维基百科的数据不是太大,xml的压缩文件大约1G左右。首先用 process_wiki_data.py处理这个XML压缩文件,执行:python process_wiki_data.py zhwiki-latest-pages-articles.xml.bz2 wiki.zh.text
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# process_wiki_data.py: parse the Wikipedia XML dump and convert it to plain
# text — one article per line, tokens separated by single spaces.
"""Usage: python process_wiki_data.py <zhwiki-pages-articles.xml.bz2> <output.txt>"""
import logging
import os.path
import sys
from gensim.corpora import WikiCorpus

if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s", ' '.join(sys.argv))

    # check and process input arguments
    if len(sys.argv) < 3:
        # Without a module docstring this used to be `None % locals()` -> TypeError.
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp = sys.argv[1:3]

    space = " "
    i = 0
    # Context manager guarantees the output file is closed even if parsing
    # raises part-way through; explicit UTF-8 for the Chinese text.
    with open(outp, 'w', encoding='utf-8') as output:
        # dictionary={} skips building a gensim Dictionary we do not need here.
        wiki = WikiCorpus(inp, lemmatize=False, dictionary={})
        for text in wiki.get_texts():
            output.write(space.join(text) + "\n")
            i += 1
            if i % 10000 == 0:
                logger.info("Saved %d articles", i)
    logger.info("Finished Saved %d articles", i)
然后用 Python 的 jieba 库完成中文分词,生成分词文件 wiki.zh.text.seg
- 分词文件:seg.py
# seg.py: segment wiki.zh.text with jieba word segmentation, writing the
# space-separated result to wiki.zh.text.seg (one article per line).
# Run with: python seg.py
import jieba
import jieba.analyse
import jieba.posseg as pseg
import codecs
import sys

# codecs.open with an explicit encoding so the Chinese text round-trips safely.
f = codecs.open('wiki.zh.text', 'r', encoding="utf8")
target = codecs.open("wiki.zh.text.seg", 'w', encoding="utf8")
print('open files')

line_num = 1
line = f.readline()
while line:
    print('---- processing ', line_num, 'article----------------')
    # jieba.cut yields the tokens of the line; rejoin them with spaces so
    # word2vec's LineSentence can consume the file directly.
    line_seg = " ".join(jieba.cut(line))
    target.writelines(line_seg)
    line_num = line_num + 1
    line = f.readline()

# NOTE(review): the original script called exit() here and then carried a
# second, unreachable processing loop (which even referenced a nonexistent
# f.readline1()); that dead code has been removed.
f.close()
target.close()
- 接着用word2vec工具训练:
python train_word2vec_model.py wiki.zh.text.seg wiki.zh.text.model wiki.zh.text.vector
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# train_word2vec_model.py: train a word2vec model from the segmented corpus.
"""Usage: python train_word2vec_model.py <segmented-corpus> <model-out> <vectors-out>"""
import logging
import os.path
import sys
import multiprocessing
from gensim.corpora import WikiCorpus
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence

if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s", ' '.join(sys.argv))

    # check and process input arguments
    if len(sys.argv) < 4:
        # Python-3 print call (the original used Python-2 `print ...` syntax).
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp1, outp2 = sys.argv[1:4]

    # LineSentence streams the corpus line by line, so the whole file is never
    # loaded into memory. `size=400`: 400-dimensional vectors; min_count=5
    # drops rare tokens. (This targets the old gensim API — newer releases
    # renamed `size` to `vector_size`.)
    model = Word2Vec(LineSentence(inp), size=400, window=5, min_count=5,
                     workers=multiprocessing.cpu_count())

    # trim unneeded model memory = use (much) less RAM
    # model.init_sims(replace=True)
    model.save(outp1)                                 # full model (can resume training)
    model.save_word2vec_format(outp2, binary=False)   # plain-text vectors only
测试模型效果:
In [1]: import gensim
In [2]: model = gensim.models.Word2Vec.load("wiki.zh.text.model")
In [3]: model.most_similar(u"足球")
Out[3]:
[(u'\u8054\u8d5b', 0.6553816199302673),
(u'\u7532\u7ea7', 0.6530429720878601),
(u'\u7bee\u7403', 0.5967546701431274),
(u'\u4ff1\u4e50\u90e8', 0.5872289538383484),
(u'\u4e59\u7ea7', 0.5840631723403931),
(u'\u8db3\u7403\u961f', 0.5560152530670166),
(u'\u4e9a\u8db3\u8054', 0.5308005809783936),
(u'allsvenskan', 0.5249762535095215),
(u'\u4ee3\u8868\u961f', 0.5214947462081909),
(u'\u7532\u7ec4', 0.5177896022796631)]
In [4]: result = model.most_similar(u"足球")
In [5]: for e in result:
print(e[0], e[1])
....:
联赛 0.65538161993
甲级 0.653042972088
篮球 0.596754670143
俱乐部 0.587228953838
乙级 0.58406317234
足球队 0.556015253067
亚足联 0.530800580978
allsvenskan 0.52497625351
代表队 0.521494746208
甲组 0.51778960228
In [6]: result = model.most_similar(u"男人")
In [7]: for e in result:
print(e[0], e[1])
....:
女人 0.77537125349
家伙 0.617369174957
妈妈 0.567102909088
漂亮 0.560832381248
잘했어 0.540875017643
谎言 0.538448691368
爸爸 0.53660941124
傻瓜 0.535608053207
예쁘다 0.535151124001
mc刘 0.529670000076