Python Data Analysis and Machine Learning: Building a Chinese Wikipedia Word Vector Model with Gensim

1. Parse the Wikipedia corpus into plain text format

Download the Chinese Wikipedia corpus:

https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2

Run the following command:

python process.py zhwiki-latest-pages-articles.xml.bz2 wiki.zh.fanti.text

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The modified code is as follows:
import logging
import os.path
import sys
from gensim.corpora import WikiCorpus

'''
Download the Chinese Wikipedia corpus from:
https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2

Parse the downloaded zhwiki-latest-pages-articles.xml.bz2 into the Chinese plain-text file wiki.zh.fanti.text
'''
if __name__ == '__main__':

    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # check and process input arguments
    if len(sys.argv) < 3:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp = sys.argv[1:3]
    space = ' '
    i = 0
    output = open(outp, 'w', encoding='utf-8')
    wiki = WikiCorpus(inp, lemmatize=False, dictionary={})
    for text in wiki.get_texts():
        s = space.join(text) + "\n"
        output.write(s)
        i = i + 1
        if (i % 10000 == 0):
            logger.info("Saved " + str(i) + " articles")
    output.close()
    logger.info("Finished Saved " + str(i) + " articles")
# python process.py zhwiki-latest-pages-articles.xml.bz2 wiki.zh.fanti.text
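A quick way to confirm the extraction worked is to print the first few lines of the output file. This is only a minimal sketch, assuming the file name wiki.zh.fanti.text from the command above:

import itertools

with open('wiki.zh.fanti.text', encoding='utf-8') as f:
    # Each line is one article; tokens are separated by spaces (still Traditional Chinese).
    for line in itertools.islice(f, 3):
        print(line[:80])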

2. Download OpenCC and convert Traditional Chinese to Simplified Chinese

OpenCC (Windows build) download link: http://download.csdn.net/download/adam_zs/10213686

Run the command:

opencc -i wiki.zh.fanti.text -o wiki.zh.jianti.text -c t2s.json
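If you prefer to stay in Python, the same conversion can be done with the OpenCC Python bindings (for example the opencc-python-reimplemented package). This is only a sketch, assuming the input/output file names used above:

from opencc import OpenCC

cc = OpenCC('t2s')  # Traditional Chinese -> Simplified Chinese
with open('wiki.zh.fanti.text', encoding='utf-8') as fin, \
        open('wiki.zh.jianti.text', 'w', encoding='utf-8') as fout:
    for line in fin:
        fout.write(cc.convert(line))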

3. Segment the text with the jieba tokenizer

import codecs

import jieba

# Input: the simplified-Chinese wiki text; output: the same text with words separated by spaces.
f = codecs.open('wiki.zh.jianti.text', 'r', encoding="utf8")              # input
target = codecs.open("wiki.zh.jianti.jieba.text", 'w', encoding="utf8")   # output
print('open files')
line_num = 1
line = f.readline()
while line:
    print('---- processing', line_num, 'article ----------------')
    line_seg = " ".join(jieba.cut(line))
    target.writelines(line_seg)
    line_num = line_num + 1
    line = f.readline()
f.close()
target.close()

# python Testjieba.py
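For reference, this is roughly what jieba.cut produces on a short sentence (a minimal example; the exact segmentation can vary with the jieba version and dictionary):

import jieba

sample = "我爱北京天安门"
print(" ".join(jieba.cut(sample)))
# Typical output: 我 爱 北京 天安门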

4. Build the word2vec model

Run the command:

python word2vec_model.py wiki.zh.jianti.jieba.text wiki.zh.text.model wiki.zh.text.vector

import logging
import os.path
import sys
import multiprocessing
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence

'''Build and save the word2vec model'''
if __name__ == '__main__':

    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # check and process input arguments
    if len(sys.argv) < 4:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp1, outp2 = sys.argv[1:4]
    model = Word2Vec(LineSentence(inp), size=400, window=5, min_count=5, workers=multiprocessing.cpu_count())
    model.save(outp1)
    model.wv.save_word2vec_format(outp2, binary=False)

# wiki.zh.jianti.jieba.text  the segmented input data
# wiki.zh.text.model  the name of the saved model
# python word2vec_model.py wiki.zh.jianti.jieba.text wiki.zh.text.model wiki.zh.text.vector

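Because wiki.zh.text.vector is saved with save_word2vec_format in plain text, it can also be loaded on its own with gensim's KeyedVectors, without reloading the full model. A minimal sketch (the query words are assumed to be in the vocabulary):

from gensim.models import KeyedVectors

wv = KeyedVectors.load_word2vec_format('wiki.zh.text.vector', binary=False)
print(wv['数学'][:10])                 # first 10 dimensions of the vector for 数学
print(wv.similarity('数学', '物理'))   # cosine similarity between two words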

5. Test the model

from gensim.models import Word2Vec

# wiki.zh.text.model  the model trained in step 4
en_wiki_word2vec_model = Word2Vec.load('wiki.zh.text.model')

testwords = ['苹果', '数学', '学术', '白痴', '篮球']
for word in testwords:
    res = en_wiki_word2vec_model.most_similar(word)
    print(word)
    print(res)
Test results:

苹果
[('可口可乐', 0.6874077320098877), ('饮料', 0.6626321077346802), ('快餐', 0.6543407440185547), ('蛋挞', 0.6516821980476379), ('月饼', 0.6494894027709961), ('肯德基', 0.645319402217865), ('速食', 0.6439262628555298), ('奶茶', 0.6300481557846069), ('口香糖', 0.6300472617149353), ('饼干', 0.6289315819740295)]
数学
[('微积分', 0.7499909400939941), ('物理', 0.7221037149429321), ('数论', 0.720645010471344), ('几何学', 0.7173912525177002), ('高等数学', 0.7114632725715637), ('算术', 0.7057009339332581), ('概率论', 0.7033498883247375), ('几何', 0.7012019157409668), ('代数', 0.6924819946289062), ('拓扑学', 0.6891473531723022)]
学术
[('学术研究', 0.7907266020774841), ('自然科学', 0.6869786977767944), ('政经', 0.6617612838745117), ('社会科学', 0.6519638299942017), ('汉学', 0.6506223678588867), ('政治学', 0.6459405422210693), ('学术界', 0.6428353190422058), ('伦理', 0.6399595737457275), ('法学', 0.6366109848022461), ('哲学', 0.6340681314468384)]
白痴
[('耳语', 0.794904351234436), ('合填', 0.7509185671806335), ('共舞', 0.7435204982757568), ('合导', 0.7250716686248779), ('小黑', 0.7155478000640869), ('小苦妹', 0.7149988412857056), ('朱延平', 0.7116649150848389), ('妈咪', 0.7116390466690063), ('苦恋', 0.7057608366012573), ('调皮', 0.7013573050498962)]
篮球
[('网球', 0.8196929693222046), ('排球', 0.8160654902458191), ('拳击', 0.8032258152961731), ('体操', 0.7993974685668945), ('曲棍球', 0.7984918355941772), ('美式足球', 0.7910690903663635), ('乒乓球', 0.7820394039154053), ('棒球', 0.7811607122421265), ('田径', 0.7749152183532715), ('羽毛球', 0.7746673822402954)]
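most_similar also accepts positive and negative word lists for analogy-style queries. A small sketch (the example words are assumptions, must appear in the model's vocabulary, and the results depend on the trained model):

res = en_wiki_word2vec_model.most_similar(positive=['国王', '女人'], negative=['男人'], topn=5)
print(res)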





