"""
Created on Tue Sep 11 18:46:22 2018
@author: lilong
"""
"""
Segment the raw text with jieba and save the result to a new file.
"""
import jieba
import numpy as np
# Path of the raw input corpus (one document per line).
filePath='data/corpus_1.txt'
# Output path for the space-separated, jieba-segmented corpus.
fileSegWordDonePath ='data/corpusSegDone_1.txt'
def PrintListChinese(items):
    """Print each element of *items* on its own line.

    Debug helper for eyeballing lists of Chinese text; returns None.
    """
    # Iterate directly instead of `for i in range(len(...))`; the original
    # parameter name also shadowed the builtin ``list``.
    for item in items:
        print(item)
# Read the raw corpus into memory, echoing each line for inspection.
fileTrainRead = []
with open(filePath, 'r', encoding='utf-8') as fileTrainRaw:
    for line in fileTrainRaw:
        print(line)
        fileTrainRead.append(line)

# Segment every line with jieba (precise mode).  The slice [9:-11] strips a
# fixed-width prefix/suffix from each raw line -- NOTE(review): assumes every
# line carries exactly 9 leading and 11 trailing boilerplate characters;
# confirm against the corpus format.
fileTrainSeg = []
for i, raw_line in enumerate(fileTrainRead):
    # jieba.cut returns a generator; join it directly (no list() needed).
    fileTrainSeg.append([' '.join(jieba.cut(raw_line[9:-11], cut_all=False))])
    if i % 100 == 0:
        print(i)  # coarse progress indicator

# Persist the segmented corpus, one space-joined line per input line.
with open(fileSegWordDonePath, 'w', encoding='utf-8') as fW:
    for seg in fileTrainSeg:
        fW.write(seg[0])
        fW.write('\n')
"""
Obtain word vectors with gensim word2vec.
"""
import warnings
import logging
import os.path
import sys
import multiprocessing
import gensim
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
if __name__ == '__main__':
    # Name the logger after this script so log lines identify their origin.
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',
                        level=logging.INFO)
    # Lazy %-args: the message is only formatted if the record is emitted.
    logger.info("running %s", ' '.join(sys.argv))

    inp = 'data/corpusSegDone_1.txt'            # segmented corpus (phase 1 output)
    out_model = 'data/corpusSegDone_1.model'    # full gensim model (re-trainable)
    out_vector = 'data/corpusSegDone_1.vector'  # plain-text word vectors

    # Train word2vec on the whitespace-tokenized corpus; LineSentence streams
    # the file line by line, so the corpus is never fully loaded into memory.
    # NOTE(review): ``size`` was renamed ``vector_size`` in gensim 4.0 --
    # confirm the installed gensim is < 4.0 or update this keyword.
    model = Word2Vec(LineSentence(inp), size=50, window=5, min_count=5,
                     workers=multiprocessing.cpu_count())
    model.save(out_model)
    model.wv.save_word2vec_format(out_vector, binary=False)