预处理代码:分词。
输入:一句话(字符串)。
输出:分词结果,即词语组成的 list(已去除标点)。
import os

from pyltp import Segmentor
from zhon.hanzi import punctuation
def word_cut(sentence):
    """Segment a Chinese sentence into a list of words, dropping punctuation.

    Loads the LTP CWS model (with a user lexicon file named 'lexicon') on
    every call, segments *sentence*, and filters out Chinese punctuation
    marks as defined by ``zhon.hanzi.punctuation``.

    Args:
        sentence: The raw sentence to segment (a str).

    Returns:
        list[str]: The segmented words, punctuation removed.
    """
    LTP_DATA_DIR = 'C:\\Users\\d84105613\\ltp_data'
    cws_model_path = os.path.join(LTP_DATA_DIR, 'cws.model')
    segmentor = Segmentor()  # initialize the segmenter instance
    try:
        # Load the CWS model together with the external user lexicon.
        segmentor.load_with_lexicon(cws_model_path, 'lexicon')
        # Materialize the result before releasing the model resources.
        words = list(segmentor.segment(sentence))
    finally:
        # Always release the native model, even if segmentation raises.
        segmentor.release()
    # Drop single-character Chinese punctuation tokens.
    return [w for w in words if w not in punctuation]
加载词向量文件。
输入文件格式:第一行是 voc_size 和 emb_size(以空格分隔),
其后每行是一个词及其对应的词向量。
输出:词表(vocab)与词向量列表(embd)。
def loadWord2Vec(filename):
vocab = []
embd = []
cnt = 0
fr = open(filename,'r',encoding='utf-8')
line = fr.readline().strip()
#print line
word_dim = int(line.split(' ')[1