# -*- coding: utf-8 -*-
import os
from collections import OrderedDict

from pyltp import Segmentor, Postagger, Parser, NamedEntityRecognizer
class LtpParser():
    """Thin wrapper around the pyltp NLP pipeline.

    Loads the word-segmentation, part-of-speech tagging and named-entity
    recognition models from a fixed model directory; dependency parsing
    and stop-word loading are present but currently disabled.
    """

    def __init__(self):
        # Directory holding the pretrained LTP model files.
        # NOTE(review): relative path — resolved against the process CWD,
        # not this file's location; confirm callers run from the expected dir.
        LTP_DIR = "../ltp_model"

        # Word segmentation model, extended with an external user lexicon.
        self.segmentor = Segmentor()
        self.segmentor.load_with_lexicon(
            os.path.join(LTP_DIR, "cws.model"),
            os.path.join(LTP_DIR, "word_dict.txt"))  # load external lexicon

        # Part-of-speech tagging model, extended with an external lexicon.
        self.postagger = Postagger()
        self.postagger.load_with_lexicon(
            os.path.join(LTP_DIR, "pos.model"),
            os.path.join(LTP_DIR, "n_word_dict.txt"))  # load external lexicon

        # Dependency parsing is currently disabled.
        # self.parser = Parser()
        # self.parser.load(os.path.join(LTP_DIR, "parser.model"))

        # Named-entity recognition model.
        self.recognizer = NamedEntityRecognizer()
        self.recognizer.load(os.path.join(LTP_DIR, "ner.model"))

        # Stop-word loading is currently disabled.
        # with open(LTP_DIR + '/stopwords.txt', 'r', encoding='utf8') as fread:
        #     self.stopwords = set()
        #     for line in fread:
        #         self.stopwords.add(line.strip())
27 '''把实体和词性给进行对应'''
28 defwordspostags(self, name_entity_dist, words, postags):29 pre = ' '.join([item[0] + '/' + item[1] for item inzip(words, postags)])30 post =pre31 for et, infos inname_entity_dist.items():32 ifinfos:33 for info ininfos:34 post = post.replace(' '.join(info['consist']), info['name'])35 post = [word for word in post.split(' ') if len(word.split('/')) ==