from pyltp import Segmentor, Postagger
import jieba

# Test sentence containing out-of-vocabulary names: two personal names
# (戴掵莉, 付先军) and a village name (阿尔艾斯).
content = "我毕业于清华大学,我朋友的名字叫戴掵莉,我哥们的名字叫付先军;阿尔艾斯是我的村庄名字"

# --- Word segmentation with LTP ---
seg = Segmentor()
seg.load("E:/ltp3_4/cws.model")  # load the language model used for segmentation
words = seg.segment(content)
seg_words = " ".join(words)
print("LTP: ", " /".join(words))

# --- Word segmentation with jieba ---
jieba_words = jieba.cut(content, HMM=True)  # HMM=True enables new-word discovery
print("jieba: ", " /".join(jieba_words))

# --- Part-of-speech tagging with LTP ---
pos = Postagger()
pos.load("E:/ltp3_4/pos.model")  # load the POS tagging model
pos_tags = pos.postag(seg_words.split(" "))
for word, tag in zip(seg_words.split(" "), pos_tags):
    print(word + "/" + tag)

# Release the loaded models when done.
seg.release()
pos.release()

# Judging from the segmentation output, LTP recognizes the new words in this
# sentence far better than jieba.
Comparing the jieba and LTP segmentation algorithms: on this test sentence, LTP recognizes the new words (the personal names and the village name) far better than jieba.
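jieba's weakness on these out-of-vocabulary names can usually be worked around with a user dictionary. The sketch below is a minimal illustration, not part of the original experiment: the words added are the names from the test sentence above, and the dictionary-file path in the comment is a made-up example.

import jieba

content = "我毕业于清华大学,我朋友的名字叫戴掵莉,我哥们的名字叫付先军;阿尔艾斯是我的村庄名字"

# Register the unknown names at runtime so jieba keeps them whole.
for new_word in ["戴掵莉", "付先军", "阿尔艾斯"]:
    jieba.add_word(new_word)

# Alternatively, load them from a user dictionary file (one word per line,
# optionally followed by a frequency and a POS tag), e.g.:
# jieba.load_userdict("E:/userdict.txt")  # hypothetical path

print("jieba+dict: ", " /".join(jieba.cut(content, HMM=True)))

With the names registered, jieba should no longer split them into single characters, which narrows much of the gap observed in the comparison above.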