# encoding=utf-8
from __future__ import absolute_import
import os
import jieba
import jieba.posseg
from operator import itemgetter
def _get_module_path(path):
    """Return *path* resolved relative to this module's own directory.

    Normalized via os.path.normpath; os.getcwd() only matters when
    __file__ is itself relative.
    """
    return os.path.normpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__), path))


# Alias jieba's path helper (defined in jieba/__init__.py): it converts a
# user-supplied path into an absolute path.
_get_abs_path = jieba._get_abs_path

# Default inverse-document-frequency table shipped alongside this module.
DEFAULT_IDF = _get_module_path("idf.txt")
class KeywordExtractor(object):
    """Abstract base class for keyword extractors (e.g. TF-IDF, TextRank).

    Subclasses must override :meth:`extract_tags` and are expected to
    provide ``self.stop_words`` (this base class only defines the
    class-level default set ``STOP_WORDS``).
    """

    # Default English stop-word set used to filter candidate keywords.
    # NOTE(review): "that" appears twice in the literal; harmless in a set.
    STOP_WORDS = set((
        "the", "of", "is", "and", "to", "in", "that", "we", "for", "an", "are",
        "by", "be", "as", "on", "with", "can", "if", "from", "which", "you", "it",
        "this", "then", "at", "have", "all", "not", "one", "has", "or", "that"
    ))

    def set_stop_words(self, stop_words_path):
        """Load user-defined stop words (one per line, UTF-8) from a file.

        Each line is added to ``self.stop_words`` — presumably initialised
        by the subclass from ``STOP_WORDS``; this base class never sets it.

        Raises:
            Exception: if *stop_words_path* does not resolve to a file.
        """
        abs_path = _get_abs_path(stop_words_path)
        if not os.path.isfile(abs_path):
            raise Exception("jieba: file does not exist: " + abs_path)
        # Context manager guarantees the handle is closed; the original
        # leaked the file object returned by open().
        with open(abs_path, 'rb') as f:
            content = f.read().decode('utf-8')
        for line in content.splitlines():
            self.stop_words.add(line)

    def extract_tags(self, *args, **kwargs):
        """Extract keywords from text; must be implemented by subclasses."""
        raise NotImplementedError
#
class
jieba 源码学习——TF-IDF 方法计算词权重
最新推荐文章于 2024-08-18 21:50:43 发布
本文深入探讨jieba库中TF-IDF方法的实现原理,通过源码学习如何计算词语在文本中的权重,理解Python中文本处理的关键技术。
摘要由CSDN通过智能技术生成