TF-IDF Implementation
# -*- coding: utf-8 -*-
from collections import defaultdict
import math
import operator
"""
函数说明:创建数据样本
Returns:
dataset - 实验样本切分的词条
classVec - 类别标签向量
"""
def loadDataSet():
    dataset = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],  # tokenized documents
               ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
               # ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
               # ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
               # ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
               ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    return dataset
"""
函数说明:特征选择TF-IDF算法
Parameters:
list_words:词列表
Returns:
dict_feature_select:特征选择词字典
"""
def feature_select(list_words):
    # Total term frequency over the whole corpus
    doc_frequency = defaultdict(int)
    for word_list in list_words:
        for i in word_list:
            doc_frequency[i] += 1

    # TF of each word: its count divided by the total number of tokens
    word_tf = {}  # stores the tf value of every word
    sum_num = sum(doc_frequency.values())
    for i in doc_frequency:
        word_tf[i] = doc_frequency[i] / sum_num

    # IDF of each word
    doc_num = len(list_words)
    word_idf = {}                # stores the idf value of every word
    word_doc = defaultdict(int)  # stores the number of documents containing the word
    for i in doc_frequency:
        for j in list_words:
            if i in j:
                word_doc[i] += 1
    for i in doc_frequency:
        # the +1 in the denominator is a smoothing term that avoids division by zero
        word_idf[i] = math.log(doc_num / (word_doc[i] + 1))

    # TF * IDF of each word
    word_tf_idf = {}
    for i in doc_frequency:
        word_tf_idf[i] = word_tf[i] * word_idf[i]

    # Sort by value in descending order
    dict_feature_select = sorted(word_tf_idf.items(), key=operator.itemgetter(1), reverse=True)
    return dict_feature_select
if __name__ == '__main__':
    data_list = loadDataSet()             # load the sample data
    features = feature_select(data_list)  # TF-IDF value of every word
    print(features)
    print(len(features))
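
As a quick sanity check of what these formulas compute (the arithmetic below is my own addition, not part of the original code), take the word 'dog' in the three sample documents: it occurs 3 times among 21 tokens and appears in all 3 documents, so

    import math
    tf = 3 / 21                  # ≈ 0.1429, corpus-level term frequency
    idf = math.log(3 / (3 + 1))  # ≈ -0.2877, negative because of the +1 smoothing
    print(tf * idf)              # ≈ -0.0411

A word that appears in every document ends up with a negative score because of the +1 in the IDF denominator, which is why 'dog' sits at the bottom of the sorted list.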
The procedure follows the TF-IDF formula directly: TF is a word's count divided by the total number of tokens, and IDF is log(doc_num / (word_doc + 1)). To build a corpus from real articles you additionally need a tokenizer such as jieba; in this toy example each "article" is just a single sentence.
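
A minimal sketch of that preprocessing step might look like the following; the corpus variable and its contents are hypothetical, and only jieba.lcut is an actual jieba call:

    import jieba

    corpus = ['第一篇文章的原始文本', '第二篇文章的原始文本']  # hypothetical raw articles
    list_words = [jieba.lcut(doc) for doc in corpus]           # one word list per article
    features = feature_select(list_words)                      # reuse feature_select above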