1.算法介绍
2.代码所用数据
文件结构
├─doc_classification.py
├─stopwords.txt
├─vocabulary.txt
├─train.data
├─train.label
├─train.map
├─test.data
├─test.label
└─test.map
Python 代码
需要安装的库:
pandas, liblinearutil(LIBLINEAR 的 Python 接口)
注:Windows 平台下需下载与 Python 对应的 32/64 位 liblinearutil 安装包
# doc_classification.py
import pandas as pd
import math
from liblinearutil import *
import time
# 读取数据
def loadOriginData(src='train'):
# train.data
dataSrc = r'%s.data' % src
# train.label
labelSrc = r'%s.label' % src
label = pd.read_table(labelSrc, sep=' ', names=['label'])
# train.map
mapSrc = r'%s.map' % src
# 每个文档拥有的terms
doc2term = {}
# 每个term出现在哪些文档
term2doc = {}
# 每个类别下有哪些docs
cate2docs = {}
# TF值
TF = {}
with open(dataSrc, 'r') as f:
for line in f:
str_docIdx, str_wordIdx, str_cnt = line.split()
docIdx = int(str_docIdx)
wordIdx = int(str_wordIdx)
cnt = int(str_cnt)
# update 数据结构
doc2term.setdefault(docIdx, []).append(wordIdx)
term2doc.setdefault(wordIdx, []).append(docIdx)
TF.setdefault(docIdx, {})[wordIdx] = cnt
# 统计每个类别下有哪些文档
with open(labelSrc, 'r') as f:
for line_index, line in enumerate(f, 1