Tokenize the stop words
import jieba

def makeStopWords():
    with open('../../dataset/stopWords.txt', 'r', encoding='utf-8') as f:
        lines = f.readlines()
    stopWords = []
    for line in lines:
        # Tokenize with jieba.
        # lcut() vs. cut(): lcut() returns the tokens directly as a list,
        # while cut() returns a generator.
        words = jieba.lcut(line, cut_all=False)
        # words = jieba.cut(line)
        for word in words:
            stopWords.append(word)
    return stopWords
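As a quick usage check, the returned list can be used to strip stop words from a crawled sentence before it is written into the training file. A minimal sketch; the sample sentence is hypothetical, and the list is wrapped in a set so membership tests stay O(1):

stop_words = set(makeStopWords())            # set lookup is O(1)
sentence = "这是一条用于演示的新闻文本"        # hypothetical crawled sentence
tokens = [w for w in jieba.lcut(sentence)
          if w.strip() and w not in stop_words]
print(" ".join(tokens))                      # space-joined tokens, the form fastText expects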
import os
import fasttext
import logging
Configure logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
Set the working directory
path = "/home/ubuntu/dataset/"
os.chdir(path)
Train the classifier
# Crawled data must have stop words removed first; this dataset has already been processed.
classifier = fasttext.supervised('news_fasttext_train.txt', 'fasttext_train.model', label_prefix='__label__')
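Note that fasttext.supervised comes from the old fasttext 0.8.x Python binding. With the current official fasttext package, the equivalent train/save/load steps look roughly like the sketch below (same file names as above; this is not the binding the rest of this post uses):

import fasttext

# Current fasttext API (>= 0.9): train, save, and reload the model.
model = fasttext.train_supervised(input='news_fasttext_train.txt', label='__label__')
model.save_model('fasttext_train.model.bin')
model = fasttext.load_model('fasttext_train.model.bin')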
Load the model
classifier = fasttext.load_model("fasttext_train.model.bin", label_prefix='__label__')
Preprocess the test data
texts = []
labels = []
with open("news_fasttext_test.txt", encoding="utf-8") as f:
    for line in f:
        line = line.rstrip()
        labels.append(line.split("\t")[1].replace("__label__", ""))
        texts.append(line.split("\t")[0])
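For reference, the split("\t") calls above imply that each line of news_fasttext_test.txt holds the space-separated tokens, a tab, then the prefixed label; the concrete category name below is only an illustration:

# Line layout inferred from the parsing above (the category "sports" is hypothetical):
#   <space-separated tokens>\t__label__<category>
# e.g. "皇马 晋级 决赛\t__label__sports"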
Test the model
result = classifier.test("news_fasttext_test.txt")
print(result.precision)
print(result.recall)
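In the 0.8.x binding the result object also reports how many examples were evaluated, which makes the report easier to read; a hedged sketch, assuming a result.nexamples attribute as in that binding's documentation:

# Assumes fasttext 0.8.x exposes nexamples alongside precision and recall
# (both are P@1/R@1 aggregated over the whole test file).
print("P@1: %f  R@1: %f  examples: %d" % (result.precision, result.recall, result.nexamples))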
Predict with the model
test_predict_labels = classifier.predict(texts)
# predict() returns one list of labels per input text; keep the top label
test_predict_label_list = [e[0] for e in test_predict_labels]
Collect the distinct labels
test_label = list(set(labels))
test_predict_label = list(set(test_predict_label_list))
# A: number of test samples per class
# B: number of predicted samples per class
# C: number of correctly predicted samples per class
A = dict.fromkeys(labels, 0)
B = dict.fromkeys(test_predict_label, 0)
C = dict.fromkeys(test_predict_label, 0)
for i in range(len(labels)):
    A[labels[i]] += 1
    B[test_predict_label_list[i]] += 1
    if labels[i] == test_predict_label_list[i]:
        C[labels[i]] += 1
print(A)
print(B)
print(C)
# Compute precision, recall, and F1 per class
for key in B:
    try:
        p = float(C[key]) / float(B[key])   # precision = correct / predicted
        r = float(C[key]) / float(A[key])   # recall    = correct / actual
        f = p * r * 2 / (p + r)
        print("%s:\t p:%f\t r:%f\t f:%f" % (key, p, r, f))
    except (KeyError, ZeroDivisionError):
        print("error:", key, "right:", C.get(key, 0), "real:", A.get(key, 0), "predict:", B.get(key, 0))
Related resources
news_fasttext_train.txt
news_fasttext_test.txt
FastText official GitHub
Stop word list (stopWords.txt)