# 我做了一个导航站(域名是挂路灯的全拼gualudeng),里面精选了各种影视,动漫,黑科技,实用工具,搞笑有趣的站点,动动大家可爱的小手,点进来看看吧,良心站点。
#coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.naive_bayes import GaussianNB,MultinomialNB
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV
if __name__ == '__main__':
    # Load the 20-newsgroups text-classification data.
    # Only four of the twenty categories are used here; loading every
    # category requires substantially more memory.
    categories = 'alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space'
    # remove = ('headers', 'footers', 'quotes')  # optionally strip metadata the
    # classifier could otherwise overfit on
    remove = ()
    data_train = fetch_20newsgroups(subset='train', categories=categories,
                                    shuffle=True, random_state=0, remove=remove)
    data_test = fetch_20newsgroups(subset='test', categories=categories,
                                   shuffle=True, random_state=0, remove=remove)
    y_train = data_train.target
    y_test = data_test.target
    print(u'训练集包含的文本数目:', len(data_train.data))
    print(u'测试集包含的文本数目:', len(data_test.data))

    # Turn the raw documents into TF-IDF feature vectors.
    vectorizer = TfidfVectorizer(input='content', stop_words='english',
                                 max_df=0.5, sublinear_tf=True)
    # x_train is sparse (scipy.sparse CSR matrix).
    x_train = vectorizer.fit_transform(data_train.data)
    # Use transform (NOT fit_transform) on the test set so that both
    # matrices share the vocabulary — i.e. the same column layout.
    x_test = vectorizer.transform(data_test.data)

    # Multinomial naive Bayes; pick the smoothing parameter alpha by
    # 10-fold cross-validated grid search on the training data.
    model = MultinomialNB()
    clf = GridSearchCV(model,
                       param_grid={"alpha": [0.001, 0.01, 0.1, 1, 10, 100, 1000]},
                       cv=10)
    clf.fit(x_train, y_train)
    # print(clf.best_params_)  # best alpha observed: 0.1

    # Evaluate on the held-out test set.
    pre_y = clf.predict(x_test)
    accuracy = np.mean(pre_y == y_test)
    print(u"准确率:", accuracy)