Python:基于Sklearn的MNIST分类问题代码

程序代码

############### 相关库导入部分 ###############
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB,GaussianNB,BernoulliNB
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import plot_tree
from sklearn.tree import export_graphviz
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import learning_curve
from sklearn.metrics import classification_report
import numpy as np

########## Load the MNIST data set ##########
# as_frame=False keeps mnist.data as a numpy array; newer scikit-learn
# versions would otherwise return a pandas DataFrame by default.
mnist=fetch_openml("mnist_784",version=1,cache=True,as_frame=False)
x=mnist.data
y=mnist.target

########## Split into train / test set FIRST ##########
# Splitting before any fitting prevents information from the test set
# leaking into the PCA / scaler statistics.
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=1)

########## PCA: keep 95% of the variance to speed up training ##########
print("降维前的特征数:",x_train.shape[1])
PCA_Transfer=PCA(n_components=0.95)
x_train=PCA_Transfer.fit_transform(x_train)   # fit on the training set only
x_test=PCA_Transfer.transform(x_test)         # reuse the training-set projection
print("降维后的特征数:",x_train.shape[1])

########## Standardization (zero mean, unit variance per feature) ##########
Standard_Transfer=StandardScaler()
standard_x_train=Standard_Transfer.fit_transform(x_train)
# Bug fix: the original refitted the scaler on the test set; the test data
# must be transformed with the statistics learned from the training set.
standard_x_test=Standard_Transfer.transform(x_test)

########## Min-max normalization (MultinomialNB requires non-negative input) ##########
MinMax_Transfer=MinMaxScaler()
Guiyihua_x_train=MinMax_Transfer.fit_transform(x_train)
Guiyihua_x_test=MinMax_Transfer.transform(x_test)   # same fix as above

########## Train several classifiers and compare their test accuracy ##########

# --- KNN (disabled) ---
# The quoted block below is commented-out code: it sweeps n_neighbors from
# 1 to 10 on the standardized features, plots accuracy per k, and reports
# the best score and hyperparameter found.
'''
##### KNN算法 #####
KNN_scores=[]
KNN_BestParam=0
KNN_BestScore=0
for i in range(1,11):
    KNN_estimator = KNeighborsClassifier(n_neighbors=i)
    KNN_estimator.fit(standard_x_train, y_train)
    score=KNN_estimator.score(standard_x_test,y_test)
    KNN_scores.append(score)
    if score>KNN_BestScore:
        KNN_BestScore=score
        KNN_BestParam=i
plt.plot(range(1,11),KNN_scores)
plt.show()
print("KNN算法最高准确率:",KNN_BestScore," 最佳超参数:", KNN_BestParam)
'''

##### Naive Bayes #####
# Multinomial NB requires non-negative features, hence the min-max-normalized
# inputs. GridSearchCV tunes the smoothing parameter alpha with 10-fold
# cross-validation on the training set.
Bayes_estimator1=MultinomialNB()
param_dic={"alpha":[0.5,0.6,0.7,0.8,0.9,1,1.1,1.2]}
Bayes_estimator1=GridSearchCV(Bayes_estimator1,param_grid=param_dic,cv=10,n_jobs=-1)
Bayes_estimator1.fit(Guiyihua_x_train,y_train)
print("多项式Bayes算法在测试集上的平均预测成功率:",Bayes_estimator1.score(Guiyihua_x_test,y_test))
# NOTE(review): the disabled block below predicts on x_test although the model
# was fitted on Guiyihua_x_train — if re-enabled it should use Guiyihua_x_test.
'''
y_predict=Bayes_estimator1.predict(x_test)
Bayes_table1=classification_report(y_test,y_predict)
print(Bayes_table1)
train_sizes,train_scores,test_scores=learning_curve(Bayes_estimator1,x,y)
plt.figure(1)
plt.xlabel("样本个数",fontproperties="Simhei")
plt.ylabel("预测准确率",fontproperties="Simhei")
plt.grid(True)
line1,=plt.plot(train_sizes,np.mean(train_scores,axis=1),color='r',label="train score")
line2,=plt.plot(train_sizes,np.mean(test_scores,axis=1),color='g',label="test score")
plt.legend([line1,line2],["train_score","test_score"])
plt.show()
'''


# Gaussian naive Bayes: no hyperparameters are searched (empty param_grid),
# so this GridSearchCV only runs a 10-fold cross-validation and a refit.
Bayes_estimator2=GaussianNB()
Bayes_estimator2=GridSearchCV(Bayes_estimator2,cv=10,param_grid={},n_jobs=-1)
Bayes_estimator2.fit(x_train,y_train)
print("高斯Bayes算法在测试集上的平均预测成功率:",Bayes_estimator2.score(x_test,y_test))
# Disabled: classification report and learning-curve plot for this model.
'''
y_predict=Bayes_estimator2.predict(x_test)
Bayes_table2=classification_report(y_test,y_predict)
print(Bayes_table2)
train_sizes,train_scores,test_scores=learning_curve(Bayes_estimator2,x,y)
plt.figure(2)
plt.xlabel("样本个数",fontproperties="Simhei")
plt.ylabel("预测准确率",fontproperties="Simhei")
plt.grid(True)
line1,=plt.plot(train_sizes,np.mean(train_scores,axis=1),color='r',label="train score")
line2,=plt.plot(train_sizes,np.mean(test_scores,axis=1),color='g',label="test score")
plt.legend([line1,line2],["train_score","test_score"])
plt.show()
'''

# Bernoulli naive Bayes (features are binarized at threshold 0.0 by default).
Bayes_estimator3=BernoulliNB()
# Bug fix: the original line wrapped Bayes_estimator2 here (copy-paste error),
# which both clobbered estimator2 and left estimator3 unwrapped; wrap
# estimator3 so its score also comes from the cross-validated search object.
Bayes_estimator3=GridSearchCV(Bayes_estimator3,cv=10,param_grid={},n_jobs=-1)
Bayes_estimator3.fit(x_train,y_train)
print("伯努利Bayes算法在测试集上的平均预测成功率:",Bayes_estimator3.score(x_test,y_test))
'''
y_predict=Bayes_estimator3.predict(x_test)
Bayes_table3=classification_report(y_test,y_predict)
print(Bayes_table3)
train_sizes,train_scores,test_scores=learning_curve(Bayes_estimator3,x,y)
plt.figure(3)
plt.xlabel("样本个数",fontproperties="Simhei")
plt.ylabel("预测准确率",fontproperties="Simhei")
plt.grid(True)
line1,=plt.plot(train_sizes,np.mean(train_scores,axis=1),color='r',label="train score")
line2,=plt.plot(train_sizes,np.mean(test_scores,axis=1),color='g',label="test score")
plt.legend([line1,line2],["train_score","test_score"])
plt.show()
'''

##### Decision tree #####
# Entropy (information-gain) split criterion; random_state pins tie-breaking.
# The empty param_grid means GridSearchCV only cross-validates, no search.
DecisionTree_estimator1=DecisionTreeClassifier(random_state=1,criterion="entropy")
CV_DecisionTree_estimator1=GridSearchCV(DecisionTree_estimator1,cv=10,param_grid={},n_jobs=-1)
CV_DecisionTree_estimator1.fit(x_train,y_train)
print("信息熵决策树算法在测试集上的平均预测成功率:", CV_DecisionTree_estimator1.score(x_test,y_test))
'''
y_predict=CV_DecisionTree_estimator1.predict(x_test)
CV_DecisionTree_table1=classification_report(y_test,y_predict)
print(CV_DecisionTree_table1)
'''
# NOTE(review): the commented lines below reference the undefined name
# `DecisionTree_estimator` (missing the trailing 1), and `export_graphviz`
# takes `out_file`, not `out_files` — fix both before re-enabling.
#DecisionTree_estimator1.fit(x_train,y_train)
#DecisionTree_Picture=plot_tree(DecisionTree_estimator)
#plt.show()
# export_graphviz(DecisionTree_estimator,out_files="DecisionTree.dot")
'''
train_sizes,train_scores,test_scores=learning_curve(DecisionTree_estimator1,x,y)
plt.figure(4)
plt.xlabel("样本个数",fontproperties="Simhei")
plt.ylabel("预测准确率",fontproperties="Simhei")
plt.grid(True)
line1,=plt.plot(train_sizes,np.mean(train_scores,axis=1),color='r',label="train score")
line2,=plt.plot(train_sizes,np.mean(test_scores,axis=1),color='g',label="test score")
plt.legend([line1,line2],["train_score","test_score"])
plt.show()
'''

# Same tree experiment with the Gini-impurity split criterion, for comparison
# against the entropy-based tree above.
DecisionTree_estimator2=DecisionTreeClassifier(random_state=1,criterion="gini")
CV_DecisionTree_estimator2=GridSearchCV(DecisionTree_estimator2,cv=10,param_grid={},n_jobs=-1)
CV_DecisionTree_estimator2.fit(x_train,y_train)
print("基尼指数决策树算法在测试集上的平均预测成功率:", CV_DecisionTree_estimator2.score(x_test,y_test))
# Disabled: classification report and learning-curve plot for this model.
'''
y_predict=CV_DecisionTree_estimator2.predict(x_test)
CV_DecisionTree_table2=classification_report(y_test,y_predict)
print(CV_DecisionTree_table2)
train_sizes,train_scores,test_scores=learning_curve(DecisionTree_estimator2,x,y)
plt.figure(5)
plt.xlabel("样本个数",fontproperties="Simhei")
plt.ylabel("预测准确率",fontproperties="Simhei")
plt.grid(True)
line1,=plt.plot(train_sizes,np.mean(train_scores,axis=1),color='r',label="train score")
line2,=plt.plot(train_sizes,np.mean(test_scores,axis=1),color='g',label="test score")
plt.legend([line1,line2],["train_score","test_score"])
plt.show()
'''

# --- Random forest (disabled) ---
# Grid search over n_estimators for entropy- and gini-criterion forests.
# NOTE(review): inside the disabled block the second GridSearchCV is built
# from RandomForest_estimator1 instead of RandomForest_estimator2, and the
# second print label still says "信息熵" (entropy) — fix both before re-enabling.
'''
##### 随机森林算法 #####
RandomForest_estimator1=RandomForestClassifier(random_state=1,criterion="entropy")
param_dic={"n_estimators":[2,4,6,8,10,12,14,16,18,20]}
CV_RandomForest_estimator1=GridSearchCV(RandomForest_estimator1,cv=10,param_grid=param_dic)
CV_RandomForest_estimator1.fit(x_train,y_train)
print("信息熵随机森林算法在测试集上的平均预测成功率:",CV_RandomForest_estimator1.score(x_test,y_test)," 最佳超参数为:",CV_RandomForest_estimator1.best_params_)

RandomForest_estimator2=RandomForestClassifier(random_state=1,criterion="gini")
param_dic={"n_estimators":[2,4,6,8,10,12,14,16,18,20]}
CV_RandomForest_estimator2=GridSearchCV(RandomForest_estimator1,cv=10,param_grid=param_dic)
CV_RandomForest_estimator2.fit(x_train,y_train)
print("信息熵随机森林算法在测试集上的平均预测成功率:",CV_RandomForest_estimator2.score(x_test,y_test)," 最佳超参数为:",CV_RandomForest_estimator2.best_params_)
'''

# --- Logistic regression (disabled) ---
# NOTE(review): LogisticRegression's default max_iter (100) often hits the
# iteration limit on data of this size — consider raising it if re-enabled.
'''
##### 逻辑回归算法 #####
logit_estimator=LogisticRegression()
logit_estimator.fit(x_train,y_train)
print("逻辑回归算法在测试集上的平均预测成功率:",logit_estimator.score(x_test,y_test))
'''
  • 1
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值