Machine Learning: Classification


I. Logistic Regression

from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

cancer = load_breast_cancer()  # load the breast cancer dataset
x = cancer['data']
y = cancer['target']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=22)  # split into training and test sets

stdscaler = StandardScaler().fit(x_train)  # fit the scaler on the training set only
x_trainStd = stdscaler.transform(x_train)
x_testStd = stdscaler.transform(x_test)

lr_model = LogisticRegression(solver='saga')  # build the logistic regression model
lr_model.fit(x_trainStd, y_train)  # train the model

lr_model.score(x_testStd, y_test)  # accuracy on the test set
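
Beyond the single accuracy number returned by score, the predicted class probabilities are often worth a look. A minimal sketch reusing lr_model and x_testStd from above (the y_pred_lr / y_proba_lr names are new here, not from the original post):

# Inspect predicted labels and class probabilities for the first few test samples
y_pred_lr = lr_model.predict(x_testStd)
y_proba_lr = lr_model.predict_proba(x_testStd)  # columns ordered as in lr_model.classes_
print(y_pred_lr[:5])
print(y_proba_lr[:5])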

II. SVM

1. Code

from sklearn.svm import SVC
svc_model = SVC()
svc_model.fit(x_trainStd,y_train)
svc_model.score(x_testStd,y_test)

2. Model Evaluation

from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt

y_pred = svc_model.predict(x_testStd)  # predictions on the standardized test set
# Accuracy: same value as the model's own score() method
accuracy_score(y_true=y_test, y_pred=y_pred)
# Precision, recall and F1 per class
print(classification_report(y_true=y_test, y_pred=y_pred))
# ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC (area = %0.2f)' % roc_auc)
plt.xlabel('FPR (false positive rate)')
plt.ylabel('TPR (true positive rate)')
plt.title('ROC curve (AUC = %0.2f)' % roc_auc)
plt.show()
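
The ROC curve above is built from hard 0/1 predictions, so it only contains a single intermediate point. A smoother curve can be obtained from continuous scores; a minimal sketch, assuming the same svc_model (SVC exposes decision_function for binary problems):

# Use continuous decision scores instead of hard labels for the ROC curve
y_score = svc_model.decision_function(x_testStd)
fpr, tpr, thresholds = roc_curve(y_test, y_score)
print('AUC with decision scores: %0.3f' % auc(fpr, tpr))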

3. Hyperparameter Tuning

# Hyperparameter tuning
from sklearn.model_selection import GridSearchCV
parameters = {'C': [0.1, 0.5, 1, 2],
              'kernel': ['poly', 'rbf', 'sigmoid']}  # candidate values for the SVC parameters
grid_search = GridSearchCV(svc_model, parameters, cv=5)  # grid search with 5-fold cross-validation
grid_search.fit(x_trainStd, y_train)  # use the standardized features, matching how svc_model was trained
grid_search.best_params_  # best parameter combination
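
GridSearchCV also keeps a refitted copy of the best model, which can be scored directly on the test set; a minimal sketch:

# Best cross-validation score and test-set accuracy of the tuned SVC
print(grid_search.best_score_)
print(grid_search.best_estimator_.score(x_testStd, y_test))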

III. Decision Tree

Getting graphviz installed can be genuinely confusing; the following references may help with installation:
https://blog.csdn.net/weixin_40085833/article/details/90750929
https://blog.csdn.net/qq_16906867/article/details/105558288
https://blog.csdn.net/authorized_keys/article/details/100532002

Decision tree models do not require feature standardization, so x_train and x_test are used directly as model inputs.

from sklearn.tree import DecisionTreeClassifier
dt_model = DecisionTreeClassifier()
dt_model.fit(x_train, y_train)
dt_model.score(x_test, y_test)  # accuracy on the test set

# Decision tree visualization
from sklearn import tree
import graphviz  # graphviz interface for rendering the tree
dot_data = tree.export_graphviz(dt_model, out_file=None)  # export the tree in DOT format
graph = graphviz.Source(dot_data)  # build a graphviz Source object from the DOT data
graph.render('C:/Users/aolin/Desktop/tmp/cancer')  # writes a PDF to the given path
graph  # in a notebook, displaying the object shows the tree inline
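
If installing graphviz is a problem, scikit-learn's own plotting function avoids it entirely; a minimal sketch using matplotlib only:

# Alternative visualization without graphviz, using sklearn's built-in plot_tree
import matplotlib.pyplot as plt
from sklearn.tree import plot_tree
plt.figure(figsize=(20, 10))
plot_tree(dt_model, feature_names=cancer['feature_names'], filled=True, fontsize=6)
plt.show()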

# Hyperparameter tuning
from sklearn.model_selection import GridSearchCV
parameters = {'max_depth': [1, 3, 5, 7, 9, 11, 13],
              'criterion': ['gini', 'entropy'],
              'min_samples_split': [2, 3, 5, 7, 9, 11, 13]}  # candidate values for the decision tree parameters (min_samples_split must be >= 2)
grid_search = GridSearchCV(dt_model, parameters, scoring='roc_auc', cv=5)  # grid search with 5-fold CV, scored by ROC AUC
grid_search.fit(x_train, y_train)
grid_search.best_params_  # best parameter combination

IV. KNN

from sklearn.neighbors import KNeighborsClassifier
knn_model = KNeighborsClassifier()
knn_model.fit(x_trainStd,y_train)
knn_model.score(x_testStd,y_test)
# Hyperparameter tuning
from sklearn.model_selection import GridSearchCV
parameters = {'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8]}  # candidate values for the number of neighbors
grid_search = GridSearchCV(knn_model, parameters, cv=5)  # grid search with 5-fold cross-validation
grid_search.fit(x_trainStd, y_train)
grid_search.best_params_  # best parameter combination

V. Naive Bayes

from sklearn.naive_bayes import GaussianNB
gnb_model = GaussianNB()
gnb_model.fit(x_trainStd,y_train)
gnb_model.score(x_testStd,y_test)
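
GaussianNB has essentially no hyperparameters to tune, but its stability can still be checked with cross-validation; a minimal sketch on the standardized training data:

# 5-fold cross-validated accuracy of the Gaussian naive Bayes model
from sklearn.model_selection import cross_val_score
scores = cross_val_score(GaussianNB(), x_trainStd, y_train, cv=5)
print(scores.mean(), scores.std())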

VI. Random Forest

Random forest models do not require feature standardization.

from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(x_train,y_train)
rf_model.score(x_test,y_test)
# Hyperparameter tuning
from sklearn.model_selection import GridSearchCV
parameters = {'n_estimators': [1, 5, 10, 15, 20],
              'max_depth': [1, 2, 3, 4, 5],
              'min_samples_leaf': [5, 10, 15, 20, 30]}  # candidate values for the random forest parameters
grid_search = GridSearchCV(rf_model, parameters, cv=5)  # grid search with 5-fold cross-validation
grid_search.fit(x_train, y_train)
grid_search.best_params_  # best parameter combination
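
A trained random forest also exposes per-feature importances, which are handy on this dataset; a minimal sketch, assuming rf_model has been fitted as above:

# Rank features by their importance in the fitted random forest
import numpy as np
importances = rf_model.feature_importances_
order = np.argsort(importances)[::-1]
for i in order[:10]:  # top 10 features
    print(cancer['feature_names'][i], round(importances[i], 3))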

VII. Multilayer Perceptron (MLP)

from sklearn.neural_network import MLPClassifier
mlp_model = MLPClassifier(max_iter=1000,random_state=3)
mlp_model.fit(x_trainStd,y_train)
mlp_model.score(x_testStd,y_test)
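
Like the other models, the MLP can be tuned with GridSearchCV; a minimal sketch over a few hidden-layer sizes and regularization strengths (these candidate values are illustrative, not from the original post):

# Hyperparameter tuning for the MLP (candidate values chosen for illustration)
from sklearn.model_selection import GridSearchCV
parameters = {'hidden_layer_sizes': [(50,), (100,), (100, 50)],
              'alpha': [0.0001, 0.001, 0.01]}
grid_search = GridSearchCV(mlp_model, parameters, cv=5)
grid_search.fit(x_trainStd, y_train)
grid_search.best_params_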

VIII. AdaBoost

#AdaBoost
from sklearn.ensemble import AdaBoostClassifier
abc_model = AdaBoostClassifier()
abc_model.fit(x_train,y_train)
abc_model.score(x_test,y_test)
# Hyperparameter tuning
from sklearn.model_selection import GridSearchCV
parameters = {'n_estimators': [5, 10, 20, 40, 50],
              'learning_rate': [0.1, 0.5, 1]}  # candidate values for the AdaBoost parameters
grid_search = GridSearchCV(abc_model, parameters, cv=5)  # grid search with 5-fold cross-validation
grid_search.fit(x_train, y_train)
grid_search.best_params_  # best parameter combination

IX. GBDT

# GBDT (gradient boosted decision trees)
from sklearn.ensemble import GradientBoostingClassifier
gbdt_model = GradientBoostingClassifier()
gbdt_model.fit(x_train,y_train)
gbdt_model.score(x_test,y_test)
# Hyperparameter tuning
from sklearn.model_selection import GridSearchCV
parameters = {'loss': ['deviance', 'exponential'],  # note: 'deviance' was renamed to 'log_loss' in newer scikit-learn versions
              'learning_rate': [0.1, 0.5, 1],
              'n_estimators': [5, 10, 30, 50, 100]}  # candidate values for the GBDT parameters
grid_search = GridSearchCV(gbdt_model, parameters, cv=5)  # grid search with 5-fold cross-validation
grid_search.fit(x_train, y_train)
grid_search.best_params_  # best parameter combination

X. XGBoost

#xgboost
from xgboost import XGBClassifier
xgb_model = XGBClassifier()
xgb_model.fit(x_train,y_train)
xgb_model.score(x_test,y_test)
# Hyperparameter tuning
from sklearn.model_selection import GridSearchCV
parameters = {'max_depth': [1, 3, 5],
              'learning_rate': [0.01, 0.05, 0.1, 0.2],
              'n_estimators': [5, 10, 50, 100, 150]}  # candidate values for the XGBoost parameters
grid_search = GridSearchCV(xgb_model, parameters, cv=5)  # grid search with 5-fold cross-validation
grid_search.fit(x_train, y_train)
grid_search.best_params_  # best parameter combination

XI. LightGBM

#LightGBM
from lightgbm import LGBMClassifier
lgbm_model = LGBMClassifier()
lgbm_model.fit(x_train,y_train)
lgbm_model.score(x_test,y_test)
# Hyperparameter tuning
from sklearn.model_selection import GridSearchCV
parameters = {'num_leaves': [5, 10, 15, 31],
              'learning_rate': [0.01, 0.05, 0.1, 0.2],
              'n_estimators': [5, 10, 20, 30]}  # candidate values for the LightGBM parameters
grid_search = GridSearchCV(lgbm_model, parameters, cv=5)  # grid search with 5-fold cross-validation
grid_search.fit(x_train, y_train)
grid_search.best_params_  # best parameter combination
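
With everything above fitted, a quick side-by-side comparison on the same test split can be produced; a minimal sketch, assuming every model in this post has already been trained in the current session (tree-based models use the raw features, the others use the standardized ones, as above):

# Compare test-set accuracy of all fitted models on the same split
models = {'LogisticRegression': (lr_model, x_testStd), 'SVC': (svc_model, x_testStd),
          'DecisionTree': (dt_model, x_test), 'KNN': (knn_model, x_testStd),
          'GaussianNB': (gnb_model, x_testStd), 'RandomForest': (rf_model, x_test),
          'MLP': (mlp_model, x_testStd), 'AdaBoost': (abc_model, x_test),
          'GBDT': (gbdt_model, x_test), 'XGBoost': (xgb_model, x_test),
          'LightGBM': (lgbm_model, x_test)}
for name, (model, features) in models.items():
    print(name, round(model.score(features, y_test), 4))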