一周算法实践(四)

要求:
使用网格搜索法对7个模型进行调优(调参时采用五折交叉验证的方式),并进行模型评估,记得展示代码的运行结果~

1,调入需要的库

import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn import preprocessing
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

from xgboost import XGBClassifier
from lightgbm import LGBMClassifier

2,数据预处理

# Load the dataset, split off the target, and create a 70/30 train/test split.
data_all = pd.read_csv("/home/tarena/test/web/day11/data_all.csv")
print('数据的行列', data_all.shape)
X = data_all.drop(['status'], axis=1)
y = data_all['status']
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=2018)

# Fix: every grid search below fits on `x_scaled`, which was never defined.
# Standardize features with the (previously unused) StandardScaler, fitting
# on the training split only to avoid leaking test-set statistics.
scaler = StandardScaler()
x_scaled = scaler.fit_transform(x_train)
x_test_scaled = scaler.transform(x_test)

3,开始导入模型

# Instantiate the seven candidate classifiers with default hyperparameters;
# each one is tuned individually by the grid searches below.
lr = LogisticRegression()
# probability=True is an SVC option (enables predict_proba via Platt scaling).
# XGBClassifier and LGBMClassifier expose predict_proba natively and do not
# accept such a constructor argument, so it is removed from them.
svc = SVC(probability=True)
DT = DecisionTreeClassifier()
rfc = RandomForestClassifier()
gbc = GradientBoostingClassifier()
xgb = XGBClassifier()
lgb = LGBMClassifier()

4,逐个调参优化

#lr
# Tune regularization strength and penalty type with 5-fold CV.
# Fix: the default 'lbfgs' solver only supports 'l2'; 'liblinear' handles
# both 'l1' and 'l2', so the l1 candidates no longer error out.
param = {'C': [0.1, 1, 2, 3], 'penalty': ['l1', 'l2']}
grid = GridSearchCV(estimator=LogisticRegression(solver='liblinear'),
                    param_grid=param, cv=5, scoring='accuracy')
grid.fit(x_train, y_train)
print('lr调参后得分:', grid.best_score_)

#svm
# 5-fold grid search over the SVC's kernel choice and regularization C,
# fitted on the standardized training features.
param = {
    'C': [1, 10, 100],
    'kernel': ['linear', 'sigmoid', 'rbf'],
}
grid = GridSearchCV(estimator=svc, param_grid=param, cv=5)
grid.fit(x_scaled, y_train)
print('svm调参后得分:', grid.best_score_)

#Desicion Tree
# Tune split criterion, depth, splitter strategy and feature subsampling.
# Fix: max_features='auto' was deprecated and then removed for tree models
# in recent scikit-learn releases; 'sqrt' is its historical equivalent.
param = {'criterion': ['gini', 'entropy'],
         'max_depth': [1, 2, 3, 4, 5, 6],
         'splitter': ['best', 'random'],
         'max_features': ['log2', 'sqrt']}
grid = GridSearchCV(estimator=DT, param_grid=param, cv=5)
grid.fit(x_scaled, y_train)
print('DT调参后得分:', grid.best_score_)

#Random Forest
# Tune forest size and feature subsampling.
# Fix: range(1, 200) enumerates 199 candidates (≈2000 CV fits with 5 folds);
# stepping by 10 keeps the same search span at ~5% of the cost. 'auto' is
# also dropped — it was removed from recent scikit-learn ('sqrt' equivalent).
param = {'n_estimators': range(10, 200, 10),
         'max_features': ['log2', 'sqrt']}
grid = GridSearchCV(estimator=rfc, param_grid=param, cv=5)
grid.fit(x_scaled, y_train)
print('RF调参后得分:', grid.best_score_)

#GBDT
# 5-fold grid search over boosting-round count and shrinkage for GBDT.
param = {
    'n_estimators': range(1, 100, 10),
    'learning_rate': np.arange(0.1, 1, 0.1),
}
grid = GridSearchCV(estimator=gbc, param_grid=param, cv=5)
grid.fit(x_scaled, y_train)
print('GBDT调参后得分:', grid.best_score_)

#XGB
# Tune shrinkage, tree depth and the minimum child weight.
# Fix: 'eta' is the native-booster alias; the sklearn wrapper parameter that
# GridSearchCV.set_params resolves on XGBClassifier is 'learning_rate'.
param = {'learning_rate': np.arange(0.1, 0.5, 0.1),
         'max_depth': range(1, 6, 1),
         'min_child_weight': range(1, 6, 1)}
grid = GridSearchCV(estimator=xgb, param_grid=param, cv=5)
grid.fit(x_scaled, y_train)
print('XGB调参后得分:', grid.best_score_)


#LGBM
# Tune shrinkage, tree depth and the number of boosting rounds.
# Fix: range() only accepts integer arguments, so range(0.1, 0.5, 0.1)
# raises TypeError; np.arange supports a float step.
param = {'learning_rate': np.arange(0.1, 0.5, 0.1),
         'max_depth': range(1, 6, 1),
         'n_estimators': range(30, 50, 5)}
grid = GridSearchCV(estimator=lgb, param_grid=param, cv=5)
grid.fit(x_scaled, y_train)
print('LGBM调参后得分:', grid.best_score_)

5,调参前后得分对比

(原文此处为调参前后各模型得分对比的截图,转载时图片未能保留。)
因为电脑不给力,部分模型进行了参数简化,并没有进行五折交叉验证。

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值