Titanic Survival Prediction with Ensemble Learning

Straight to the code.

# Imports

import pandas as pd
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.feature_extraction import DictVectorizer
from sklearn.ensemble import RandomForestClassifier

# Read the data
filename = 'titanic.csv'
data = pd.read_csv(filename)

# Define the features X and the target y
x = data[['Pclass', 'Age', 'Sex']]
y = data['Survived']

# Fill missing ages with the mean age
x = x.copy()  # work on a copy so the fill does not trigger pandas chained-assignment warnings
x['Age'] = x['Age'].fillna(x['Age'].mean())
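# (Addition, not in the original: a quick sanity check that no missing values remain after the fill.)
print(x.isnull().sum())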

# Split the data into a training set and a validation set at a 3:1 ratio
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)

# Feature engineering: one-hot encode the categorical features with DictVectorizer
dictver = DictVectorizer(sparse=False)
# DataFrame -> list of dicts via to_dict(orient="records")
x_train = dictver.fit_transform(x_train.to_dict(orient="records"))
x_test = dictver.transform(x_test.to_dict(orient="records"))  # transform only: reuse the encoding fitted on the training set
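# (Addition, not in the original: inspect the columns produced by the one-hot encoding.
#  get_feature_names_out() assumes scikit-learn >= 1.0; older releases use get_feature_names().)
print("Encoded features:", dictver.get_feature_names_out())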

print("----------------------------------------------------------------------------------------------------------------------------")
print("随机森林")
rf=RandomForestClassifier()
# Grid search over the number of trees and the maximum tree depth
params = {"n_estimators": [20, 30, 50, 100, 200, 500, 800, 1000],
          "max_depth": [5, 8, 15, 25, 30, 35, 40, 50]}
gscv = GridSearchCV(rf, params, cv=2)
# Fit the cross-validated grid search
gscv.fit(x_train,y_train)

print("随机森林 准确率:",gscv.score(x_test,y_test))
print("随机森林 查看模型的选择参数:",gscv.best_params_)




print("----------------------------------------------------------------------------------------------------------------------------")
print("决策树")
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report




# Grid search over the maximum tree depth
parameters = {'max_depth': [1, 3, 5, 10, 20, 30]}
tree = GridSearchCV(DecisionTreeClassifier(), param_grid=parameters, scoring='accuracy')
tree.fit(x_train, y_train)

print("-------------显示网格搜索到的最佳决策树深度和得分-")
print(tree.best_params_)
print(tree.best_score_)

print("-------------决策树预测的精度,查全率、查准率、F1 分数等--")
y_pred=tree.predict(x_test)
print("决策树 准确率:" ,accuracy_score(y_test,y_pred))
print(classification_report(y_test,y_pred))
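# (Addition, not in the original: feature importances of the best tree found by the grid search,
#  paired with the DictVectorizer feature names; assumes scikit-learn >= 1.0 for get_feature_names_out().)
best_tree = tree.best_estimator_
for name, importance in zip(dictver.get_feature_names_out(), best_tree.feature_importances_):
    print(f"{name}: {importance:.3f}")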


print("----------------------------------------------------------------------------------------------------------------------------")
# print("AdaBoost")
# from sklearn.ensemble import AdaBoostClassifier
# parameters = {'n_estimeators':[20,50,100,200,300,500], 'learing_rate':[0.1,0.01,0.001]}
# ada_clf=GridSearchCV(AdaBoostClassifier(DecisionTreeClassifier(max_depth= 3 ,max_features = 3)),param_grid= parameters, scoring='accuracy')

# ada_clf.fit(xtrain,ytrain)
# ypred=ada_clf.predict(xtest)
# print("AdaBoost 准确率:" ,accuracy_score(ytest,ypred))




print("----------------------------------------------------------------------------------------------------------------------------")
print("梯度提升树")
from sklearn.ensemble import GradientBoostingClassifier
# Grid search over the number of trees, the learning rate and the tree depth
param_grid = {'n_estimators': [30, 50, 80, 120, 200],
              'learning_rate': [0.05, 0.1, 0.5, 1],
              'max_depth': [1, 2, 3, 4, 5]}
grid_search = GridSearchCV(GradientBoostingClassifier(), param_grid, cv=5)

grid_search.fit(x_train, y_train)

print("梯度提升树 准确率:")
grid_search.best_params_,grid_search.best_score_
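# (Addition, not in the original: accuracy of the refit best gradient boosting model on the validation split.)
print("Gradient Boosting accuracy:", grid_search.score(x_test, y_test))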

