sklearn Decision Trees

import pandas as pd
import numpy as np
import matplotlib.pyplot as pl  # pylab is deprecated; pyplot provides the plotting API used below
# sklearn.cross_validation was removed in sklearn 0.20; train_test_split now
# lives in sklearn.model_selection:
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.feature_extraction import DictVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn import feature_selection
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report

titanic = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')
print(titanic.head())
# print(titanic.info())

#X=titanic[['pclass', 'age', 'sex']]
X = titanic.drop(['row.names', 'name', 'survived'], axis=1)
y = titanic['survived']

# print(X.info())

# Fill missing ages with the column mean; fill the remaining missing values
# with a sentinel category (plain assignment instead of inplace= avoids
# pandas' chained-assignment warning)
X['age'] = X['age'].fillna(X['age'].mean())
X = X.fillna('UNKNOWN')
# print(X.info())
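# A hedged alternative sketch (assumes sklearn >= 0.20, which provides
# sklearn.impute.SimpleImputer); equivalent to the age fillna above:
# from sklearn.impute import SimpleImputer
# X[['age']] = SimpleImputer(strategy='mean').fit_transform(X[['age']])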

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33)
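# train_test_split draws a random 25% hold-out. An optional variant (an
# assumption on my part, not in the original post): stratify=y keeps the
# died/survived ratio the same in both splits.
# X_train, X_test, y_train, y_test = train_test_split(
#     X, y, test_size=0.25, random_state=33, stratify=y)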

vec = DictVectorizer(sparse=True)

X_train = vec.fit_transform(X_train.to_dict(orient='records'))
print(len(vec.feature_names_))  # number of columns after one-hot encoding

X_test = vec.transform(X_test.to_dict(orient='records'))
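# What DictVectorizer does, shown on a toy input (illustration only, not part
# of the pipeline): string values become one-hot columns, numbers pass through.
# DictVectorizer(sparse=False).fit_transform(
#     [{'pclass': '1st', 'age': 29.0}, {'pclass': '3rd', 'age': 2.0}])
# -> columns ['age', 'pclass=1st', 'pclass=3rd']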

# Decision tree (default Gini criterion)
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
y_predict = dtc.predict(X_test)
print('Decision Tree: ', dtc.score(X_test, y_test))
print(classification_report(y_test, y_predict, target_names=['died', 'survived']))
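# Optional peek at the learned splits (a sketch; assumes sklearn >= 0.21,
# which provides sklearn.tree.export_text). Truncated to depth 2 for
# readability:
from sklearn.tree import export_text
print(export_text(dtc, feature_names=vec.feature_names_, max_depth=2))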

# Decision tree with the entropy (information gain) criterion; the original
# code defined dt but then fitted dtc again, so dt is used consistently here
dt = DecisionTreeClassifier(criterion='entropy')
dt.fit(X_train, y_train)
y_predict = dt.predict(X_test)
print('Decision Tree entropy: ', dt.score(X_test, y_test))
print(classification_report(y_test, y_predict, target_names=['died', 'survived']))
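# Note: 'entropy' ranks splits by information gain instead of the default
# Gini impurity; in practice the two criteria usually produce very similar
# trees and scores.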



# Search chi-squared feature-selection percentiles with 5-fold cross-validation
percentiles = range(1, 100, 2)
results = []
for i in percentiles:
    fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=i)
    X_train_fs = fs.fit_transform(X_train, y_train)
    scores = cross_val_score(dt, X_train_fs, y_train, cv=5)
    results = np.append(results, scores.mean())
print(results)

# take the first maximizer so the %d format always gets a scalar
opt = np.where(results == results.max())[0][0]
print('Optimal percentile of features: %d' % np.array(percentiles)[opt])

pl.plot(percentiles, results)
pl.xlabel('percentiles of features')
pl.ylabel('accuracy')
pl.show()
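# The manual loop above can also be written as a Pipeline searched with
# GridSearchCV (a sketch, not from the original post; the 'fs'/'dt' step
# names are arbitrary):
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
pipe = Pipeline([('fs', feature_selection.SelectPercentile(feature_selection.chi2)),
                 ('dt', DecisionTreeClassifier(criterion='entropy'))])
grid = GridSearchCV(pipe, {'fs__percentile': list(percentiles)}, cv=5)
grid.fit(X_train, y_train)
print(grid.best_params_, grid.best_score_)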

# Evaluate on the test set with the top 20% of features
fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=20)
X_train_fs = fs.fit_transform(X_train, y_train)
dt.fit(X_train_fs, y_train)
X_test_fs = fs.transform(X_test)
y_predict = dt.predict(X_test_fs)
print('Decision Tree SelectPercentile 20: ', dt.score(X_test_fs, y_test))
print(classification_report(y_test, y_predict, target_names=['died', 'survived']))

# Evaluate on the test set with only the top 7% of features
fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=7)
X_train_fs = fs.fit_transform(X_train, y_train)
dt.fit(X_train_fs, y_train)
X_test_fs = fs.transform(X_test)
y_predict = dt.predict(X_test_fs)
print('Decision Tree SelectPercentile 7: ', dt.score(X_test_fs, y_test))
print(classification_report(y_test, y_predict, target_names=['died', 'survived']))

# Gradient boosting decision trees
gbc = GradientBoostingClassifier()
gbc.fit(X_train, y_train)
y_predict = gbc.predict(X_test)
print('Gradient Boosting: ', gbc.score(X_test, y_test))
print(classification_report(y_test, y_predict, target_names=['died', 'survived']))
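# The defaults can often be improved by tuning n_estimators, learning_rate
# and max_depth; a hedged sketch (the values below are illustrative
# assumptions, not tuned results):
gbc_tuned = GradientBoostingClassifier(n_estimators=200, learning_rate=0.05, max_depth=3)
gbc_tuned.fit(X_train, y_train)
print('Gradient Boosting (tuned): ', gbc_tuned.score(X_test, y_test))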

# Random forest
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
y_predict = rfc.predict(X_test)
print('Random Forest: ', rfc.score(X_test, y_test))
print(classification_report(y_test, y_predict, target_names=['died', 'survived']))
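# Which columns drive the forest's predictions? Map feature_importances_
# back to DictVectorizer's column names (the top-10 cut-off is an arbitrary
# choice for display):
importances = sorted(zip(vec.feature_names_, rfc.feature_importances_),
                     key=lambda t: t[1], reverse=True)
print(importances[:10])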