sklearn Decision Trees

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# sklearn.cross_validation was removed; use sklearn.model_selection instead
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.feature_extraction import DictVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn import feature_selection
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report

# Load the Titanic dataset
titanic = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')
print(titanic.head())
# print(titanic.info())

# Features and label: drop the row identifier, the name, and the target from X
# X = titanic[['pclass', 'age', 'sex']]
X = titanic.drop(['row.names', 'name', 'survived'], axis=1)
y = titanic['survived']
# print(X.info())

# Fill missing ages with the mean age, and remaining missing values with a placeholder
X['age'] = X['age'].fillna(X['age'].mean())
X = X.fillna('UNKNOWN')
# print(X.info())

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33)

# One-hot encode the categorical features with DictVectorizer
vec = DictVectorizer(sparse=True)
X_train = vec.fit_transform(X_train.to_dict(orient='records'))
print(len(vec.feature_names_))
X_test = vec.transform(X_test.to_dict(orient='records'))

# Decision tree (default Gini criterion)
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
y_predict = dtc.predict(X_test)
print('Decision Tree: ', dtc.score(X_test, y_test))
print(classification_report(y_test, y_predict, target_names=['died', 'survived']))

# Decision tree with the entropy criterion
dt = DecisionTreeClassifier(criterion='entropy')
dt.fit(X_train, y_train)
y_predict = dt.predict(X_test)
print('Decision Tree entropy: ', dt.score(X_test, y_test))
print(classification_report(y_test, y_predict, target_names=['died', 'survived']))

# Search for the best percentile of chi2-selected features by 5-fold cross-validation
percentiles = range(1, 100, 2)
results = []
for i in percentiles:
    fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=i)
    X_train_fs = fs.fit_transform(X_train, y_train)
    scores = cross_val_score(dt, X_train_fs, y_train, cv=5)
    results = np.append(results, scores.mean())
print(results)

opt = results.argmax()
print('Optimal percentile of features: %d' % np.array(percentiles)[opt])

plt.plot(percentiles, results)
plt.xlabel('percentiles of features')
plt.ylabel('accuracy')
plt.show()

# Retrain on the top 20% of features
fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=20)
X_train_fs = fs.fit_transform(X_train, y_train)
dt.fit(X_train_fs, y_train)
X_test_fs = fs.transform(X_test)
y_predict = dt.predict(X_test_fs)
print('Decision Tree SelectPercentile 20: ', dt.score(X_test_fs, y_test))
print(classification_report(y_test, y_predict, target_names=['died', 'survived']))

# Retrain on the top 7% of features
fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=7)
X_train_fs = fs.fit_transform(X_train, y_train)
dt.fit(X_train_fs, y_train)
X_test_fs = fs.transform(X_test)
y_predict = dt.predict(X_test_fs)
print('Decision Tree SelectPercentile 7: ', dt.score(X_test_fs, y_test))
print(classification_report(y_test, y_predict, target_names=['died', 'survived']))

# Gradient boosted decision trees
gbc = GradientBoostingClassifier()
gbc.fit(X_train, y_train)
y_predict = gbc.predict(X_test)
print('Gradient Boosting: ', gbc.score(X_test, y_test))
print(classification_report(y_test, y_predict, target_names=['died', 'survived']))

# Random forest
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
y_predict = rfc.predict(X_test)
print('Random Forest: ', rfc.score(X_test, y_test))
print(classification_report(y_test, y_predict, target_names=['died', 'survived']))
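Note: the biostat.mc.vanderbilt.edu wiki that hosted titanic.txt may no longer be reachable. A minimal fallback, assuming you have saved a local copy of the file (the path below is hypothetical):

# Load a local copy if the remote URL is unreachable (local path is an assumption)
titanic = pd.read_csv('titanic.txt')

The percentile search and final refit above can also be expressed with sklearn's Pipeline and GridSearchCV, which avoids refitting the selector and the tree by hand. A minimal sketch, assuming the X_train, X_test, y_train, y_test produced by the code above:

from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.feature_selection import SelectPercentile, chi2

# Chain chi2 feature selection and the entropy-criterion tree, then grid-search the percentile
pipe = Pipeline([
    ('fs', SelectPercentile(chi2)),
    ('dt', DecisionTreeClassifier(criterion='entropy')),
])
search = GridSearchCV(pipe, {'fs__percentile': range(1, 100, 2)}, cv=5)
search.fit(X_train, y_train)
print(search.best_params_, search.best_score_)
print('test accuracy:', search.score(X_test, y_test))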