import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
import numpy as np
from sklearn import feature_selection
import pylab as pl
# Load the Titanic dataset from the web.
# NOTE(review): this vanderbilt.edu URL is dead; the dataset now lives at
# https://hbiostat.org/data/repo/titanic.txt — confirm and update.
titanic = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')

# Separate the prediction target from the feature columns.
y = titanic['survived']
# Drop identifier and target columns; axis=1 selects columns.
X = titanic.drop(['row.names', 'name', 'survived'], axis=1)

# Impute missing ages with the column mean.
# BUG FIX: chained `X['age'].fillna(..., inplace=True)` is the pandas
# chained-assignment pitfall — it warns and becomes a no-op under
# copy-on-write (pandas 2+). Assign the result back instead.
X['age'] = X['age'].fillna(X['age'].mean())
# Fill every remaining missing value with a sentinel category.
X = X.fillna('UNKNOWN')

# 75/25 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=33)
# One-hot encode categorical features (numerics pass through) by turning
# each DataFrame row into a dict and vectorizing.
vec = DictVectorizer()
# BUG FIX: the orient value is 'records' (plural). The singular 'record'
# relied on pandas' old abbreviation matching and raises ValueError in
# modern pandas versions.
X_train = vec.fit_transform(X_train.to_dict(orient='records'))
X_test = vec.transform(X_test.to_dict(orient='records'))

# Baseline: an entropy-criterion decision tree trained on all features;
# report held-out accuracy.
dt = DecisionTreeClassifier(criterion='entropy')
dt.fit(X_train, y_train)
print(dt.score(X_test, y_test))
# Keep only the top 20% of features, ranked by the chi-squared statistic
# against the training labels, then retrain the tree on the reduced set
# and report held-out accuracy.
fs = feature_selection.SelectPercentile(
    feature_selection.chi2, percentile=20)
X_train_fs = fs.fit_transform(X_train, y_train)
X_test_fs = fs.transform(X_test)
dt.fit(X_train_fs, y_train)
print(dt.score(X_test_fs, y_test))
# Sweep the percentile of retained features (1%, 3%, ..., 99%) and
# estimate accuracy for each setting with 5-fold cross-validation on the
# training split.
# BUG FIX: the loop body below was not indented in the original, which is
# a SyntaxError in Python; the intended indentation is restored.
percentiles = range(1, 100, 2)
results = []
for i in percentiles:
    fs = feature_selection.SelectPercentile(
        feature_selection.chi2, percentile=i)
    X_train_fs = fs.fit_transform(X_train, y_train)
    result = cross_val_score(dt, X_train_fs, y_train, cv=5)
    # np.append returns a new array, so `results` becomes an ndarray of
    # mean CV scores, one per percentile.
    results = np.append(results, result.mean())
print(results)

# Plot cross-validated accuracy against the feature percentile.
pl.plot(percentiles, results)
pl.xlabel('percentiles of features')
pl.ylabel('accuracy')
pl.show()
# Final model: keep the top 7% of features (the best setting found in the
# sweep above), retrain the tree, and evaluate on the held-out test split.
fs = feature_selection.SelectPercentile(
    feature_selection.chi2, percentile=7)
X_train_fs = fs.fit_transform(X_train, y_train)
X_test_fs = fs.transform(X_test)
dt.fit(X_train_fs, y_train)
print(dt.score(X_test_fs, y_test))
# Machine learning with Python: feature selection
# (blog-scrape artifact — original page's "latest recommended article" line, published 2024-05-22 09:48:27)