import warnings

# Silence all warnings (sklearn/xgboost emit noisy deprecation messages).
warnings.filterwarnings('ignore')

import os
import pandas as pd
import numpy as np
from pylab import *

# Use the SimHei font so matplotlib can render Chinese labels, and keep
# the minus sign rendering correctly alongside a CJK font.
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False
# Part 1: Classification
# 1. Load the aviation dataset; drop rows with missing values and duplicates.
data = pd.read_excel('aviation.xls', index_col='MEMBER_NO')
print(data.head())
data.dropna(inplace=True)
data.drop_duplicates(inplace=True)
print(data.info())
# 2. Draw a random sample and slice it into features X and target y
#    (all columns but the last are features; the last column is the label).
# NOTE(review): the original task text asks for 10000 samples but the code
# draws 1000 — confirm the intended sample size before changing it.
sample_data = data.sample(1000)
X = sample_data.iloc[:, :-1]
y = sample_data.iloc[:, -1]
print(X.shape)
# 3. Compare XGBoost and Random Forest F1 scores using 5-fold cross-validation.
from sklearn.model_selection import cross_val_score
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier

model1 = XGBClassifier()
model2 = RandomForestClassifier()
cv1 = cross_val_score(model1, X, y, scoring='f1', cv=5)
cv2 = cross_val_score(model2, X, y, scoring='f1', cv=5)
print("XGBOOST和随机森林的F1分值分别为:{},{}".format(cv1.mean(), cv2.mean()))
# 4. Grid-search the model with the higher F1 score (XGBoost) for its best
#    hyperparameters, using 10-fold cross-validation on F1.
from sklearn.model_selection import GridSearchCV

param = {'n_estimators': [50, 100, 150],
         'subsample': [0.4, 0.6, 0.8, 1]}
grid = GridSearchCV(model1, param_grid=param, scoring='f1', cv=10)
grid.fit(X, y)
print(grid.best_params_)  # observed result: {'n_estimators': 100, 'subsample': 1}
# 5. Split the full (cleaned) dataset 70/30 into train and test sets.
#    random_state is fixed for reproducibility.
from sklearn.model_selection import train_test_split

X, y = data.iloc[:, :-1], data.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
# 6. Train the tuned model on the training set, predict on the test set,
#    print the confusion matrix and classification report, and plot ROC.
from sklearn.metrics import classification_report, confusion_matrix, roc_curve

# Apply the best hyperparameters found by the grid search above.
model1.set_params(n_estimators=100, subsample=1)
model1.fit(X_train, y_train)

# Predict once and reuse the result for both reports.
y_pred = model1.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))

# ROC curve: predict_proba returns two columns (P(class 0), P(class 1));
# roc_curve needs the positive-class probabilities, i.e. column 1.
fpr, tpr, thresholds = roc_curve(y_test, model1.predict_proba(X_test)[:, 1])
plt.plot(fpr, tpr)
plt.show()