Random Forest Classification: Hyperparameter Tuning Workflow and Feature Importance Evaluation

# =============================================================================
# Predicting breast cancer with a random forest classifier
# https://zhuanlan.zhihu.com/p/107389097?utm_source=wechat_session&utm_medium=social&utm_oi=630066686708158464
# Load and inspect the dataset
import pandas as pd
from sklearn.datasets import load_breast_cancer

dataset = load_breast_cancer()
columns = dataset.feature_names.tolist() # extract the dataset's feature names and convert them to a list
data = pd.DataFrame(dataset['data'], columns = columns)
data['cancer'] = dataset['target']

display(data.head()) # inspect the new DataFrame to make sure it looks as expected
data.info() # dtype and non-null count of each column; convert dtypes if needed (info() prints directly, no display() wrapper required)
display(data.isna().sum()) # make sure there are no NaN values; otherwise impute or drop them
display(data.describe()) # min, max, mean, median, standard deviation and quartiles of each column
# =============================================================================
# Shuffle and split the data
from sklearn.model_selection import train_test_split 
X = data.drop('cancer', axis = 1)
y = data['cancer']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.5, 
                                                    random_state = 2020,
                                                    stratify = y)
# stratify = y keeps the proportion of labels 0 and 1 identical in the train and test splits (see check below)
# https://www.cnblogs.com/Yanjy-OnlyOne/p/11288098.html
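# A minimal sanity check (not part of the original walkthrough): with stratify = y the
# label proportions of the two splits should be (almost) identical.
print('train label ratio:\n', y_train.value_counts(normalize = True))
print('test label ratio:\n', y_test.value_counts(normalize = True))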
# =============================================================================
# Standardize the data (centering and scaling)
import numpy as np
from sklearn.preprocessing import StandardScaler 
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.transform(X_test) # transform only: reuse the mean/std learned from the training set
# On the difference between fit_transform and transform:
# https://blog.csdn.net/weixin_38278334/article/details/82971752
y_train = np.array(y_train) # convert the Series to a NumPy array
# =============================================================================
# Fit a baseline random forest model
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import recall_score
rfc = RandomForestClassifier()
rfc.fit(X_train_scaled, y_train)
print('Score of the 1st rfc:', rfc.score(X_test_scaled, y_test))
# the model score should be evaluated on the test set
# =============================================================================
# Feature importance evaluation via the fitted classifier's feature_importances_ attribute
import seaborn as sns
import matplotlib.pyplot as plt
feats = {}
# build a dict mapping each feature name to its importance
for feature, importance in zip(columns, rfc.feature_importances_):
    # zip pairs up corresponding elements of the iterables passed to it
    # and returns an iterator of the resulting tuples
    feats[feature] = importance
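# Equivalent one-liner (an alternative sketch, same result as the loop above):
# feats = dict(zip(columns, rfc.feature_importances_))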

importances = pd.DataFrame.from_dict(feats, orient='index').rename(columns={0:'Gini-Importance'})
# DataFrame.from_dict builds a DataFrame from a dict of dicts or of array-like values
# orient = 'columns' (default) uses the dict keys as column labels; orient = 'index' uses them as row labels
# DataFrame.rename(columns = {'Oldname':'Newname'}) renames column labels
importances = importances.sort_values(by='Gini-Importance', ascending = False)
# DataFrame.sort_values() sorting: https://blog.csdn.net/wendaomudong_l2d4/article/details/80648633
# axis = 0 or 'index' (default) sorts along rows; 1 or 'columns' sorts along columns
# by = the row or column label to sort by
# ascending = True / False for ascending or descending order
importances = importances.reset_index() # move the feature names from the index into a regular column
importances = importances.rename(columns={'index':'Features'})
sns.set(style="whitegrid", color_codes=True, font_scale = 1.7)
fig, ax = plt.subplots()
fig.set_size_inches(30,15)
sns.barplot(x='Gini-Importance', y='Features', data=importances, color='skyblue')
plt.xlabel('Importance', fontsize=25, weight = 'bold')
plt.ylabel('Features', fontsize=25, weight = 'bold')
plt.title('Feature Importance', fontsize=25, weight = 'bold')
plt.show()
print('The importances of the features' +
      ' according to the RandomForestClassifier:')
display(importances)
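# Optional cross-check (an addition, not in the original post): Gini importance can be biased
# toward high-cardinality / high-variance features, so permutation importance computed on the
# test set is a common complementary view. Sketch only; requires scikit-learn >= 0.22.
from sklearn.inspection import permutation_importance
perm = permutation_importance(rfc, X_test_scaled, y_test, n_repeats = 10, random_state = 2020)
perm_importances = pd.Series(perm.importances_mean, index = columns).sort_values(ascending = False)
display(perm_importances.head(10))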
# =============================================================================
# Principal component analysis (PCA)
from sklearn.decomposition import PCA
pca_test = PCA(n_components = 30) # n_components is the number of principal components to keep
pca_test.fit(X_train_scaled)
plt.figure(figsize=(15,12))
sns.set(style = 'whitegrid')
plt.plot(np.cumsum(pca_test.explained_variance_ratio_))
# np.cumsum() computes the running cumulative sum
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance')
plt.axvline(linewidth=4, color='r', linestyle='--', x = 10, ymin = 0, ymax = 1)
plt.show()
evr = pca_test.explained_variance_ratio_
cvr = np.cumsum(pca_test.explained_variance_ratio_)
pca_df = pd.DataFrame()
pca_df['Cumulative Variance Ratio'] = cvr # cumulative explained variance ratio
pca_df['Explained Variance Ratio'] = evr  # per-component explained variance ratio
print('Cumulative and per-component explained variance ratios from PCA (first 10):')
display(pca_df.head(10))
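# Alternative (a sketch, not in the original post): instead of reading the component count off
# the cumulative-variance plot, PCA also accepts a float n_components in (0, 1) and keeps just
# enough components to reach that fraction of explained variance.
pca_95 = PCA(n_components = 0.95)
pca_95.fit(X_train_scaled)
print('Components needed to explain 95% of the variance:', pca_95.n_components_)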

# Dimensionality reduction and feature extraction with PCA
pca = PCA(n_components = 10)
pca.fit(X_train_scaled)
X_train_scaled_pca = pca.transform(X_train_scaled)
X_test_scaled_pca = pca.transform(X_test_scaled)
# each new feature is a linear combination of the original features weighted by the component loadings

pca_dims = []
for x in range(0,len(pca_df)):
    pca_dims.append('PCA Component {}'.format(x))
    
pca_test_df = pd.DataFrame(pca_test.components_, columns = columns,
                           index = pca_dims)
# pca_test.components_ holds the weights of the original features in each principal component
print('Principal component loadings obtained with PCA:')
display(pca_test_df.head(10))
# =============================================================================
# Retrain on the PCA-transformed features X_train_scaled_pca and X_test_scaled_pca
rfc_pca = RandomForestClassifier()
rfc_pca.fit(X_train_scaled_pca, y_train)
print('Score of the 2nd rfc:', rfc_pca.score(X_test_scaled_pca, y_test))
# =============================================================================
# First tuning round: RandomizedSearchCV
# RandomizedSearchCV makes it feasible to explore a much larger hyperparameter space
# the random forest hyperparameters are documented in the sklearn documentation
from sklearn.model_selection import RandomizedSearchCV
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1000, num = 10)]
max_features = ['log2', 'sqrt']
max_depth = [int(x) for x in np.linspace(start = 1, stop = 15, num = 15)]
min_samples_split = [int(x) for x in np.linspace(start = 2, stop = 50, num = 10)]
min_samples_leaf = [int(x) for x in np.linspace(start = 2, stop = 50, num = 10)]
bootstrap = [True, False]
param_rand = {'n_estimators': n_estimators,'max_features': max_features,
              'max_depth': max_depth,
              'min_samples_split': min_samples_split,
              'min_samples_leaf': min_samples_leaf,
              'bootstrap': bootstrap}
rs = RandomizedSearchCV(rfc_pca, param_rand, n_iter = 100, cv = 3, verbose = 1, 
                        n_jobs = -1, random_state = 0)
rs.fit(X_train_scaled_pca, y_train)
print('The best parameters after RandomizedSearchCV:', rs.best_params_)
rs_df = pd.DataFrame(rs.cv_results_).sort_values('rank_test_score').reset_index(drop=True)
rs_df = rs_df.drop(['mean_fit_time', 'std_fit_time',
                   'mean_score_time', 'std_score_time',
                   'params', 'split0_test_score',
                   'split1_test_score', 'split2_test_score',
                   'std_test_score'], axis = 1)
display(rs_df.head(10))
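# Quick check (an addition): RandomizedSearchCV refits the best configuration on the full
# training set by default (refit = True), so best_estimator_ can be scored directly.
print('Score of the best RandomizedSearchCV model:',
      rs.best_estimator_.score(X_test_scaled_pca, y_test))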
# Visualize the mean CV score against each hyperparameter
fig, axs = plt.subplots(ncols=3, nrows=2)
sns.set(style="whitegrid", color_codes=True, font_scale = 2)
fig.set_size_inches(30,25)
sns.barplot(x='param_n_estimators', y='mean_test_score', data=rs_df, ax=axs[0,0], color='lightgrey')
axs[0,0].set_ylim([.83,.93])
axs[0,0].set_title(label = 'n_estimators', size=30, weight='bold')
sns.barplot(x='param_min_samples_split', y='mean_test_score', data=rs_df, ax=axs[0,1], color='coral')
axs[0,1].set_ylim([.85,.93])
axs[0,1].set_title(label = 'min_samples_split', size=30, weight='bold')
sns.barplot(x='param_min_samples_leaf', y='mean_test_score', data=rs_df, ax=axs[0,2], color='lightgreen')
axs[0,2].set_ylim([.80,.93])
axs[0,2].set_title(label = 'min_samples_leaf', size=30, weight='bold')
sns.barplot(x='param_max_features', y='mean_test_score', data=rs_df, ax=axs[1,0], color='wheat')
axs[1,0].set_ylim([.88,.92])
axs[1,0].set_title(label = 'max_features', size=30, weight='bold')
sns.barplot(x='param_max_depth', y='mean_test_score', data=rs_df, ax=axs[1,1], color='lightpink')
axs[1,1].set_ylim([.80,.93])
axs[1,1].set_title(label = 'max_depth', size=30, weight='bold')
sns.barplot(x='param_bootstrap',y='mean_test_score', data=rs_df, ax=axs[1,2], color='skyblue')
axs[1,2].set_ylim([.88,.92])
axs[1,2].set_title(label = 'bootstrap', size=30, weight='bold')
# =============================================================================
# Second tuning round: GridSearchCV
from sklearn.model_selection import GridSearchCV
n_estimators = [300, 500, 700]
max_features = ['sqrt']
max_depth = [2, 3, 7, 11, 15]
min_samples_split = [2, 3, 4, 22, 23, 24]
min_samples_leaf = [2, 3, 4, 5, 6, 7]
bootstrap = [False]
param_grid = {'n_estimators':n_estimators,'max_features': max_features, 
              'max_depth':max_depth, 'min_samples_split': min_samples_split,
              'min_samples_leaf': min_samples_leaf, 'bootstrap':bootstrap,
            }
gs = GridSearchCV(rfc_pca, param_grid, cv = 3, verbose = 1, n_jobs = -1)
gs.fit(X_train_scaled_pca, y_train)
rfc_grid = gs.best_estimator_
print('The best parameters after the GridSearchCV:', gs.best_params_)
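# Quick check (an addition): score the refit best estimator on the held-out PCA test set.
print('Score of the best GridSearchCV model:', rfc_grid.score(X_test_scaled_pca, y_test))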
# =============================================================================
# Model comparison
y_pred = rfc.predict(X_test_scaled) # baseline model: no PCA, no tuning
y_pred_pca = rfc_pca.predict(X_test_scaled_pca) # with PCA, no tuning
y_pred_gs = gs.best_estimator_.predict(X_test_scaled_pca) # with PCA and GridSearchCV tuning
# Build a confusion matrix for each model
from sklearn.metrics import confusion_matrix
conf_matrix_baseline = pd.DataFrame(confusion_matrix(y_test, y_pred), 
                                    index = ['actual 0', 'actual 1'], 
                                    columns = ['predicted 0', 'predicted 1'])

conf_matrix_baseline_pca = pd.DataFrame(confusion_matrix(y_test, y_pred_pca), 
                                        index = ['actual 0', 'actual 1'], 
                                        columns = ['predicted 0', 'predicted 1'])

conf_matrix_tuned_pca = pd.DataFrame(confusion_matrix(y_test, y_pred_gs), 
                                     index = ['actual 0', 'actual 1'], 
                                     columns = ['predicted 0', 'predicted 1'])

display(conf_matrix_baseline)
display('Baseline Random Forest recall score', recall_score(y_test, y_pred))
display(conf_matrix_baseline_pca)
display('Baseline Random Forest With PCA recall score', 
        recall_score(y_test, y_pred_pca))
display(conf_matrix_tuned_pca)
display('Hyperparameter Tuned Random Forest With PCA Reduced Dimensionality recall score',
        recall_score(y_test, y_pred_gs))
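# Compact summary (a sketch using only the scores already computed above): collect the three
# recall values in one small DataFrame for an at-a-glance comparison.
recall_summary = pd.DataFrame(
    {'Recall': [recall_score(y_test, y_pred),
                recall_score(y_test, y_pred_pca),
                recall_score(y_test, y_pred_gs)]},
    index = ['Baseline RF', 'RF + PCA', 'RF + PCA + GridSearchCV'])
display(recall_summary)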

