Python Training Camp Check-in: DAY 15

Review Day

Carefully go back over the previous 14 days of material; anyone who has fallen behind should use today to catch up.

Assignment:

Find a structured dataset on Kaggle or elsewhere and build a brand-new project with the techniques covered so far, so that you complete a project of your own from start to finish.

Dataset: Brain Stroke Dataset

Requirements:

  1. If the dataset has a public URL, provide it; otherwise upload it to a file-sharing service and post that link.
  2. Try to pick something different from other participants; prefer a dataset related to your own field.
  3. Explore the open-data websites out there, e.g. UCI and Kaggle.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")

plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei: a common CJK font on Windows, so non-ASCII plot labels render
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly with a CJK font
dt = pd.read_csv(r'stroke.csv')
dt.info()
dt.head()
dt.isnull().sum()  # no missing values in this dataset, so no imputation is needed
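# There are no gaps here, but as a minimal sketch (an assumption, not part of
# the original pipeline) of what imputation would look like if bmi or
# smoking_status had missing entries -- both lines are no-ops on clean data:
dt['bmi'] = dt['bmi'].fillna(dt['bmi'].median())                                     # median for a skewed numeric column
dt['smoking_status'] = dt['smoking_status'].fillna(dt['smoking_status'].mode()[0])  # mode for a categorical column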

features = dt.select_dtypes(include=['object']).columns.tolist()
print("\n分类特征取值分布:")
for i in features:
    print(f"\n特征 '{i}' 取值分布:")
    print(dt[i].value_counts(dropna=False))  # 显示包括空值的分布

# One-hot encode the binary categorical features
dt = pd.get_dummies(dt, columns=['ever_married','Residence_type'], drop_first=True)  # drop_first=True avoids multicollinearity
dt.head()
# cast the boolean dummy columns produced by get_dummies to integers
dt['ever_married_Yes'] = dt['ever_married_Yes'].astype(int)
dt['Residence_type_Urban'] = dt['Residence_type_Urban'].astype(int)
dt.head()
# Label-encode the remaining categorical features (note: this imposes an arbitrary ordering)
mapping = {
    'gender': {'Male': 1, 'Female': 0},
    'work_type': {'Private': 3, 'Self-employed': 1, 'children': 0, 'Govt_job': 2},
    'smoking_status': {'formerly smoked': 2, 'never smoked': 0, 'smokes': 3, 'Unknown': 1}
}
features2 = ['gender','work_type','smoking_status']
for i in features2:
    dt[i] = dt[i].map(mapping[i])
dt.head()
dt.rename(columns={'gender':'Male'},inplace=True)
# Min-max normalize the continuous features
features3 = ['age','avg_glucose_level','bmi']
for j in features3:
    dt[j] = (dt[j] - dt[j].min()) / (dt[j].max() - dt[j].min())
dt.head()
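# Caveat: the min/max above are computed on the full dataset, which leaks a
# little test-set information into training. A leakage-free sketch (assuming
# the train/test split is done first) fits the scaler on the training rows only:
#     from sklearn.preprocessing import MinMaxScaler
#     scaler = MinMaxScaler()
#     X_train[features3] = scaler.fit_transform(X_train[features3])
#     X_test[features3]  = scaler.transform(X_test[features3])  # transform only, never fit on test data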
# Correlation heatmap of the continuous features
correlation_matrix = dt[features3].corr()
plt.rcParams['figure.dpi'] = 300
plt.figure(figsize=(12, 10))
sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', vmin=-1, vmax=1)
plt.title('Correlation heatmap of continuous features')
plt.show()
# Plots for a categorical feature of interest: gender
sns.histplot(dt['Male'])
plt.title('Histogram of Male')
plt.xlabel('Male')
plt.ylabel('Count')
plt.show()

plt.figure(figsize=(8, 6))
sns.countplot(x='Male', hue='stroke',data=dt)
plt.title('Gender vs. stroke')
plt.xlabel('Male')
plt.ylabel('Count')
plt.show()

# Plots for a continuous feature of interest: bmi
sns.boxplot(dt['bmi'])
plt.title('Boxplot of bmi')
plt.xlabel('bmi')
plt.show()

plt.figure(figsize=(8,6))
sns.violinplot(x='stroke', y='bmi', data=dt)
plt.title('bmi vs. stroke')
plt.xlabel('stroke')
plt.ylabel('bmi')
plt.show()

plt.figure(figsize=(8, 6))
sns.histplot(x='bmi', hue='stroke', data=dt, kde=True, element="step")
plt.title('bmi vs. stroke')
plt.xlabel('bmi')
plt.ylabel('Count')
plt.show()
# Tip: do this kind of plotting before normalization where possible, to keep the raw scales interpretable

from sklearn.model_selection import train_test_split
X = dt.drop(['stroke'],axis=1)
y = dt['stroke']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=42)
print(f"训练集形状: {X_train.shape}, 测试集形状: {X_test.shape}")

from imblearn.over_sampling import SMOTE 
smote = SMOTE(random_state=42)
X_train_smote, y_train_smote = smote.fit_resample(X_train, y_train)
print("SMOTE过采样后训练集的形状:", X_train_smote.shape, y_train_smote.shape)

# Train and evaluate the models
from sklearn.svm import SVC                              # support vector machine classifier
from sklearn.neighbors import KNeighborsClassifier       # k-nearest-neighbors classifier
from sklearn.linear_model import LogisticRegression      # logistic regression classifier
import xgboost as xgb                                    # XGBoost classifier
import lightgbm as lgb                                   # LightGBM classifier
from sklearn.ensemble import RandomForestClassifier      # random forest classifier
from catboost import CatBoostClassifier                  # CatBoost classifier
from sklearn.tree import DecisionTreeClassifier          # decision tree classifier
from sklearn.naive_bayes import GaussianNB               # Gaussian naive Bayes classifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score  # scalar classification metrics
from sklearn.metrics import classification_report, confusion_matrix                  # classification report and confusion matrix
# Compare the models first on the non-SMOTE training set
#SVM
svm_model = SVC(random_state=42)
svm_model.fit(X_train,y_train)
svm_pred = svm_model.predict(X_test)
print("\nSVM classification report:")
print(classification_report(y_test, svm_pred))
print("SVM confusion matrix:")
print(confusion_matrix(y_test, svm_pred))
print("SVM evaluation metrics:")
print(f"Accuracy:  {accuracy_score(y_test, svm_pred):.4f}")
print(f"Precision: {precision_score(y_test, svm_pred):.4f}")
print(f"Recall:    {recall_score(y_test, svm_pred):.4f}")
print(f"F1 score:  {f1_score(y_test, svm_pred):.4f}")
#SVM on the SMOTE training set
svm_model = SVC(random_state=42)
svm_model.fit(X_train_smote,y_train_smote)
svm_pred = svm_model.predict(X_test)
print("\nSVM classification report:")
print(classification_report(y_test, svm_pred))
print("SVM confusion matrix:")
print(confusion_matrix(y_test, svm_pred))
print("SVM evaluation metrics:")
print(f"Accuracy:  {accuracy_score(y_test, svm_pred):.4f}")
print(f"Precision: {precision_score(y_test, svm_pred):.4f}")
print(f"Recall:    {recall_score(y_test, svm_pred):.4f}")
print(f"F1 score:  {f1_score(y_test, svm_pred):.4f}")  # SMOTE clearly hurts accuracy here
#Logistic regression
lr_model = LogisticRegression(random_state=42)
lr_model.fit(X_train,y_train)
lr_pred = lr_model.predict(X_test)
print("\nLogistic regression classification report:")
print(classification_report(y_test, lr_pred))
print("Logistic regression confusion matrix:")
print(confusion_matrix(y_test, lr_pred))
print("Logistic regression evaluation metrics:")
print(f"Accuracy:  {accuracy_score(y_test, lr_pred):.4f}")
print(f"Precision: {precision_score(y_test, lr_pred):.4f}")
print(f"Recall:    {recall_score(y_test, lr_pred):.4f}")
print(f"F1 score:  {f1_score(y_test, lr_pred):.4f}")
#Logistic regression on the SMOTE training set
lr_model = LogisticRegression(random_state=42)
lr_model.fit(X_train_smote,y_train_smote)
lr_pred = lr_model.predict(X_test)
print("\nLogistic regression classification report:")
print(classification_report(y_test, lr_pred))
print("Logistic regression confusion matrix:")
print(confusion_matrix(y_test, lr_pred))
print("Logistic regression evaluation metrics:")
print(f"Accuracy:  {accuracy_score(y_test, lr_pred):.4f}")
print(f"Precision: {precision_score(y_test, lr_pred):.4f}")
print(f"Recall:    {recall_score(y_test, lr_pred):.4f}")
print(f"F1 score:  {f1_score(y_test, lr_pred):.4f}")  # accuracy drops here too
#XGBoost
xgb_model = xgb.XGBClassifier(random_state=42)
xgb_model.fit(X_train,y_train)
xgb_pred = xgb_model.predict(X_test)
print("\nXGBoost classification report:")
print(classification_report(y_test, xgb_pred))
print("XGBoost confusion matrix:")
print(confusion_matrix(y_test, xgb_pred))
print("XGBoost evaluation metrics:")
print(f"Accuracy:  {accuracy_score(y_test, xgb_pred):.4f}")
print(f"Precision: {precision_score(y_test, xgb_pred):.4f}")
print(f"Recall:    {recall_score(y_test, xgb_pred):.4f}")
print(f"F1 score:  {f1_score(y_test, xgb_pred):.4f}")
#XGBoost on the SMOTE training set
xgb_model = xgb.XGBClassifier(random_state=42)
xgb_model.fit(X_train_smote,y_train_smote)
xgb_pred = xgb_model.predict(X_test)
print("\nXGBoost classification report:")
print(classification_report(y_test, xgb_pred))
print("XGBoost confusion matrix:")
print(confusion_matrix(y_test, xgb_pred))
print("XGBoost evaluation metrics:")
print(f"Accuracy:  {accuracy_score(y_test, xgb_pred):.4f}")
print(f"Precision: {precision_score(y_test, xgb_pred):.4f}")
print(f"Recall:    {recall_score(y_test, xgb_pred):.4f}")
print(f"F1 score:  {f1_score(y_test, xgb_pred):.4f}")  # SMOTE worth considering here
#LightGBM
lgb_model = lgb.LGBMClassifier(random_state=42)
lgb_model.fit(X_train,y_train)
lgb_pred = lgb_model.predict(X_test)
print("\nLightGBM classification report:")
print(classification_report(y_test, lgb_pred))
print("LightGBM confusion matrix:")
print(confusion_matrix(y_test, lgb_pred))
print("LightGBM evaluation metrics:")
print(f"Accuracy:  {accuracy_score(y_test, lgb_pred):.4f}")
print(f"Precision: {precision_score(y_test, lgb_pred):.4f}")
print(f"Recall:    {recall_score(y_test, lgb_pred):.4f}")
print(f"F1 score:  {f1_score(y_test, lgb_pred):.4f}")
#LightGBM on the SMOTE training set
lgb_model = lgb.LGBMClassifier(random_state=42)
lgb_model.fit(X_train_smote,y_train_smote)
lgb_pred = lgb_model.predict(X_test)
print("\nLightGBM classification report:")
print(classification_report(y_test, lgb_pred))
print("LightGBM confusion matrix:")
print(confusion_matrix(y_test, lgb_pred))
print("LightGBM evaluation metrics:")
print(f"Accuracy:  {accuracy_score(y_test, lgb_pred):.4f}")
print(f"Precision: {precision_score(y_test, lgb_pred):.4f}")
print(f"Recall:    {recall_score(y_test, lgb_pred):.4f}")
print(f"F1 score:  {f1_score(y_test, lgb_pred):.4f}")  # SMOTE worth considering!
#Random forest
rf_model = RandomForestClassifier(random_state=42)
rf_model.fit(X_train,y_train)
rf_pred = rf_model.predict(X_test)
print("\nRandom forest classification report:")
print(classification_report(y_test, rf_pred))
print("Random forest confusion matrix:")
print(confusion_matrix(y_test, rf_pred))
print("Random forest evaluation metrics:")
print(f"Accuracy:  {accuracy_score(y_test, rf_pred):.4f}")
print(f"Precision: {precision_score(y_test, rf_pred):.4f}")
print(f"Recall:    {recall_score(y_test, rf_pred):.4f}")
print(f"F1 score:  {f1_score(y_test, rf_pred):.4f}")
#Random forest on the SMOTE training set
rf_model = RandomForestClassifier(random_state=42)
rf_model.fit(X_train_smote,y_train_smote)
rf_pred = rf_model.predict(X_test)
print("\nRandom forest classification report:")
print(classification_report(y_test, rf_pred))
print("Random forest confusion matrix:")
print(confusion_matrix(y_test, rf_pred))
print("Random forest evaluation metrics:")
print(f"Accuracy:  {accuracy_score(y_test, rf_pred):.4f}")
print(f"Precision: {precision_score(y_test, rf_pred):.4f}")
print(f"Recall:    {recall_score(y_test, rf_pred):.4f}")
print(f"F1 score:  {f1_score(y_test, rf_pred):.4f}")  # worth considering
# Hyperparameter tuning for LightGBM
from sklearn.model_selection import GridSearchCV
import time
param_grid = {
    'n_estimators': [50, 100, 200],      # number of trees
    'learning_rate': [0.01, 0.1, 0.2],   # learning rate
    'max_depth': [3, 5, 7],              # maximum tree depth
    'subsample': [0.8, 0.9, 1.0],        # row subsampling ratio
    'colsample_bytree': [0.8, 0.9, 1.0]  # column subsampling ratio
}
# Build the grid-search object
grid_search = GridSearchCV(estimator=lgb_model, param_grid=param_grid, cv=5, scoring='accuracy')
start_time = time.time()
# Run the grid search on the original training set
grid_search.fit(X_train, y_train)  # model construction and fitting are all handled inside the search object
end_time = time.time()

print(f"Grid search took {end_time - start_time:.4f} s")
print("Best parameters:", grid_search.best_params_)  # best_params_ holds the winning combination

# Predict with the best model
best_model = grid_search.best_estimator_  # the refit best estimator
best_pred = best_model.predict(X_test)    # predict on the test set

print("\nGrid-searched LightGBM, classification report on the test set:")
print(classification_report(y_test, best_pred))
print("Grid-searched LightGBM, confusion matrix on the test set:")
print(confusion_matrix(y_test, best_pred))
print(f"Accuracy:  {accuracy_score(y_test, best_pred):.4f}")
print(f"Precision: {precision_score(y_test, best_pred):.4f}")
print(f"Recall:    {recall_score(y_test, best_pred):.4f}")
print(f"F1 score:  {f1_score(y_test, best_pred):.4f}")
# Repeat the grid search on the SMOTE-resampled training set
start_time = time.time()
grid_search.fit(X_train_smote, y_train_smote)
end_time = time.time()

print(f"Grid search took {end_time - start_time:.4f} s")
print("Best parameters:", grid_search.best_params_)

# Predict with the best model
best_model = grid_search.best_estimator_
best_pred = best_model.predict(X_test)

print("\nGrid-searched LightGBM (SMOTE), classification report on the test set:")
print(classification_report(y_test, best_pred))
print("Grid-searched LightGBM (SMOTE), confusion matrix on the test set:")
print(confusion_matrix(y_test, best_pred))
print(f"Accuracy:  {accuracy_score(y_test, best_pred):.4f}")
print(f"Precision: {precision_score(y_test, best_pred):.4f}")
print(f"Recall:    {recall_score(y_test, best_pred):.4f}")  # a slight improvement at best
# Class-weight adjustment
from sklearn.utils.class_weight import compute_class_weight
from sklearn.model_selection import StratifiedKFold, cross_validate
from sklearn.metrics import make_scorer, precision_score, recall_score, f1_score

counts = np.bincount(y_train)
minority_label = np.argmin(counts)  # label with the fewest samples
majority_label = np.argmax(counts)
print(f"Class counts in the training set: {counts}")
print(f"Minority label: {minority_label}, majority label: {majority_label}")
best_params = grid_search.best_params_  # note: best_params_ reflects the most recent fit, i.e. the SMOTE-tuned parameters
# Initialize LightGBM with balanced class weights
class_weights = compute_class_weight('balanced', classes=np.unique(y_train), y=y_train)
lgb_model_weighted = lgb.LGBMClassifier(
    **best_params,
    random_state=42,
    class_weight={0: class_weights[0], 1: class_weights[1]}
)

# Cross-validation strategy (StratifiedKFold keeps the class ratio similar in every fold)
cv_strategy = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)  # 5-fold CV

# Metrics for cross-validation, with extra attention to the minority class.
# make_scorer pins pos_label; change it if your minority class is not label 1.
scoring = {
    'accuracy': 'accuracy',
    'precision_minority': make_scorer(precision_score, pos_label=minority_label, zero_division=0),
    'recall_minority': make_scorer(recall_score, pos_label=minority_label),
    'f1_minority': make_scorer(f1_score, pos_label=minority_label)
}

print(f"开始进行 {cv_strategy.get_n_splits()} 折交叉验证...")
start_time_cv = time.time()

# cross_validate 会自动完成训练和评估过程
cv_results = cross_validate(
    estimator=lgb_model_weighted,
    X=X_train,
    y=y_train,
    cv=cv_strategy,
    scoring=scoring,
    n_jobs=-1, # 使用所有可用的 CPU 核心
    return_train_score=False # 通常我们更关心测试折的得分
)

end_time_cv = time.time()
print(f"交叉验证耗时: {end_time_cv - start_time_cv:.4f} 秒")

# 打印交叉验证结果的平均值
print("\n带权重lgb 交叉验证平均性能 (基于训练集划分):")
for metric_name, scores in cv_results.items():
    if metric_name.startswith('test_'): # 我们关心的是在验证折上的表现
# 提取指标名称(去掉 'test_' 前缀)
        clean_metric_name = metric_name.split('test_')[1]
        print(f"平均 {clean_metric_name}: {np.mean(scores):.4f} (+/- {np.std(scores):.4f})")
print("-" * 50)

print("---  训练最终的带权重模型 (整个训练集) 并在测试集上评估 ---")
start_time_final = time.time()
lgb_model_weighted_final = lgb.LGBMClassifier(
    **best_params,
    random_state=42,
    class_weight={0: class_weights[0], 1: class_weights[1]}
)
lgb_model_weighted_final.fit(X_train, y_train)
lgb_pred_weighted = lgb_model_weighted_final.predict(X_test)

# 修正评估部分的变量名
print("\n带权重lgb 在测试集上的混淆矩阵:")
print(confusion_matrix(y_test, lgb_pred_weighted))
print(f"准确率: {accuracy_score(y_test, lgb_pred_weighted):.4f}")
print(f"精确率: {precision_score(y_test, lgb_pred_weighted):.4f}")
print(f"召回率: {recall_score(y_test, lgb_pred_weighted, pos_label=minority_label):.4f}")
print("-" * 50)


# Repeat the weighted-model experiment, this time cross-validating on the SMOTE training set
counts = np.bincount(y_train)
minority_label = np.argmin(counts)  # label with the fewest samples
majority_label = np.argmax(counts)
print(f"Class counts in the training set: {counts}")
print(f"Minority label: {minority_label}, majority label: {majority_label}")
best_params = grid_search.best_params_  # the SMOTE-tuned parameters, as above
class_weights = compute_class_weight('balanced', classes=np.unique(y_train), y=y_train)
lgb_model_weighted = lgb.LGBMClassifier(
    **best_params,
    random_state=42,
    class_weight={0: class_weights[0], 1: class_weights[1]}
)

cv_strategy = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)  # 5-fold CV

# Same minority-focused scoring as before
scoring = {
    'accuracy': 'accuracy',
    'precision_minority': make_scorer(precision_score, pos_label=minority_label, zero_division=0),
    'recall_minority': make_scorer(recall_score, pos_label=minority_label),
    'f1_minority': make_scorer(f1_score, pos_label=minority_label)
}

print(f"开始进行 {cv_strategy.get_n_splits()} 折交叉验证...")
start_time_cv = time.time()

# cross_validate 会自动完成训练和评估过程
cv_results = cross_validate(
    estimator=lgb_model_weighted,
    X=X_train_smote,
    y=y_train_smote,
    cv=cv_strategy,
    scoring=scoring,
    n_jobs=-1, # 使用所有可用的 CPU 核心
    return_train_score=False # 通常我们更关心测试折的得分
)

end_time_cv = time.time()
print(f"交叉验证耗时: {end_time_cv - start_time_cv:.4f} 秒")

# 打印交叉验证结果的平均值
print("\n带权重lgb 交叉验证平均性能 (基于训练集划分):")
for metric_name, scores in cv_results.items():
    if metric_name.startswith('test_'): # 我们关心的是在验证折上的表现
# 提取指标名称(去掉 'test_' 前缀)
        clean_metric_name = metric_name.split('test_')[1]
        print(f"平均 {clean_metric_name}: {np.mean(scores):.4f} (+/- {np.std(scores):.4f})")
print("-" * 50)
print("---  训练最终的带权重模型 (整个smote训练集) 并在测试集上评估 ---")
start_time_final = time.time()
lgb_model_weighted_final = lgb.LGBMClassifier(
    **best_params,
    random_state=42,
    class_weight={0: class_weights[0], 1: class_weights[1]}
)
lgb_model_weighted_final.fit(X_train_smote, y_train_smote)
lgb_pred_weighted = lgb_model_weighted_final.predict(X_test)

# 修正评估部分的变量名
print("\n带权重lgb 在测试集上的混淆矩阵:")
print(confusion_matrix(y_test, lgb_pred_weighted))
print(f"准确率: {accuracy_score(y_test, lgb_pred_weighted):.4f}")
print(f"精确率: {precision_score(y_test, lgb_pred_weighted):.4f}")
print(f"召回率: {recall_score(y_test, lgb_pred_weighted, pos_label=minority_label):.4f}")
print("-" * 50)

# Finally, SHAP analysis on the grid-searched, class-weighted LightGBM model
import shap
explainer = shap.TreeExplainer(lgb_model_weighted_final)
shap_values = explainer.shap_values(X_test)
shap_values = np.array(shap_values)
print("shap_values shape:", shap_values.shape)
print("X_test shape:", X_test.shape)

# --- 1. SHAP feature-importance bar plot (summary plot, bar) ---
print("--- 1. SHAP feature-importance bar plot ---")
shap.summary_plot(shap_values[0], X_test, plot_type="bar", show=False)
plt.title('SHAP feature importance (bar)')
plt.show()

# --- 2. SHAP feature-importance violin plot (summary plot, violin) ---
print("--- 2. SHAP feature-importance violin plot ---")
shap.summary_plot(shap_values[0], X_test, plot_type="violin", show=False)
plt.title('SHAP feature importance (violin)')
plt.show()

# --- 3. SHAP dependence plot ---
print("--- 3. SHAP dependence plot ---")
shap.dependence_plot("age", shap_values[0], X_test, interaction_index=None, show=False)
plt.title('SHAP dependence plot')
plt.show()

# --- 4. SHAP force plot ---
print("--- 4. SHAP force plot ---")
shap.force_plot(explainer.expected_value[0], shap_values[0][0], X_test.iloc[0], matplotlib=True, show=False, text_rotation=30)
plt.title('SHAP force plot')
plt.show()

# --- 5. SHAP decision plot ---
print("--- 5. SHAP decision plot ---")
shap.decision_plot(explainer.expected_value[0], shap_values[0][:50], X_test.iloc[:50], feature_order='hclust', show=False)
plt.title('SHAP decision plot')
plt.show()

# The same SHAP analysis on the plain (untuned, SMOTE-trained) LightGBM model for comparison
explainer = shap.TreeExplainer(lgb_model)
shap_values = explainer.shap_values(X_test)
shap_values = np.array(shap_values)
print("shap_values shape:", shap_values.shape)
print("X_test shape:", X_test.shape)
print("--- SHAP feature-importance violin plot ---")
shap.summary_plot(shap_values[0], X_test, plot_type="violin", show=False)
plt.title('SHAP feature importance (violin)')
plt.show()

Retrospective: on this brain-stroke dataset, logistic regression, LightGBM, and random forest performed best, and LightGBM held up best after SMOTE, so it was the model taken further. Oversampling lowered the precision-related metrics of every model, and both the grid search and the class-weight adjustment brought only marginal gains; the models tuned and weighted on SMOTE data ended up no better than the untouched baseline, which raises the question of whether oversampling is worth using here at all. The SHAP results look a little odd and deserve closer study.

In effect this shows the dataset is of good quality and needs no heavy processing; as a rule, the first thing to check on a new dataset is whether the class distribution is balanced.
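On the raw file, that first check is a couple of lines (a sketch, reusing the same stroke.csv):

raw = pd.read_csv(r'stroke.csv')
print(raw['stroke'].value_counts(normalize=True))  # heavily skewed toward class 0, which is what motivated the SMOTE and class-weight experiments above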

Credit: @浙大疏锦行 (CSDN blog)
