Task 5: Model Fusion

#5.4.1 Regression | classification-probability fusion

Import the required packages

import numpy as np
import pandas as pd
import itertools
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

from sklearn import metrics
from sklearn import linear_model
from sklearn.datasets import make_blobs
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier

from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_moons
from sklearn.metrics import accuracy_score,roc_auc_score
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import ExtraTreesClassifier,GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB

from mlxtend.classifier import StackingClassifier
from mlxtend.plotting import plot_learning_curves
from mlxtend.plotting import plot_decision_regions

from xgboost import XGBClassifier

import warnings
warnings.filterwarnings("ignore")

##5.4.1-1 Simple weighted averaging: direct fusion of results

## Generate some simple sample data; test_prei denotes the predictions of the i-th model
test_pre1 = [1.2, 3.2, 2.1, 6.2]
test_pre2 = [0.9, 3.1, 2.0, 5.9]
test_pre3 = [1.1, 2.9, 2.2, 6.0]

#y_test_true denotes the ground-truth values
y_test_true = [1, 3, 2, 6]

## Define a weighted average of the results
def Weighted_method(test_pre1, test_pre2, test_pre3, w=[1/3, 1/3, 1/3]):
    Weighted_result = w[0]*pd.Series(test_pre1)+w[1]*pd.Series(test_pre2)+w[2]*pd.Series(test_pre3)
    return Weighted_result

#Compute the MAE of each model's predictions
print('Pred1 MAE:', metrics.mean_absolute_error(y_test_true, test_pre1))
print('Pred2 MAE:', metrics.mean_absolute_error(y_test_true, test_pre2))
print('Pred3 MAE:', metrics.mean_absolute_error(y_test_true, test_pre3))

Pred1 MAE: 0.1750000000000001
Pred2 MAE: 0.07499999999999993
Pred3 MAE: 0.10000000000000009

##Compute the MAE of the weighted result
w = [0.3,0.4,0.3] #define the weights
Weighted_pre = Weighted_method(test_pre1, test_pre2, test_pre3,w)
print('Weighted_pre MAE:',metrics.mean_absolute_error(y_test_true, Weighted_pre))

#The weighted result improves on each individual model; we call this simple weighted averaging

Weighted_pre MAE: 0.05750000000000027
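In practice the weights are usually tuned on held-out data rather than fixed by hand. A minimal brute-force sketch, reusing Weighted_method and the toy data above (the 0.1 grid step is an arbitrary choice):

best_w, best_mae = None, float('inf')
#Enumerate weight pairs on a coarse grid; the third weight is forced to 1 - w1 - w2
for w1, w2 in itertools.product(np.arange(0, 1.01, 0.1), repeat=2):
    w3 = 1 - w1 - w2
    if w3 < 0:
        continue
    pred = Weighted_method(test_pre1, test_pre2, test_pre3, [w1, w2, w3])
    mae = metrics.mean_absolute_error(y_test_true, pred)
    if mae < best_mae:
        best_w, best_mae = [w1, w2, w3], mae
print('best w:', best_w, 'best MAE:', best_mae)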

#There are also some special variants, e.g. mean averaging and median averaging
## Define the mean of the results
def Mean_method(test_pre1, test_pre2, test_pre3):
    Mean_result = pd.concat([pd.Series(test_pre1),pd.Series(test_pre2),pd.Series(test_pre3)],axis=1).mean(axis=1)
    return Mean_result

Mean_pre = Mean_method(test_pre1,test_pre2,test_pre3)
print('Mean_pre MAE:', metrics.mean_absolute_error(y_test_true, Mean_pre))

Mean_pre MAE: 0.06666666666666693

## Define the median of the results
def Median_method(test_pre1, test_pre2, test_pre3):
    Median_result = pd.concat([pd.Series(test_pre1),pd.Series(test_pre2),pd.Series(test_pre3)],axis=1).median(axis=1)
    return Median_result
    
Median_pre = Median_method(test_pre1,test_pre2,test_pre3)
print('Median_pre MAE:',metrics.mean_absolute_error(y_test_true, Median_pre))

Median_pre MAE: 0.07500000000000007

##5.4.1-2 Stacking fusion (regression)

def Stacking_method(train_reg1,train_reg2,train_reg3,y_train_true,test_pre1,test_pre2,test_pre3,
                    model_L2= linear_model.LinearRegression()):
    model_L2.fit(pd.concat([pd.Series(train_reg1),pd.Series(train_reg2),pd.Series(train_reg3)],axis=1).values,
                 y_train_true)
    Stacking_result = model_L2.predict(pd.concat([pd.Series(test_pre1),pd.Series(test_pre2),pd.Series(test_pre3)],
                                                 axis=1).values)
    return Stacking_result

##Generate some simple sample data; train_regi denotes the i-th model's predictions on the training set
train_reg1 = [3.2, 8.2, 9.1, 5.2]
train_reg2 = [2.9, 8.1, 9.0, 4.9]
train_reg3 = [3.1, 7.9, 9.2, 5.0]
#y_train_true denotes the ground-truth training values
y_train_true = [3, 8, 9, 5]

test_pre1 = [1.2, 3.2, 2.1, 6.2]
test_pre2 = [0.9, 3.1, 2.0, 5.9]
test_pre3 = [1.1, 2.9, 2.2, 6.0]

# y_test_true denotes the ground-truth test values
y_test_true= [1, 3, 2, 6]

model_L2 = linear_model.LinearRegression()
Stacking_pre = Stacking_method(train_reg1,train_reg2,train_reg3,y_train_true,
                              test_pre1,test_pre2,test_pre3,model_L2)
print('Stacking_pre MAE:',metrics.mean_absolute_error(y_test_true, Stacking_pre))
#The stacked result improves further on the earlier fusions.
#Note that the second-level stacking model should not be too complex, otherwise it will overfit the training set and fail to perform well on the test set.

Stacking_pre MAE: 0.04213483146067476
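This warning is also why, on real data, the level-2 inputs are usually generated out-of-fold (as the 5-fold classification example in 5.4.2-2 below does) rather than by fitting the base models on the full training set. A minimal regression sketch under that assumption; oof_stacking is a hypothetical helper that expects numpy arrays:

import numpy as np
from sklearn import linear_model
from sklearn.model_selection import KFold

def oof_stacking(models, X_train, y_train, X_test, n_splits=5):
    #Out-of-fold level-1 predictions: every training row is predicted by a model
    #that never saw it, so the level-2 learner is not trained on in-sample fits
    oof = np.zeros((len(X_train), len(models)))
    test_meta = np.zeros((len(X_test), len(models)))
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=0)
    for j, m in enumerate(models):
        fold_preds = np.zeros((len(X_test), n_splits))
        for i, (tr, va) in enumerate(kf.split(X_train)):
            m.fit(X_train[tr], y_train[tr])
            oof[va, j] = m.predict(X_train[va])
            fold_preds[:, i] = m.predict(X_test)
        test_meta[:, j] = fold_preds.mean(axis=1)  #average the fold models on the test set
    level2 = linear_model.LinearRegression().fit(oof, y_train)
    return level2.predict(test_meta)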

5.4.2 Classification model fusion

##5.4.2-1 Voting

#Principle: majority rule; voting comes in two forms, hard voting and soft voting
'''Hard voting: each model votes directly, without distinguishing the relative importance of the models' outputs; the class with the most votes is the final prediction'''



iris = datasets.load_iris()

x=iris.data
y=iris.target
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3)

clf1 = XGBClassifier(learning_rate=0.1, n_estimators=150, max_depth=3, min_child_weight=2,subsample=0.7,
                    colsample_bytree=0.6, objective='binary:logistic')
clf2 = RandomForestClassifier(n_estimators=50, max_depth=1, min_samples_split=4,
                             min_samples_leaf=63,oob_score=True)
clf3 = SVC(C=0.1)

#Hard voting
eclf = VotingClassifier(estimators=[('xgb',clf1), ('rf', clf2), ('svc', clf3)], voting='hard')
for clf, label in zip([clf1, clf2, clf3, eclf], ['XGBBoosting', 'Random Forest', 'SVM','Ensemble']):
    scores = cross_val_score(clf, x, y, cv=5, scoring='accuracy')
    print("Accuracy: %0.2f (+/- %0.2f)[%s]" % (scores.mean(), scores.std(), label))

Accuracy: 0.96 (+/- 0.02)[XGBBoosting]
Accuracy: 0.33 (+/- 0.00)[Random Forest]
Accuracy: 0.95 (+/- 0.03)[SVM]
Accuracy: 0.96 (+/- 0.02)[Ensemble]

"""软投票:和应投票原理相同,增加了设置权重的功能,可以为不同模型设置不同权重,进而区别模型不同的重要度"""
x=iris.data
y=iris.target
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3)
clf1 = XGBClassifier(learning_rate=0.1, n_estimators=150, max_depth=3, min_child_weight=2,subsample=0.8,
                    colsample_bytree=0.8, objective='binary:logistic')
clf2 = RandomForestClassifier(n_estimators=50, max_depth=1, min_samples_split=4,
                             min_samples_leaf=63,oob_score=True)
clf3 = SVC(C=0.1, probability=True)


#Soft voting
eclf = VotingClassifier(estimators=[('xgb', clf1),('rf', clf2), ('svc', clf3)],voting='soft',weights=[2,1,1])
clf1.fit(x_train, y_train)

for clf, label in zip([clf1,clf2,clf3,eclf], ['XGBBoosting','Random Forest', 'SVM','Ensemble']):
    scores = cross_val_score(clf,x,y, cv=5, scoring='accuracy')
    print("Accuracy: %0.2f (+/- %0.2f)[%s]" % (scores.mean(), scores.std(),label))

Accuracy: 0.96 (+/- 0.02)[XGBBoosting]
Accuracy: 0.33 (+/- 0.00)[Random Forest]
Accuracy: 0.95 (+/- 0.03)[SVM]
Accuracy: 0.96 (+/- 0.02)[Ensemble]
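For intuition, soft voting reduces to a weighted average of the predicted class probabilities followed by an argmax. A minimal hand-rolled sketch with hypothetical probability rows for one sample:

#Hypothetical class probabilities from three models for a single sample
p1 = np.array([0.6, 0.3, 0.1])
p2 = np.array([0.4, 0.5, 0.1])
p3 = np.array([0.3, 0.4, 0.3])

w_soft = np.array([2, 1, 1], dtype=float)  #same weights as the VotingClassifier above
avg = (w_soft[0]*p1 + w_soft[1]*p2 + w_soft[2]*p3) / w_soft.sum()
print(avg, '-> predicted class:', avg.argmax())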

##5.4.2-2 Stacking/Blending fusion for classification

'''Stacking is a layered model-ensembling framework.
   Taking two layers as an example: the first layer consists of several base learners whose input is the original training set;
   the second-layer model is then trained on the outputs of the first-layer base learners, yielding the complete stacking model.
   Both layers of the stacking model use all of the training data.
'''
# 5-Fold Stacking

#Create a new dataset (the first 100 iris samples, i.e. a binary problem)
data_0 = iris.data
data = data_0[:100,:]

target_0 = iris.target
target = target_0[:100]

#The individual base models used in the fusion
clfs = [LogisticRegression(solver='lbfgs'),
        RandomForestClassifier(n_estimators=5, n_jobs=-1, criterion='gini'),
        ExtraTreesClassifier(n_estimators=5, n_jobs=-1, criterion='gini'),
        ExtraTreesClassifier(n_estimators=5, n_jobs=-1, criterion='entropy'),
        GradientBoostingClassifier(learning_rate=0.05, subsample=0.5, max_depth=6, n_estimators=5)]

#Hold out part of the data as a test set
X, X_predict,y,y_predict = train_test_split(data, target, test_size=0.3, random_state=2020)

dataset_blend_train = np.zeros((X.shape[0],len(clfs)))
dataset_blend_test = np.zeros((X_predict.shape[0], len(clfs)))

#5-fold stacking
n_splits = 5
skf = StratifiedKFold(n_splits)
skf = skf.split(X,y)
for j, clf in enumerate(clfs):
    #Train each base model in turn
    dataset_blend_test_j = np.zeros((X_predict.shape[0],5))
    for i,(train, test) in enumerate(skf):
        #5-fold cross-training: use the i-th fold for prediction and the remaining folds for training; the predictions become the new feature for the i-th fold
        X_train, y_train, X_test, y_test = X[train], y[train], X[test], y[test]
        clf.fit(X_train, y_train)
        y_submission = clf.predict_proba(X_test)[:,1]
        dataset_blend_train[test,j] = y_submission
        dataset_blend_test_j[:,i] = clf.predict_proba(X_predict)[:,1]
    #For the test set, use the mean of the k fold-models' predictions as the new feature
    dataset_blend_test[:,j] = dataset_blend_test_j.mean(1)
    print("val auc Score: %f" % roc_auc_score(y_predict, dataset_blend_test[:,j]))
    
clf = LogisticRegression(solver='lbfgs')

clf.fit(dataset_blend_train,y)
y_submission= clf.predict_proba(dataset_blend_test)[:,1]

print("Val auc Score of Stacking: %f" % (roc_auc_score(y_predict, y_submission)))

val auc Score: 1.000000
val auc Score: 0.500000
val auc Score: 0.500000
val auc Score: 0.500000
val auc Score: 0.500000
Val auc Score of Stacking: 1.000000

'''Blending
Blending is a multi-layer model fusion scheme similar to stacking;
Main idea:
    Split the original training set into two parts, e.g. 70% as the new training set and the remaining 30% as a holdout set.

Layer 1: train several models on the 70%, predict the labels of the 30% holdout, and also predict the labels of the test set.
Layer 2: train a new model on the 30% holdout, using the layer-1 predictions as features; then feed the layer-1 predictions on the test set into this model for the final prediction.
Pros: 1) simpler than stacking (no k-fold cross-validation is needed to build the stacker features); 2) sidesteps information leakage.
Cons: 1) uses much less data; 2) the blender may overfit; 3) stacking's repeated cross-validation is more robust.

'''

#Create the training dataset
data_0 = iris.data
data = data_0[:100,:]
target_0 = iris.target
target = target_0[:100]

#The individual base models used in the fusion
clfs = [LogisticRegression(solver='lbfgs'),
        RandomForestClassifier(n_estimators=5, n_jobs=-1, criterion='gini'),
        RandomForestClassifier(n_estimators=5, n_jobs=-1, criterion='entropy'),
        ExtraTreesClassifier(n_estimators=5, n_jobs=-1, criterion='gini'),
        GradientBoostingClassifier(learning_rate=0.05, subsample=0.5, max_depth=6, n_estimators=5)]

#Hold out part of the data as a test set
X, X_predict,y,y_predict = train_test_split(data, target, test_size=0.3, random_state=2020)

#Split the training data into two parts, d1 and d2
X_d1, X_d2, y_d1, y_d2 = train_test_split(X, y, test_size=0.5, random_state=2020)
dataset_d1 = np.zeros((X_d2.shape[0],len(clfs)))
dataset_d2 = np.zeros((X_predict.shape[0], len(clfs)))


for j, clf in enumerate(clfs):
    #Train each base model in turn
    clf.fit(X_d1, y_d1)
    y_submission = clf.predict_proba(X_d2)[:,1]
    dataset_d1[:, j] = y_submission
    #For the test set, use this model's predictions directly as the new feature.
    dataset_d2[:, j] = clf.predict_proba(X_predict)[:,1]
    print("val auc Score: %f" % roc_auc_score(y_predict, dataset_d2[:, j]))

#The model used for the second-level fusion
clf = GradientBoostingClassifier(learning_rate=0.02, subsample=0.5, max_depth=6, n_estimators=30)
clf.fit(dataset_d1,y_d2)
y_submission= clf.predict_proba(dataset_d2)[:,1]

print("Val auc Score of Stacking: %f" % (roc_auc_score(y_predict, y_submission)))

val auc Score: 1.000000
val auc Score: 1.000000
val auc Score: 1.000000
val auc Score: 1.000000
val auc Score: 1.000000
Val auc Score of Blending: 1.000000

5.4.2-3 Stacking fusion for classification (with mlxtend)

# Use the iris dataset bundled with scikit-learn as an example
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target

clf1 = KNeighborsClassifier(n_neighbors=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
lr = LogisticRegression()
sclf = StackingClassifier(classifiers=[clf1,clf2, clf3], meta_classifier=lr)

labels = ['KNN', 'Random Forest', 'Naive Bayes', 'Stacking Classifier']
clf_list = [clf1, clf2, clf3, sclf]

fig = plt.figure(figsize=(10,8))
gs = gridspec.GridSpec(2,2)
grid = itertools.product([0,1],repeat=2)

clf_cv_mean = []
clf_cv_std = []
for clf, label, grd in zip(clf_list, labels, grid):
    
    scores = cross_val_score(clf, X, y, cv=3, scoring='accuracy')
    print("Accuracy: %.2f(+/- %.2f) [%s]" %(scores.mean(), scores.std(),label))
    clf_cv_mean.append(scores.mean())
    clf_cv_std.append(scores.std())
    
    clf.fit(X,y)
    ax = plt.subplot(gs[grd[0], grd[1]])
    fig = plot_decision_regions(X=X, y=y, clf=clf)
    plt.title(label)
    
plt.show()

#With 'KNN', 'Random Forest' and 'Naive Bayes' as base models and a 'LogisticRegression' meta-model on top, test performance improves nicely.

[Figure: decision regions of the three base classifiers and the stacking classifier on the iris data]

5.4.3 Some other methods

'''
    Feed the features into a model, transform its predictions, and append them to the original features as new features; then run a model on the enlarged feature set (a stacking variant).
    This can be repeated several times, adding each round of predictions to the final feature set.
'''
def Ensemble_add_feature(train,test,target,clfs):
    
    train_ = np.zeros((train.shape[0], len(clfs)*2))  #two new features per base model
    test_ = np.zeros((test.shape[0], len(clfs)*2))
    
    for j,clf in enumerate(clfs):
        '''Train each base model in turn'''
        
        clf.fit(train,target)
        y_train = clf.predict(train)
        y_test = clf.predict(test)
        
        ## Generate the new features
        train_[:, j*2] = y_train**2
        test_[:, j*2] = y_test**2
        train_[:, j*2+1] = np.exp(y_train)
        test_[:, j*2+1] = np.exp(y_test)
        # print("val auc Score: %f" % r2_score(y_predict, dataset_d2[:, j]))
        print('Method ',j)
              
    train_ = pd.DataFrame(train_)
    test_ = pd.DataFrame(test_)
    return train_, test_

data_0 = iris.data
data = data_0[:100,:]

target_0 = iris.target
target = target_0[:100]

x_train,x_test,y_train,y_test = train_test_split(data,target,test_size=0.3)
x_train = pd.DataFrame(x_train)
x_test = pd.DataFrame(x_test)

#The individual base models used in the fusion
clfs = [LogisticRegression(),
        RandomForestClassifier(n_estimators=5, n_jobs=-1, criterion='gini'),
        ExtraTreesClassifier(n_estimators=5, n_jobs=-1, criterion='gini'),
        ExtraTreesClassifier(n_estimators=5, n_jobs=-1, criterion='entropy'),
        GradientBoostingClassifier(learning_rate=0.05, subsample=0.5, max_depth=6, n_estimators=5)]

New_train,New_test = Ensemble_add_feature(x_train,x_test,y_train,clfs)

clf = LogisticRegression()
clf.fit(New_train, y_train)
y_emb = clf.predict_proba(New_test)[:,1]

print("Val auc Score of stacking: %f" % (roc_auc_score(y_test, y_emb)))

Method 0
Method 1
Method 2
Method 3
Method 4
Val auc Score of stacking: 1.000000

5.4.4 An example on this competition's data

import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import warnings
import seaborn as sns

warnings.filterwarnings("ignore")
%matplotlib inline
#忽略警告


import itertools
import matplotlib.gridspec as gridspec
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.model_selection import StratifiedKFold, KFold


from sklearn import linear_model
from sklearn import preprocessing
from sklearn.svm import SVR
from sklearn.decomposition import PCA,FastICA,FactorAnalysis,SparsePCA

import lightgbm as lgb
import xgboost as xgb
from sklearn.model_selection import GridSearchCV,cross_val_score
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier,GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error
#Load the data
Train_data = pd.read_csv(r'D:\study\OLL\used_car_train_20200313.csv', sep=' ')
TestA_data = pd.read_csv(r'D:\study\OLL\used_car_testA_20200313.csv', sep=' ')

print(Train_data.shape)
print(TestA_data.shape)

Train_data.head()
numerical_cols = Train_data.select_dtypes(exclude = 'object').columns
print(numerical_cols)
feature_cols = [col for col in numerical_cols if col not in ['SaleID','name','regDate','price']]

X_data = Train_data[feature_cols]
Y_data = Train_data['price']

X_test = TestA_data[feature_cols]

print('X train shape:', X_data.shape)
print('X test shape:',X_test.shape)
def Sta_inf(data):
    print('_min',np.min(data))
    print('_max',np.max(data))
    print('_mean',np.mean(data))
    print('_ptp',np.ptp(data))
    print('_std',np.std(data))
    print('_var',np.var(data))
    
print('Sta of label:')
Sta_inf(Y_data)
def build_model_lr(x_train,y_train):
    reg_model = linear_model.LinearRegression()
    reg_model.fit(x_train,y_train)
    return reg_model

def build_model_ridge(x_train,y_train):
    reg_model = linear_model.Ridge(alpha=0.8) #alphas=range(1,100,5)
    reg_model.fit(x_train,y_train)
    return reg_model

def build_model_lasso(x_train,y_train):
    reg_model = linear_model.LassoCV()
    reg_model.fit(x_train,y_train)
    return reg_model

def build_model_gbdt(x_train,y_train):
    estimator=GradientBoostingRegressor(loss='ls',subsample=0.85, max_depth=5,n_estimators=100)
    param_grid = {
        'learning_rate':[0.05,0.08,0.1,0.2],
        }
    gbdt = GridSearchCV(estimator, param_grid,cv=3)
    gbdt.fit(x_train,y_train)
    print(gbdt.best_params_)
    return gbdt

def build_model_lgb(x_train,y_train):
    estimator = lgb.LGBMRegressor(num_leaves=64, n_estimators=100)
    param_grid = {
        'learning_rate':[0.01, 0.05, 0.1],
    }
    gbm = GridSearchCV(estimator, param_grid)
    gbm.fit(x_train, y_train)
    return gbm
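The code below calls build_model_xgb, which this post never defines; a minimal sketch by analogy with build_model_lgb (the parameter values are assumptions):

def build_model_xgb(x_train,y_train):
    #Assumed definition: mirrors the xgb.XGBRegressor settings used elsewhere in this post
    model = xgb.XGBRegressor(n_estimators=150, learning_rate=0.1, subsample=0.8,
                             colsample_bytree=0.9, max_depth=7)
    model.fit(x_train, y_train)
    return model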

2) Five-fold cross-validated regression with XGBoost

## xgb
xgr = xgb.XGBRegressor(n_estimators=120, learning_rate=0.1, subsample=0.8,\
                      colsample_bytree=0.9, max_depth=7) #,objective='reg:squarederror'

scores_train = []
scores = []

## 5-fold cross-validation (KFold rather than StratifiedKFold, since the target price is continuous)
sk = KFold(n_splits=5, shuffle=True, random_state=0)
for train_ind,val_ind in sk.split(X_data,Y_data):
    
    train_x=X_data.iloc[train_ind].values
    train_y=Y_data.iloc[train_ind]
    val_x=X_data.iloc[val_ind].values
    val_y=Y_data.iloc[val_ind]
    
    xgr.fit(train_x,train_y)
    pred_train_xgb=xgr.predict(train_x)
    pred_xgb=xgr.predict(val_x)
    
    score_train = mean_absolute_error(train_y,pred_train_xgb)
    scores_train.append(score_train)
    score = mean_absolute_error(val_y,pred_xgb)
    scores.append(score)
    
print('Train mae:',np.mean(scores_train))
print('Val mae',np.mean(scores))
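The same five-fold MAE estimate can also be obtained in a single call; a minimal sketch assuming xgr, X_data and Y_data from above (sklearn negates error scores by convention):

mae_scores = -cross_val_score(xgr, X_data.values, Y_data, scoring='neg_mean_absolute_error',
                              cv=KFold(n_splits=5, shuffle=True, random_state=0))
print('Val mae:', mae_scores.mean())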

3) Split the dataset, then train and predict with several methods

##Split data with val
x_train,x_val,y_train,y_val = train_test_split(X_data,Y_data,test_size=0.3)

##Train and Predict
print('Predict LR...')
model_lr = build_model_lr(x_train,y_train)
val_lr = model_lr.predict(x_val)
subA_lr = model_lr.predict(X_test)

print('Predict Ridge...')
model_ridge = build_model_ridge(x_train,y_train)
val_ridge = model_ridge.predict(x_val)
subA_ridge = model_ridge.predict(X_test)

print('Predict Lasso...')
model_lasso = build_model_lasso(x_train,y_train)
val_lasso = model_lasso.predict(x_val)
subA_lasso = model_lasso.predict(X_test)

print('Predict GBDT...')
model_gbdt = build_model_gbdt(x_train,y_train)
val_gbdt = model_gbdt.predict(x_val)
subA_gbdt = model_gbdt.predict(X_test)
#The two methods that usually work best in competitions
print('Predict XGB...')
model_xgb = build_model_xgb(x_train,y_train)
val_xgb = model_xgb.predict(x_val)
subA_xgb = model_xgb.predict(X_test)

print('Predict lgb...')
model_lgb = build_model_lgb(x_train,y_train)
val_lgb = model_lgb.predict(x_val)
subA_lgb = model_lgb.predict(X_test)

print('Sta inf of lgb:')
Sta_inf(subA_lgb)
# 1) Weighted fusion
def Weighted_method(test_pre1,test_pre2,test_pre3,w=[1/3,1/3,1/3]):
    Weighted_result = w[0]*pd.Series(test_pre1)+w[1]*pd.Series(test_pre2)+w[2]*pd.Series(test_pre3)
    return Weighted_result

## Init the Weight
w = [0.3,0.4,0.3]

##Evaluate accuracy on the validation set
val_pre = Weighted_method(val_lgb,val_xgb,val_gbdt,w)
MAE_Weighted = mean_absolute_error(y_val,val_pre)
print('MAE of Weighted of val:',MAE_Weighted)

#Predictions on the test data
subA = Weighted_method(subA_lgb,subA_xgb,subA_gbdt,w)
print('Sta inf:')
Sta_inf(subA)
##Generate the submission file
sub = pd.DataFrame()
sub['SaleID'] = X_test.index
sub['price'] = subA
sub.to_csv('./sub_Weighted.csv',index=False)

## Compare with a simple LR (linear regression)
val_lr_pred = model_lr.predict(x_val)
MAE_lr = mean_absolute_error(y_val,val_lr_pred)
print('MAE of lr:',MAE_lr)
# 2) Stacking fusion

##Layer 1
train_lgb_pred = model_lgb.predict(x_train)
train_xgb_pred = model_xgb.predict(x_train)
train_gbdt_pred = model_gbdt.predict(x_train)

Stack_X_train = pd.DataFrame()
Stack_X_train['Method 1'] = train_lgb_pred
Stack_X_train['Method 2'] = train_xgb_pred
Stack_X_train['Method 3'] = train_gbdt_pred

Stack_X_val = pd.DataFrame()
Stack_X_val['Method 1'] = val_lgb
Stack_X_val['Method 2'] = val_xgb
Stack_X_val['Method 3'] = val_gbdt

Stack_X_test = pd.DataFrame()
Stack_X_test['Method 1'] = subA_lgb
Stack_X_test['Method 2'] = subA_xgb
Stack_X_test['Method 3'] = subA_gbdt

Stack_X_test.head()
## Level2-method
model_lr_Stacking = build_model_lr(Stack_X_train,y_train)
## Training set
train_pre_Stacking = model_lr_Stacking.predict(Stack_X_train)
print('MAE of Stacking-LR:',mean_absolute_error(y_train,train_pre_Stacking))

##Validation set
val_pre_Stacking = model_lr_Stacking.predict(Stack_X_val)
print('MAE of Stacking-LR:',mean_absolute_error(y_val,val_pre_Stacking))

##Test (prediction) set
print('Predict Stacking-LR...')
subA_Stacking = model_lr_Stacking.predict(Stack_X_test)

## Clip away predictions that are too small
subA_Stacking[subA_Stacking<10] = 10 

sub = pd.DataFrame()
sub['SaleID'] = X_test.index
sub['price'] = subA_Stacking
sub.to_csv('./sub_Stacking.csv',index=False)

print('Sta inf:')
Sta_inf(subA_Stacking)

Lessons learned

1) Result-level fusion: this is the most common fusion approach, and many variants are workable. For example, predictions can be weighted by each model's score, or transformed with log/exp before averaging (a small sketch follows this list). An important precondition for result fusion is that the models' scores should be fairly close while their predictions differ noticeably; fusion then tends to deliver a solid improvement.
2) Feature-level fusion: strictly speaking this feels less like fusion than like partitioning. When training with the same kind of model, the features can be split across different models, with model- or result-level fusion applied afterwards; this sometimes works well too.
3) Model-level fusion: this involves model stacking and design, e.g. adding a stacking layer or feeding some models' outputs in as features, which takes experimentation and thought. Model-level fusion works best when the model types differ to some degree; fusing the same model with different hyperparameters usually yields little gain.
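A minimal sketch of result-level fusion in log space (the arrays are hypothetical model outputs; assumes the predictions are positive, as prices are):

pred1 = np.array([1200., 3400., 980.])   #hypothetical predictions of model 1
pred2 = np.array([1150., 3650., 1010.])  #hypothetical predictions of model 2

#Averaging in log space and mapping back with exp is a geometric mean,
#which damps the influence of occasional very large predictions
fused = np.exp(0.5*np.log(pred1) + 0.5*np.log(pred2))
print(fused)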
