# 用GBDT、XGboost、神经网络生成新的特征
# (Generate new features with GBDT, XGBoost, and a neural network, then
#  feed them to a logistic regression — the classic "trees/net + LR" trick.)

import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
from sklearn.metrics import roc_auc_score

# Load the aggregated user-feature tables and the click-flag label tables.
# All four files are tab-separated and share the USRID key column.
train_agg = pd.read_csv('data/train_agg.csv', sep='\t')
train_flg = pd.read_csv('data/train_flg.csv', sep='\t')
test_agg = pd.read_csv('data/test_agg.csv', sep='\t')
test_flg = pd.read_csv('data/test_flg.csv', sep='\t')
print(train_agg.shape, train_flg.shape, test_agg.shape, test_flg.shape)

# Left-join the labels onto the features by user id, keeping every
# feature row even if it has no matching label.
train_df = pd.merge(train_agg, train_flg, on='USRID', how='left')
test_df = pd.merge(test_agg, test_flg, on='USRID', how='left')
print(train_df.shape, train_df.columns)

# Split into feature matrix and label vector.
# NOTE(review): iloc[:, 0:-3] drops the LAST THREE columns — presumably
# USRID, the label, and one more column; verify this matches the actual
# column order printed above before trusting the feature matrix.
data = train_df.iloc[:, 0:-3]
flag = train_df.iloc[:, -1]
test_data = test_df.iloc[:, 0:-3]
test_flag = test_df.iloc[:, -1]

# Baseline: logistic regression fit directly on the raw aggregated
# features, scored by AUC on the held-out test set.
basic_lm = LogisticRegression()
basic_lm.fit(data, flag)
baseline_scores = basic_lm.predict_proba(test_data)[:, 1]
auc_basic_lm = roc_auc_score(test_flag, baseline_scores)
print("basic lm->auc:", auc_basic_lm)

'''
NET+LR
'''
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.models import Model

# Small MLP for binary classification whose hidden activations will be
# reused as learned features for a downstream logistic regression.
# FIX: input_dim was hard-coded to 29; derive it from the data so the
# network adapts automatically if the feature set changes.
model = Sequential()
model.add(Dense(25, activation='relu', input_dim=data.shape[1]))
model.add(Dense(15, activation='relu', name="Dense_1"))
model.add(Dense(1, activation='sigmoid', name="Dense_2"))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

model.fit(data, flag, epochs=15, batch_size=1000)

# After the weights are trained, build a functional Model from the
# network input to the "Dense_1" layer: a feature extractor that maps
# each sample to its 15-dimensional hidden representation.
dense1_layer_model = Model(inputs=model.input,
                           outputs=model.get_layer('Dense_1').output)
# The truncated model's predictions become the new feature matrices.
dense1_train_output = dense1_layer_model.predict(data)
dense1_test_output = dense1_layer_model.predict(test_data)
print(dense1_train_output.shape, dense1_test_output.shape)

# NET+LR: logistic regression on the 15-d embeddings produced by the
# network's hidden layer instead of the raw features.
net_lm = LogisticRegression()
net_lm.fit(dense1_train_output, flag)
net_scores = net_lm.predict_proba(dense1_test_output)[:, 1]
auc_net_lm = roc_auc_score(test_flag, net_scores)
print("net+lm->auc:", auc_net_lm)


'''
GBDT+LR
'''

# One-hot encode the leaf index each boosted tree assigns to a sample,
# then fit a logistic regression on those sparse binary features.
n_estimator = 100
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd.fit(data, flag)

# grd.apply(X) returns (n_samples, n_estimators, 1) leaf indices;
# drop the trailing axis before encoding. Computed once and reused.
train_leaves = grd.apply(data)[:, :, 0]
test_leaves = grd.apply(test_data)[:, :, 0]

grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd_lm.fit(grd_enc.fit_transform(train_leaves), flag)

y_pred_grd_lm = grd_lm.predict_proba(grd_enc.transform(test_leaves))[:, 1]
auc_grd_lm = roc_auc_score(test_flag, y_pred_grd_lm)
print("gbdt+lm->auc:", auc_grd_lm)

'''
XGB+LR
'''
# Same leaf-index trick as GBDT+LR, with XGBoost as the tree ensemble.
XGB = xgb.XGBClassifier(nthread=4, learning_rate=0.08,
                        n_estimators=100, colsample_bytree=0.5)
XGB.fit(data, flag)

# XGB.apply(X) returns (n_samples, n_estimators) leaf indices —
# no trailing axis to strip, unlike sklearn's GradientBoosting.apply.
OHE = OneHotEncoder()
OHE.fit(XGB.apply(data))

# FIX: penalty='l1' is incompatible with the default 'lbfgs' solver
# (ValueError in scikit-learn >= 0.22); 'liblinear' supports L1 and is
# well suited to the sparse one-hot leaf features.
LR = LogisticRegression(n_jobs=4, C=0.1, penalty='l1', solver='liblinear')
LR.fit(OHE.transform(XGB.apply(data)), flag)

Y_pred_xgb_lm = LR.predict_proba(OHE.transform(XGB.apply(test_data)))[:, 1]
auc_xgb_lm = roc_auc_score(test_flag, Y_pred_xgb_lm)
print("xgb+lm->auc:", auc_xgb_lm)

# FIX: `auc` was never defined anywhere in the script (NameError at
# runtime); the value intended here is the XGB+LR AUC computed above.
print('XGBoost + LogisticRegression: ', auc_xgb_lm)

# (Removed: CSDN blog-page boilerplate — vote/favorite counters and
#  "red packet" payment UI text — accidentally captured when this script
#  was copied from the web page. It is not part of the program.)