Learning from the "Mining Happiness" competition

Happiness prediction, leaderboard score: 0.47593 (MSE, lower is better)

import sys
import gc
import pandas as pd
import matplotlib
import numpy as np
import scipy as sp
from scipy import stats
from scipy.stats import norm,skew,kurtosis #for some statistics
import IPython
from IPython import display
import sklearn
import random
import time
import pickle
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['FangSong'] # default font that can render CJK characters
mpl.rcParams['axes.unicode_minus'] = False # render minus signs correctly instead of as boxes when saving figures
%matplotlib inline
#ignore warnings
import warnings
warnings.filterwarnings('ignore')
input_path = r'E:\Document\aliyun_tianchi\task 04'  # raw string: in a plain string the backslashes would be treated as escapes
train_data = pd.read_csv(input_path + r'\happiness_train_complete.csv', sep=',', encoding='latin-1')
test_data = pd.read_csv(input_path + r'\happiness_test_complete.csv', sep=',', encoding='latin-1')
submit_example = pd.read_csv(input_path + r'\happiness_submit.csv', sep=',', encoding='latin-1')


print('train shape:',train_data.shape)
print('test shape:',test_data.shape)
print('sample shape:',submit_example.shape)
#print(train_data)
train shape: (8000, 140)
test shape: (2968, 139)
sample shape: (2968, 2)

train_data = train_data[train_data["happiness"]!=-8].reset_index(drop=True)  # drop invalid labels; note drop=True to re-index
print('train shape:',train_data.shape)
train_data_copy = train_data.copy()
target_col = "happiness"
target = train_data_copy[target_col]#.apply(lambda x:np.log1p(x))

del train_data_copy[target_col]

train_shape = train_data.shape[0]

data = pd.concat([train_data_copy,test_data],axis=0,ignore_index=True)  # concatenate train and test sets
data.head()
print('data shape:',data.shape)

train shape: (7988, 140)
data shape: (10956, 139)
#make features: count the special negative answer codes (invalid answers) per row
def getres1(row):
    return len([x for x in row.values if type(x)==int and x<0])   # any negative code

def getres2(row):
    return len([x for x in row.values if type(x)==int and x==-8])

def getres3(row):
    return len([x for x in row.values if type(x)==int and x==-1])

def getres4(row):
    return len([x for x in row.values if type(x)==int and x==-2])

def getres5(row):
    return len([x for x in row.values if type(x)==int and x==-3])

# questionnaire validity: total number of invalid answers per respondent
data['neg1'] = data.apply(getres1, axis=1)
print(data.loc[data['neg1']>20, 'neg1'])  # inspect the heavy tail before capping
data.loc[data['neg1']>20,'neg1'] = 20  # cap (smooth) extreme counts
391      27
751      31
1110     25
1776     25
2049     27
2059     23
3872     23
4263     26
4641     24
4654     24
4722     48
4987     23
5284     22
5604     27
5806     33
5861     21
5943     21
6047     28
6094     23
6253     29
6381     24
6589     22
6927     21
7440     24
7522     21
8550     21
8597     22
8789     21
9028     21
9029     23
9061     30
9148     22
9355     21
9687     22
10054    21
10064    21
10112    24
10121    21
10387    27
10596    28
10649    21
Name: neg1, dtype: int64
# the remaining answer-code counts (getres1/neg1 were already handled above)
data['neg2'] = data.apply(getres2, axis=1)
data['neg3'] = data.apply(getres3, axis=1)
data['neg4'] = data.apply(getres4, axis=1)
data['neg5'] = data.apply(getres5, axis=1)
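These per-row apply calls loop over every cell in Python, which is slow on a 10956 × 140 frame. A vectorized sketch of the same idea, restricted to numeric columns (not exactly equivalent: the apply version only counts values whose Python type is int, so float columns are skipped there):

num = data.select_dtypes(include=[np.number])  # numeric columns only
neg1_fast = (num < 0).sum(axis=1)              # count of any negative code per row
neg2_fast = (num == -8).sum(axis=1)            # count of -8 codes per row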

data.loc[data['health_problem']<0,'health_problem'] = 0
data.loc[data['religion']<0,'religion'] = 1


data.loc[data['religion_freq']<0,'religion_freq'] = 1

data.loc[data['edu']<0,'edu'] = 0

data.loc[data['edu_status']<0,'edu_status'] = 0

data.loc[data['income']<0,'income'] = 0

data.loc[data['s_income']<0,'s_income']= 0

# weight correction: weight_jin is in jin (0.5 kg); implausibly low entries look like kg and are doubled
data.loc[(data['weight_jin']<=80)&(data['height_cm']>=160),'weight_jin'] = data['weight_jin']*2
data.loc[data['weight_jin']<=60,'weight_jin'] = data['weight_jin']*2
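With height and weight cleaned, a BMI feature would be a natural follow-up, and the commented-out age_BMI_mean line further down suggests one was tried. A possible sketch, left commented out like the author's other experiments (the BMI column name is mine; weight_jin/2 converts jin to kg):

# data['BMI'] = (data['weight_jin'] / 2) / ((data['height_cm'] / 100) ** 2)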


data.loc[data['family_income']<=0,'family_income']=0
data.loc[data['inc_exp']<=0,'inc_exp']= 0

data.loc[data['equity']<0,'equity'] = 0
data.loc[data['social_neighbor']<0,'social_neighbor'] = 0
data.loc[data['class_10_after']<0,'class_10_after'] = 0
data.loc[data['class_10_before']<0,'class_10_before'] = 0
data.loc[data['class']<0,'class'] = 0
data.loc[data['class_14']<0,'class_14'] = 0
data.loc[data['family_m']<0,'family_m'] = 1
data.loc[data['health']<0,'health'] = 0
data.loc[data['neighbor_familiarity']<0,'neighbor_familiarity'] = 0
data.loc[data['inc_ability']<0,'inc_ability'] = np.nan
data.loc[data['status_peer']<0,'status_peer'] = 2
data.loc[data['status_3_before']<0,'status_3_before'] = 2
data.loc[data['edu_yr']<0,'edu_yr'] = 0

# additional cleaning (mostly left commented out by the author)
#data.loc[data['marital_1st']<0,'marital_1st']= np.nan
data['survey_time'] = pd.to_datetime(data['survey_time'])  # must run: .dt.month / .dt.hour are used below
#data.loc[data['marital_now']<0,'marital_now']= np.nan
#data.loc[data['join_party']<0,'join_party']= np.nan
#for i in range(1,12+1):
#    data.loc[data['leisure_'+str(i)]<0,'leisure_'+str(i)] = 6
#for i in range(1,9+1):
#    data.loc[data['public_service_'+str(i)]<0,'public_service_'+str(i)] = data['public_service_'+str(i)].dropna().mode().values
#for i in range(1,13+1):
#    data.loc[data['trust_'+str(i)]<0,'trust_'+str(i)] = data['trust_'+str(i)].dropna().mode().values


# age at graduation
data['edubir'] = data['edu_yr'] - data['birth']
# age at survey time
data['survey_age'] = 2015-data['birth']
# years between graduation and the survey
data['survey_edu_age'] = 2015-data['edu_yr']

# survey month and hour
data['survey_month'] = data['survey_time'].dt.month
data['survey_hour'] = data['survey_time'].dt.hour
# age at first marriage
data['marital_1stbir'] = data['marital_1st'] - data['birth']
# age at current marriage
data['marital_nowtbir'] = data['marital_now'] - data['birth']
# remarriage indicator (nonzero if the current marriage is not the first)
data['mar'] = data['marital_nowtbir'] - data['marital_1stbir']
# spouse's age at current marriage
data['marital_sbir'] = data['marital_now']-data['s_birth']
# age gap between spouses
data['age_'] = data['marital_nowtbir'] - data['marital_sbir']

# income ratios
data['income/s_income'] = data['income']/(data['s_income']+1)
data['income+s_income'] = data['income']+(data['s_income']+1)
data['income/family_income'] = data['income']/(data['family_income']+1)
data['all_income/family_income'] = (data['income']+data['s_income'])/(data['family_income']+1)
data['income/inc_exp'] = data['income']/(data['inc_exp']+1)
data['family_income/m'] = data['family_income']/(data['family_m']+0.01)

# income / floor-area ratios
data['income/floor_area'] = data['income']/(data['floor_area']+0.01)
data['all_income/floor_area'] = (data['income']+data['s_income'])/(data['floor_area']+0.01)
data['family_income/floor_area'] = data['family_income']/(data['floor_area']+0.01)

data['income/m'] = data['floor_area']/(data['family_m']+0.01)  # note: despite the name, this is floor area per family member

#class
data['class_10_diff'] = (data['class_10_after'] - data['class_10_before'])
data['class_diff'] = data['class'] - data['class_10_before']
data['class_14_diff'] = data['class'] - data['class_14']


#province mean
data['province_income_mean'] = data.groupby(['province'])['income'].transform('mean').values
data['province_family_income_mean'] = data.groupby(['province'])['family_income'].transform('mean').values
data['province_equity_mean'] = data.groupby(['province'])['equity'].transform('mean').values
data['province_depression_mean'] = data.groupby(['province'])['depression'].transform('mean').values
data['province_floor_area_mean'] = data.groupby(['province'])['floor_area'].transform('mean').values

#city   mean
data['city_income_mean'] = data.groupby(['city'])['income'].transform('mean').values
data['city_family_income_mean'] = data.groupby(['city'])['family_income'].transform('mean').values
data['city_equity_mean'] = data.groupby(['city'])['equity'].transform('mean').values
data['city_depression_mean'] = data.groupby(['city'])['depression'].transform('mean').values
data['city_floor_area_mean'] = data.groupby(['city'])['floor_area'].transform('mean').values

#county  mean
data['county_income_mean'] = data.groupby(['county'])['income'].transform('mean').values
data['county_family_income_mean'] = data.groupby(['county'])['family_income'].transform('mean').values
data['county_equity_mean'] = data.groupby(['county'])['equity'].transform('mean').values
data['county_depression_mean'] = data.groupby(['county'])['depression'].transform('mean').values
data['county_floor_area_mean'] = data.groupby(['county'])['floor_area'].transform('mean').values

#ratio: relative to the county averages
data['income/county'] = data['income']/(data['county_income_mean']+1)                                      
data['family_income/county'] = data['family_income']/(data['county_family_income_mean']+1)   
data['equity/county'] = data['equity']/(data['county_equity_mean']+1)       
data['depression/county'] = data['depression']/(data['county_depression_mean']+1)                                                
data['floor_area/county'] = data['floor_area']/(data['county_floor_area_mean']+1)   

#age   mean
data['age_income_mean'] = data.groupby(['survey_age'])['income'].transform('mean').values
data['age_family_income_mean'] = data.groupby(['survey_age'])['family_income'].transform('mean').values
data['age_equity_mean'] = data.groupby(['survey_age'])['equity'].transform('mean').values
data['age_depression_mean'] = data.groupby(['survey_age'])['depression'].transform('mean').values
data['age_floor_area_mean'] = data.groupby(['survey_age'])['floor_area'].transform('mean').values
data['age_health_mean'] = data.groupby(['survey_age','gender'])['health'].transform('mean').values
data['age_edu_mean'] = data.groupby(['survey_age','gender'])['edu'].transform('mean').values



#age/gender mean (note: these reuse the age_*_mean column names, overwriting the age-only means assigned just above)
data['age_income_mean'] = data.groupby(['survey_age','gender'])['income'].transform('mean').values
data['age_family_income_mean'] = data.groupby(['survey_age','gender'])['family_income'].transform('mean').values
data['age_equity_mean'] = data.groupby(['survey_age','gender'])['equity'].transform('mean').values
data['age_depression_mean'] = data.groupby(['survey_age','gender'])['depression'].transform('mean').values
data['age_floor_area_mean'] = data.groupby(['survey_age','gender'])['floor_area'].transform('mean').values
# data['age_BMI_mean'] = data.groupby(['survey_age','gender'])['BMI'].transform('mean').values
data['age_gender_health_mean'] = data.groupby(['survey_age','gender'])['health'].transform('mean').values


#class mean (grouped by class only; the city_ prefix in the column names is historical)
data['city_class_income_mean'] = data.groupby(['class'])['income'].transform('mean').values
data['city_class_family_income_mean'] = data.groupby(['class'])['family_income'].transform('mean').values
data['city_class_equity_mean'] = data.groupby(['class'])['equity'].transform('mean').values
data['city_class_depression_mean'] = data.groupby(['class'])['depression'].transform('mean').values
data['city_class_floor_area_mean'] = data.groupby(['class'])['floor_area'].transform('mean').values
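The province/city/county/age/class blocks repeat the same transform('mean') pattern. As a possible refactor (the helper name add_group_means is mine, not part of the original code), the repetition collapses to:

def add_group_means(df, key_cols, value_cols, prefix):
    # broadcast each group's mean back onto every row of that group
    for col in value_cols:
        df[prefix + '_' + col + '_mean'] = df.groupby(key_cols)[col].transform('mean').values

# e.g. the county block above is equivalent to:
# add_group_means(data, ['county'], ['income','family_income','equity','depression','floor_area'], 'county')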


# leisure index
leisure_fea_lis = ['leisure_'+str(i) for i in range(1,13)]
data['leisure_sum'] = data[leisure_fea_lis].sum(axis=1)  # skew; axis=1 gives row sums -- without it the assignment fills the column with NaN

# public-service satisfaction index
public_service_fea_lis = ['public_service_'+str(i) for i in range(1,10)]
data['public_service_'] = data[public_service_fea_lis].sum(axis=1)  # skew

data['city_public_service__mean'] = data.groupby(['city'])['public_service_'].transform('mean').values
data['public_service_cit'] = data['public_service_']-data['city_public_service__mean']


# trust index (trust_1 .. trust_13, matching the commented loop above)
trust_fea_lis = ['trust_'+str(i) for i in range(1,14)]
data['trust_'] = data[trust_fea_lis].sum(axis=1)  # skew
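The #skew notes flag these sums for a distribution check; a quick diagnostic sketch using the skew already imported from scipy.stats (purely exploratory, it does not feed the model):

for col in ['leisure_sum', 'public_service_', 'trust_']:
    print(col, 'skew:', skew(data[col].dropna()))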


# years since joining the party (left commented out; note it would overwrite the survey_edu_age feature created above)
#data['survey_edu_age'] = 2015-data['join_party']
#data['survey_edu_age'].fillna(0,inplace=True)
#del data['join_party']
#del data['property_other']
#del data['id']

print(data.columns)
Index(['survey_type', 'province', 'city', 'county', 'survey_time', 'gender',
       'birth', 'nationality', 'religion', 'religion_freq',
       ...
       'city_class_income_mean', 'city_class_family_income_mean',
       'city_class_equity_mean', 'city_class_depression_mean',
       'city_class_floor_area_mean', 'leisure_sum', 'public_service_',
       'city_public_service__mean', 'public_service_cit', 'trust_'],
      dtype='object', length=202)
data.fillna(-1,inplace=True)
print('shape',data.shape)
data.head()
shape (10956, 202)
   survey_type  province  city  county          survey_time  gender  birth  nationality  religion  religion_freq  ...  city_class_income_mean  city_class_family_income_mean  city_class_equity_mean  city_class_depression_mean  city_class_floor_area_mean  leisure_sum  public_service_  city_public_service__mean  public_service_cit  trust_
0            1        12    32      59  2015-08-04 14:18:00       1   1959            1         1              1  ...            24641.040816                   39486.643537                3.026531                    3.650340                  107.158027         -1.0             -1.0                       -1.0                -1.0    -1.0
1            2        18    52      85  2015-07-21 15:04:00       1   1992            1         1              1  ...            43440.000000                  104062.576164                3.337806                    4.023678                  124.915075         -1.0             -1.0                       -1.0                -1.0    -1.0
2            2        29    83     126  2015-07-21 13:24:00       2   1967            1         0              3  ...            32394.183206                   70886.476441                3.281390                    3.943143                  122.765315         -1.0             -1.0                       -1.0                -1.0    -1.0
3            2        10    28      51  2015-07-25 17:33:00       2   1943            1         1              1  ...            32394.183206                   70886.476441                3.281390                    3.943143                  122.765315         -1.0             -1.0                       -1.0                -1.0    -1.0
4            1         7    18      36  2015-08-10 09:50:00       2   1994            1         1              1  ...            15683.065089                   31480.699052                2.732544                    3.363314                   97.374911         -1.0             -1.0                       -1.0                -1.0    -1.0

5 rows × 202 columns

# use all numeric features (drop survey_time and any remaining object-typed columns)
use_fea = [col for col in data.columns if col!='survey_time' and data[col].dtype!=object]
features = data[use_fea].columns
X_train = data[:train_shape][use_fea].values
y_train = target
X_test = data[train_shape:][use_fea].values

from sklearn.metrics import roc_auc_score, roc_curve, mean_squared_error,mean_absolute_error, f1_score
import lightgbm as lgb
import xgboost as xgb
import os
from sklearn.linear_model import BayesianRidge
from sklearn.model_selection import  KFold, StratifiedKFold,GroupKFold, RepeatedKFold
import logging

##### lgb
param = {
'num_leaves': 80,
'min_data_in_leaf': 40,   # takes precedence over min_child_samples (an alias), hence the LightGBM warning in the log
'objective':'regression',
'max_depth': -1,
'learning_rate': 0.1,
"min_child_samples": 30,
"boosting": "gbdt",
"feature_fraction": 0.9,
"bagging_freq": 2,
"bagging_fraction": 0.9,
"bagging_seed": 2029,
"metric": 'mse',
"lambda_l1": 0.1,
"lambda_l2": 0.2, 
"verbosity": -1}
folds = KFold(n_splits=10, shuffle=True, random_state=1016)   # plain KFold; a StratifiedKFold variant is sketched after the training log
oof_lgb = np.zeros(len(X_train))
predictions_lgb = np.zeros(len(X_test))

for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)):
    print("fold n°{}".format(fold_+1))
    trn_data = lgb.Dataset(X_train[trn_idx], y_train[trn_idx])
    val_data = lgb.Dataset(X_train[val_idx], y_train[val_idx])

    num_round = 10000
    clf = lgb.train(param, trn_data, num_round, valid_sets = [trn_data, val_data], verbose_eval=200, early_stopping_rounds = 200)
    oof_lgb[val_idx] = clf.predict(X_train[val_idx], num_iteration=clf.best_iteration)
    predictions_lgb += clf.predict(X_test, num_iteration=clf.best_iteration) / folds.n_splits

print("CV score: {:<8.8f}".format(mean_squared_error(oof_lgb, target)))
fold n°1
[LightGBM] [Warning] min_data_in_leaf is set=40, min_child_samples=30 will be ignored. Current value: min_data_in_leaf=40
Training until validation scores don't improve for 200 rounds
[200]	training's l2: 0.0178584	valid_1's l2: 0.440415
Early stopping, best iteration is:
[53]	training's l2: 0.17148	valid_1's l2: 0.42822
fold n°2
[LightGBM] [Warning] min_data_in_leaf is set=40, min_child_samples=30 will be ignored. Current value: min_data_in_leaf=40
Training until validation scores don't improve for 200 rounds
[200]	training's l2: 0.0172029	valid_1's l2: 0.471802
Early stopping, best iteration is:
[78]	training's l2: 0.112334	valid_1's l2: 0.465844
fold n°3
[LightGBM] [Warning] min_data_in_leaf is set=40, min_child_samples=30 will be ignored. Current value: min_data_in_leaf=40
Training until validation scores don't improve for 200 rounds
[200]	training's l2: 0.0174407	valid_1's l2: 0.524768
Early stopping, best iteration is:
[48]	training's l2: 0.184526	valid_1's l2: 0.513957
fold n°4
[LightGBM] [Warning] min_data_in_leaf is set=40, min_child_samples=30 will be ignored. Current value: min_data_in_leaf=40
Training until validation scores don't improve for 200 rounds
[200]	training's l2: 0.016992	valid_1's l2: 0.509479
Early stopping, best iteration is:
[32]	training's l2: 0.247561	valid_1's l2: 0.496666
fold n°5
[LightGBM] [Warning] min_data_in_leaf is set=40, min_child_samples=30 will be ignored. Current value: min_data_in_leaf=40
Training until validation scores don't improve for 200 rounds
[200]	training's l2: 0.017454	valid_1's l2: 0.453909
Early stopping, best iteration is:
[44]	training's l2: 0.200869	valid_1's l2: 0.441661
fold n°6
[LightGBM] [Warning] min_data_in_leaf is set=40, min_child_samples=30 will be ignored. Current value: min_data_in_leaf=40
Training until validation scores don't improve for 200 rounds
[200]	training's l2: 0.0171639	valid_1's l2: 0.468323
Early stopping, best iteration is:
[60]	training's l2: 0.149475	valid_1's l2: 0.462298
fold n°7
[LightGBM] [Warning] min_data_in_leaf is set=40, min_child_samples=30 will be ignored. Current value: min_data_in_leaf=40
Training until validation scores don't improve for 200 rounds
[200]	training's l2: 0.0172571	valid_1's l2: 0.455683
Early stopping, best iteration is:
[39]	training's l2: 0.219661	valid_1's l2: 0.447699
fold n°8
[LightGBM] [Warning] min_data_in_leaf is set=40, min_child_samples=30 will be ignored. Current value: min_data_in_leaf=40
Training until validation scores don't improve for 200 rounds
[200]	training's l2: 0.0172798	valid_1's l2: 0.528483
Early stopping, best iteration is:
[32]	training's l2: 0.249316	valid_1's l2: 0.507831
fold n°9
[LightGBM] [Warning] min_data_in_leaf is set=40, min_child_samples=30 will be ignored. Current value: min_data_in_leaf=40
Training until validation scores don't improve for 200 rounds
[200]	training's l2: 0.0174847	valid_1's l2: 0.469453
Early stopping, best iteration is:
[47]	training's l2: 0.191068	valid_1's l2: 0.450083
fold n°10
[LightGBM] [Warning] min_data_in_leaf is set=40, min_child_samples=30 will be ignored. Current value: min_data_in_leaf=40
Training until validation scores don't improve for 200 rounds
[200]	training's l2: 0.0172514	valid_1's l2: 0.46314
Early stopping, best iteration is:
[74]	training's l2: 0.119155	valid_1's l2: 0.456972
CV score: 0.46712643
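The comment on the KFold line asks whether StratifiedKFold would help. Since happiness is an ordinal label with imbalanced classes, stratifying keeps each fold's label distribution similar; a minimal sketch of the swap (loop body unchanged, and its effect on the score above was not validated here):

folds = StratifiedKFold(n_splits=10, shuffle=True, random_state=1016)
for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)):
    ...  # identical training code; StratifiedKFold.split additionally uses y_train to balance labels per fold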
#--------------- feature importance
pd.set_option('display.max_columns', None)  # show all columns
# show all rows
pd.set_option('display.max_rows', None)
# widen the value display to 100 characters (default is 50)
pd.set_option('display.max_colwidth', 100)
df = pd.DataFrame(data[use_fea].columns.tolist(), columns=['feature'])
df['importance'] = clf.feature_importance()  # note: clf is only the model from the last CV fold
df = df.sort_values(by='importance', ascending=False)
plt.figure(figsize=(14,28))
sns.barplot(x="importance", y="feature", data=df.head(50))
plt.title('Features importance (last fold)')
plt.tight_layout()

(Figure: horizontal bar plot of the top 50 features by importance.)
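To get importances genuinely averaged over folds (what the original plot title claimed), collect each fold's booster inside the CV loop and average afterwards; a sketch, assuming a models list is appended to in the loop (models.append(clf)):

models = []  # fill inside the CV loop with models.append(clf)
# ... run the 10-fold loop above ...
feat_imp = np.mean([m.feature_importance() for m in models], axis=0)
df_avg = pd.DataFrame({'feature': use_fea, 'importance': feat_imp}).sort_values('importance', ascending=False)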

##### xgb
xgb_params = {'eta': 0.05, 'max_depth': 10, 'subsample': 0.8, 'colsample_bytree': 0.8,
          'objective': 'reg:squarederror',  # current name for the deprecated 'reg:linear'
          'eval_metric': 'rmse', 'nthread': 4}

folds = KFold(n_splits=5, shuffle=True, random_state=2018)
oof_xgb = np.zeros(train_shape)
predictions_xgb = np.zeros(len(X_test))

for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)):
    print("fold n°{}".format(fold_+1))
    trn_data = xgb.DMatrix(X_train[trn_idx], y_train[trn_idx])
    val_data = xgb.DMatrix(X_train[val_idx], y_train[val_idx])

    watchlist = [(trn_data, 'train'), (val_data, 'valid_data')]
    clf = xgb.train(dtrain=trn_data, num_boost_round=20000, evals=watchlist, early_stopping_rounds=200, verbose_eval=100, params=xgb_params)
    oof_xgb[val_idx] = clf.predict(xgb.DMatrix(X_train[val_idx]), ntree_limit=clf.best_ntree_limit)
    predictions_xgb += clf.predict(xgb.DMatrix(X_test), ntree_limit=clf.best_ntree_limit) / folds.n_splits

print("CV score: {:<8.8f}".format(mean_squared_error(oof_xgb, target)))
fold n°1
[0]	train-rmse:3.30166	valid_data-rmse:3.29464
[100]	train-rmse:0.21953	valid_data-rmse:0.68739
[200]	train-rmse:0.08475	valid_data-rmse:0.68507
[300]	train-rmse:0.03203	valid_data-rmse:0.68482
[400]	train-rmse:0.01128	valid_data-rmse:0.68479
[500]	train-rmse:0.00393	valid_data-rmse:0.68485
[530]	train-rmse:0.00284	valid_data-rmse:0.68485
fold n°2
[0]	train-rmse:3.29521	valid_data-rmse:3.32372
[100]	train-rmse:0.22777	valid_data-rmse:0.67676
[200]	train-rmse:0.09615	valid_data-rmse:0.67418
[300]	train-rmse:0.03693	valid_data-rmse:0.67489
[400]	train-rmse:0.01397	valid_data-rmse:0.67483
[418]	train-rmse:0.01185	valid_data-rmse:0.67480
fold n°3
[0]	train-rmse:3.30791	valid_data-rmse:3.26841
[100]	train-rmse:0.22667	valid_data-rmse:0.69967
[200]	train-rmse:0.09633	valid_data-rmse:0.69768
[300]	train-rmse:0.03619	valid_data-rmse:0.69823
[376]	train-rmse:0.01673	valid_data-rmse:0.69829
fold n°4
[0]	train-rmse:3.29880	valid_data-rmse:3.30782
[100]	train-rmse:0.22282	valid_data-rmse:0.69518
[200]	train-rmse:0.09137	valid_data-rmse:0.69019
[300]	train-rmse:0.03626	valid_data-rmse:0.69060
[393]	train-rmse:0.01535	valid_data-rmse:0.69049
fold n°5
[0]	train-rmse:3.29689	valid_data-rmse:3.31607
[100]	train-rmse:0.22293	valid_data-rmse:0.71063
[200]	train-rmse:0.08727	valid_data-rmse:0.71013
[300]	train-rmse:0.03349	valid_data-rmse:0.71038
[338]	train-rmse:0.02331	valid_data-rmse:0.71032
CV score: 0.47779700
# stack the lgb and xgb out-of-fold predictions with a BayesianRidge meta-model
train_stack = np.vstack([oof_lgb,oof_xgb]).transpose()
test_stack = np.vstack([predictions_lgb, predictions_xgb]).transpose()

folds_stack = RepeatedKFold(n_splits=5, n_repeats=2, random_state=4590)
oof_stack = np.zeros(train_stack.shape[0])
predictions = np.zeros(test_stack.shape[0])

for fold_, (trn_idx, val_idx) in enumerate(folds_stack.split(train_stack,target)):
    print("fold {}".format(fold_))
    trn_data, trn_y = train_stack[trn_idx], target.iloc[trn_idx].values
    val_data, val_y = train_stack[val_idx], target.iloc[val_idx].values
    
    clf_3 = BayesianRidge()
    clf_3.fit(trn_data, trn_y)
    
    oof_stack[val_idx] = clf_3.predict(val_data)
    predictions += clf_3.predict(test_stack) / 10  # 10 = n_splits (5) * n_repeats (2)
    
mean_squared_error(target.values, oof_stack) 
fold 0
fold 1
fold 2
fold 3
fold 4
fold 5
fold 6
fold 7
fold 8
fold 9
0.4606959700010243
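One thing worth remembering before writing the submission: happiness is an ordinal score in [1, 5], and a stacked regressor can drift outside that range. A possible clamp, left commented out since its effect on the leaderboard MSE was not validated here:

# predictions = np.clip(predictions, 1, 5)  # keep regression outputs inside the valid label range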
submit_example['happiness'] = predictions
now = datetime.now().strftime('%m-%d-%H-%M')
submit_example.to_csv("./baseline_%s.csv" % now, index=False)
submit_example.head()
     id  happiness
0  8001   3.727733
1  8002   2.828579
2  8003   3.211417
3  8004   4.362161
4  8005   3.253053
