1. Data processing
Same as in the previous part:
import numpy as np
import pandas as pd
train_data = pd.read_csv('E:/机器学习/my_code_kaggle/lesson2/input/train.csv',index_col = 0)
test_data = pd.read_csv('E:/机器学习/my_code_kaggle/lesson2/input/test.csv',index_col = 0)
y_train = train_data['SalePrice']
X_train = train_data.drop(['SalePrice'],axis = 1)
#Check how smooth/normal the target distribution is
#y_train.hist()
#The distribution is quite skewed; ideally it should look roughly like a normal distribution
y_train_log = np.log1p(y_train)
#y_train_log.hist()
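As a quick check (an optional addition, not in the original code), the skewness before and after the transform can be compared with scipy.stats.skew; a value near 0 means the distribution is close to symmetric:
from scipy.stats import skew
#print(skew(y_train))#the raw prices are strongly right-skewed
#print(skew(y_train_log))#after log1p the skewness is much closer to 0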
#Concatenate train and test so the features are processed together
data = pd.concat((X_train,test_data),axis = 0)
#The numeric codes in MSSubClass carry no ordinal meaning, so treat the column as a category and convert it to strings
#MSSubClass is categorical, so represent it with One-Hot encoding
#pandas ships with get_dummies, which does One-Hot encoding in one call
data['MSSubClass'] = data['MSSubClass'].astype(str)
#print(data['MSSubClass'].value_counts())
#Once MSSubClass is converted to string labels it behaves like the other text categories, so just dummy everything
dummy_data = pd.get_dummies(data)#the feature count grows from 79 columns to roughly 300
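For intuition, here is a tiny illustration of what get_dummies does (a hypothetical toy frame, not the real columns):
toy = pd.DataFrame({'MSSubClass': ['20','60','20']})
#print(pd.get_dummies(toy))#produces 0/1 indicator columns MSSubClass_20 and MSSubClass_60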
#Count the missing values per column, then fill them with the column means
#print(dummy_data.isnull().sum().sort_values(ascending = False).head(10))
mean_cols = dummy_data.mean()
dummy_data = dummy_data.fillna(mean_cols)#fill the gaps with mean_cols
#print(dummy_data.isnull().sum().sum())#confirm there are no missing values left
#Before fitting a regression it is best to standardize the data
#Standardize only the genuinely numerical columns, not the 0/1 columns produced by One-Hot encoding
numeric_cols = data.columns[data.dtypes != 'object']
#print(numeric_cols)#len(numeric_cols) = 36
numeric_col_means = dummy_data.loc[:,numeric_cols].mean()
numeric_col_std = dummy_data.loc[:,numeric_cols].std()
dummy_data.loc[:,numeric_cols] = (dummy_data.loc[:,numeric_cols]-numeric_col_means)/numeric_col_std
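A quick way to confirm the scaling worked (an optional check, not in the original): after standardization each numeric column should have mean close to 0 and standard deviation close to 1.
#print(dummy_data.loc[:,numeric_cols].mean().abs().max())#should be near 0
#print(dummy_data.loc[:,numeric_cols].std().head())#should be near 1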
#With the features processed jointly, split the data back into the training and test sets
dummy_train = dummy_data.loc[train_data.index,:]
dummy_test = dummy_data.loc[test_data.index,:]
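As a sanity check (assuming the standard Kaggle split of 1460 training and 1459 test rows):
#print(dummy_train.shape, dummy_test.shape)#expect (1460, n_features) and (1459, n_features)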
2. A more advanced ensemble
Bagging
Bagging puts many small estimators together: each one is trained on a random subset of the data, and their outputs are then combined (majority vote for classification, averaging for regression).
#A more advanced ensemble
from sklearn import linear_model
ridge = linear_model.Ridge(alpha = 15)
ridge.fit(dummy_train,y_train_log)
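The 0.135 baseline for ridge quoted below can be reproduced with the same cross-validation setup used for bagging (a sketch; the exact figure may vary slightly between scikit-learn versions):
from sklearn.model_selection import cross_val_score
ridge_score = np.sqrt(-cross_val_score(ridge,dummy_train,y_train_log,cv = 10,scoring = 'neg_mean_squared_error'))
#print(np.mean(ridge_score))#about 0.135 with alpha = 15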
#Bagging: train each estimator on a random subset of train_data, then combine the results
from sklearn.ensemble import BaggingRegressor
from sklearn.model_selection import cross_val_score
params = [1,10,15,20,25,30,40]
test_scores = []
for param in params:
    #Build the BaggingRegressor; base_estimator is the single weak learner to bag (the ridge model configured above). If omitted, the default is a decision tree
    clf = BaggingRegressor(n_estimators = param,base_estimator = ridge)
    test_score = np.sqrt(-cross_val_score(clf,dummy_train,y_train_log,cv = 10,scoring = 'neg_mean_squared_error'))
    test_scores.append(np.mean(test_score))
#Plot the results
import matplotlib.pyplot as plt
plt.plot(params,test_scores)
plt.title('n_estimators vs CV Error')#around 25 weak learners works best here, with a CV error of about 0.133
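The best setting can also be picked programmatically instead of reading it off the plot (a small addition, not in the original):
best_param = params[int(np.argmin(test_scores))]
#print(best_param, min(test_scores))#roughly 25 estimators with CV error about 0.133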
Compared with 0.135 for ridge(alpha = 15) and 0.137 for random forest(max_features = 0.3), bagging with 25 estimators brings the cross-validated error down to 0.133.
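To actually use the winning configuration, the bagged ridge model can be refit on the full training set and its predictions mapped back from log space with np.expm1 (a sketch that keeps n_estimators = 25; note that newer scikit-learn versions rename base_estimator to estimator):
bagging = BaggingRegressor(n_estimators = 25,base_estimator = ridge)
bagging.fit(dummy_train,y_train_log)
y_pred = np.expm1(bagging.predict(dummy_test))#undo the log1p transform on the target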
3. Trying other weak learners
Try a Decision Tree as the weak learner:
#Bagging: train each estimator on a random subset of train_data, then combine the results
from sklearn.ensemble import BaggingRegressor
from sklearn.model_selection import cross_val_score
params = [1,10,15,20,25,30,40]
test_scores = []
for param in params:
    #Build the BaggingRegressor; with no base_estimator given, the default weak learner is a decision tree
    clf = BaggingRegressor(n_estimators = param)
    test_score = np.sqrt(-cross_val_score(clf,dummy_train,y_train_log,cv = 10,scoring = 'neg_mean_squared_error'))
    test_scores.append(np.mean(test_score))
#Plot the results
import matplotlib.pyplot as plt
plt.plot(params,test_scores)
plt.title('n_estimators vs CV Error')
The cross-validated error rises to about 0.14, which is worse than ridge regression alone, so plugging a well-tuned weak learner into bagging beats the default trees here.
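Before submitting, the predictions y_pred from the bagged ridge model above can be written out in the format Kaggle expects, an Id column plus SalePrice (a minimal sketch; the Id ends up as the index because index_col = 0 was used when reading the CSVs):
submission = pd.DataFrame({'Id': dummy_test.index, 'SalePrice': y_pred})
submission.to_csv('submission.csv', index = False)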
Finally, when uploading the submission, the page often just keeps spinning and the upload fails; for a workaround see the post
Kaggle上遇到的那些坑-----提交结果(submission.csv)