Kaggle: Titanic Passenger Survival Prediction

import pandas as pd


# Load the datasets
train = pd.read_csv(r'D:\dataset\Titanic\train.csv')
test = pd.read_csv(r'D:\dataset\Titanic\test.csv')

print(train.info()) # first print the basic information of the training and test sets
print(test.info())
print(train.describe())
# The mean row tells us that roughly 38.38% of passengers survived, that 2nd and 3rd class together hold more passengers than 1st class, that the average passenger age is about 29.7 (rows with no Age record are skipped in this calculation), and so on...
train.head()
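# A quick sanity-check sketch (not in the original notebook) to make the class
# and sex effects described above concrete: survival rate per group.
print(train.groupby('Pclass')['Survived'].mean())  # 1st-class passengers survive far more often
print(train.groupby('Sex')['Survived'].mean())     # female passengers survive far more often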
# Based on domain knowledge of the Titanic disaster, manually select features that should be predictive
selected_features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
X_train = train[selected_features].copy()  # .copy() so the fillna calls below do not trigger SettingWithCopyWarning
X_test = test[selected_features].copy()
y_train = train['Survived']
# print(y_train)
# print(X_train)
# print(X_test)

# Inspecting the data shows that the Embarked feature has missing values, which need to be filled in
print(X_train['Embarked'].value_counts())
print(X_test['Embarked'].value_counts())
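# A more systematic sketch (added here, not in the original notebook): count the
# nulls in every column at once instead of eyeballing value_counts per feature.
print(X_train.isnull().sum())  # Age and Embarked have missing values in the training set
print(X_test.isnull().sum())   # Age and Fare have missing values in the test set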


# For a categorical feature such as Embarked, the usual choice is to fill missing values with the most frequent category, which tends to introduce the least error
X_train['Embarked'] = X_train['Embarked'].fillna('S')
X_test['Embarked'] = X_test['Embarked'].fillna('S')
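# An equivalent sketch without hard-coding 'S': compute the most frequent category.
# ('S' happens to be the mode of Embarked in this dataset, so the result is the same.)
most_common_port = X_train['Embarked'].mode()[0]  # illustrative helper variable, == 'S' here
X_train['Embarked'] = X_train['Embarked'].fillna(most_common_port)
X_test['Embarked'] = X_test['Embarked'].fillna(most_common_port)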


# For a numeric feature such as Age, missing values are usually filled with the mean or the median
X_train['Age'] = X_train['Age'].fillna(X_train['Age'].mean())
X_test['Age'] = X_test['Age'].fillna(X_test['Age'].mean())
X_test['Fare'] = X_test['Fare'].fillna(X_test['Fare'].mean())
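# An alternative sketch to the mean-fill above (it would replace it, not run after it):
# impute with the median via scikit-learn's SimpleImputer. Fitting on the training set
# and reusing those statistics on the test set also avoids the slight train/test
# inconsistency of filling each set with its own mean.
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy='median')
X_train[['Age', 'Fare']] = imputer.fit_transform(X_train[['Age', 'Fare']])
X_test[['Age', 'Fare']] = imputer.transform(X_test[['Age', 'Fare']])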

# Re-examine the training and test data after preprocessing
X_train.info() # all missing values handled; everything is ready
X_test.info()
S    644
C    168
Q     77
Name: Embarked, dtype: int64
S    270
C    102
Q     46
Name: Embarked, dtype: int64
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 7 columns):
Pclass      891 non-null int64
Sex         891 non-null object
Age         891 non-null float64
SibSp       891 non-null int64
Parch       891 non-null int64
Fare        891 non-null float64
Embarked    891 non-null object
dtypes: float64(2), int64(3), object(2)
memory usage: 48.8+ KB
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 418 entries, 0 to 417
Data columns (total 7 columns):
Pclass      418 non-null int64
Sex         418 non-null object
Age         418 non-null float64
SibSp       418 non-null int64
Parch       418 non-null int64
Fare        418 non-null float64
Embarked    418 non-null object
dtypes: float64(2), int64(3), object(2)
memory usage: 22.9+ KB


# Vectorize the features with DictVectorizer
from sklearn.feature_extraction import DictVectorizer
dict_vec = DictVectorizer(sparse=False)
X_train = dict_vec.fit_transform(X_train.to_dict(orient='records'))
X_test = dict_vec.transform(X_test.to_dict(orient='records'))
dict_vec.feature_names_
['Age',
 'Embarked=C',
 'Embarked=Q',
 'Embarked=S',
 'Fare',
 'Parch',
 'Pclass',
 'Sex=female',
 'Sex=male',
 'SibSp']
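# A pandas-only alternative sketch (hypothetical X_train_ohe/X_test_ohe names, just
# for illustration): pd.get_dummies one-hot encodes the object columns much like
# DictVectorizer, and reindexing aligns the test columns with the training columns.
X_train_ohe = pd.get_dummies(train[selected_features], columns=['Sex', 'Embarked'])
X_test_ohe = pd.get_dummies(test[selected_features], columns=['Sex', 'Embarked'])
X_test_ohe = X_test_ohe.reindex(columns=X_train_ohe.columns, fill_value=0)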
from sklearn.ensemble import RandomForestClassifier # import RandomForestClassifier from sklearn.ensemble
from xgboost import XGBClassifier # import XGBClassifier from the popular xgboost package for this classification task
from sklearn.model_selection import cross_val_score # sklearn.cross_validation was removed; use model_selection
# Build the models
rfc = RandomForestClassifier() # default parameters
xgbc = XGBClassifier()

# Use 5-fold cross-validation on the training set to evaluate the default-configuration RandomForestClassifier and XGBClassifier, and report the mean classification accuracy for each
print(cross_val_score(rfc, X_train, y_train, cv=5).mean())
print(cross_val_score(xgbc, X_train, y_train, cv=5).mean())
0.8091997728906474
0.81824559798311
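# For classifiers, cross_val_score with cv=5 already uses stratified folds; a sketch
# that makes this explicit and reproducible (random_state=42 is an arbitrary choice,
# not from the original run):
from sklearn.model_selection import StratifiedKFold
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
print(cross_val_score(XGBClassifier(), X_train, y_train, cv=skf).mean())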
# Train the RandomForestClassifier with default parameters and make predictions
rfc.fit(X_train, y_train)
'''
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
            max_depth=None, max_features='auto', max_leaf_nodes=None,
            min_impurity_decrease=0.0, min_impurity_split=None,
            min_samples_leaf=1, min_samples_split=2,
            min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
            oob_score=False, random_state=None, verbose=0,
            warm_start=False)
'''
rfc_y_predict = rfc.predict(X_test)

# Store the predictions of the default-parameter RandomForestClassifier on the test data in rfc_submission.csv
rfc_submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': rfc_y_predict}) # predictions
rfc_submission.to_csv(r'D:\dataset\Titanic\rfc_submission.csv', index=False)

# Train the XGBClassifier with default parameters and make predictions
xgbc.fit(X_train, y_train)
'''
XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
       colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0,
       max_depth=3, min_child_weight=1, missing=None, n_estimators=100,
       n_jobs=1, nthread=None, objective='binary:logistic', random_state=0,
       reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
       silent=True, subsample=1)
'''
xgbc_y_predict = xgbc.predict(X_test)


# Store the predictions of the default-parameter XGBClassifier on the test data in xgbc_submission.csv
xgbc_submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': xgbc_y_predict}) # predictions
xgbc_submission.to_csv(r'D:\dataset\Titanic\xgbc_submission.csv', index=False)
# Use a parallel grid search to look for a better hyperparameter combination, hoping to further improve the XGBClassifier's predictive performance
from sklearn.model_selection import GridSearchCV # sklearn.grid_search was removed; use model_selection

params = {'max_depth': list(range(2, 7)), 'n_estimators': list(range(100, 1100, 200)), 'learning_rate': [0.05, 0.1, 0.25, 0.5, 1.0]}

xgbc_best = XGBClassifier()
gs = GridSearchCV(xgbc_best, params, n_jobs=-1, cv=5, verbose=1)
gs.fit(X_train, y_train)
Fitting 5 folds for each of 125 candidates, totalling 625 fits


[Parallel(n_jobs=-1)]: Done  42 tasks      | elapsed:   26.7s
[Parallel(n_jobs=-1)]: Done 192 tasks      | elapsed:   40.9s
[Parallel(n_jobs=-1)]: Done 442 tasks      | elapsed:  1.1min
[Parallel(n_jobs=-1)]: Done 625 out of 625 | elapsed:  1.4min finished





GridSearchCV(cv=5, error_score='raise',
       estimator=XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
       colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0,
       max_depth=3, min_child_weight=1, missing=None, n_estimators=100,
       n_jobs=1, nthread=None, objective='binary:logistic', random_state=0,
       reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
       silent=True, subsample=1),
       fit_params={}, iid=True, n_jobs=-1,
       param_grid={'max_depth': [2, 3, 4, 5, 6], 'n_estimators': [100, 300, 500, 700, 900], 'learning_rate': [0.05, 0.1, 0.25, 0.5, 1.0]},
       pre_dispatch='2*n_jobs', refit=True, scoring=None, verbose=1)
# Examine the tuned XGBClassifier's best hyperparameter configuration and its cross-validated accuracy
print(gs.best_score_)
print(gs.best_params_)
0.835016835016835
{'learning_rate': 0.1, 'max_depth': 5, 'n_estimators': 100}
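# With model_selection's GridSearchCV, the full search results can also be inspected;
# a small sketch ranking the parameter combinations by mean cross-validation score:
cv_results = pd.DataFrame(gs.cv_results_)
print(cv_results[['params', 'mean_test_score', 'rank_test_score']]
      .sort_values('rank_test_score').head())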
xgbc_best_y_predict = gs.predict(X_test)

# Store the predictions of the hyperparameter-tuned XGBClassifier on the test data in xgbc_best_submission.csv
xgbc_best_submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': xgbc_best_y_predict}) # predictions
xgbc_best_submission.to_csv(r'D:\dataset\Titanic\xgbc_best_submission.csv', index=False)