XGBoost实践泰坦尼克号可生还人员预测

联合之前的logistic和random forest:

数据集下载:https://download.csdn.net/download/oliverkingli/10465451

import xgboost as xgb
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import csv

def show_accuracy(a, b, tip):
	"""Print and return the element-wise agreement between two arrays.

	Both inputs are flattened with ravel() and compared position by
	position; the result is the percentage (0-100) of matching entries.

	a, b -- numpy arrays of equal size (predictions and ground truth)
	tip  -- label used in the printed message
	"""
	matches = a.ravel() == b.ravel()
	rate = float(100 * matches.sum()) / a.size
	print('%s accuracy:%.3f%%' % (tip, rate))
	return rate

def _impute_age(data, feature_cols):
	"""Fill missing 'Age' values in `data` in place.

	A RandomForestRegressor is trained on the rows where age is known and
	used to predict the missing ages. `feature_cols` is the column list;
	its FIRST entry must be 'Age', the rest are used as predictors.
	"""
	print('RandomForest to predict ages:starting........')
	data_for_age = data[feature_cols]
	# Rows with a known age (training set for the regressor) vs rows to impute.
	age_exist = data_for_age.loc[data.Age.notnull()]
	age_null = data_for_age.loc[data.Age.isnull()]
	print('age_exist:\n', age_exist)
	if len(age_null) > 0:
		x = age_exist.values[:, 1:]
		y = age_exist.values[:, 0]
		rfr = RandomForestRegressor(n_estimators=1000)
		rfr.fit(x, y)
		age_hat = rfr.predict(age_null.values[:, 1:])
		print('age_hat:\n', age_hat)
		data.loc[data.Age.isnull(), 'Age'] = age_hat
	print('RandomForest to predict ending........')


def load_data(file_name, is_train):
	"""Read a Titanic CSV and return model-ready numpy arrays.

	Parameters
	----------
	file_name : str
		Path of the CSV file to read.
	is_train : bool
		True for the training file (which has a 'Survived' column that is
		also used as an age predictor); False for the test file.

	Returns
	-------
	(x, y) when is_train — feature matrix and labels, both tiled 5x
	(the original author's way of enlarging the training set);
	(x, passenger_id) otherwise, with x tiled 5x as well.

	Side effect: writes the preprocessed frame to 'New_data.csv'.
	"""
	data = pd.read_csv(file_name)
	print('data.describe:\n', data.describe())
	# Re-encode 'Sex' as integers: female -> 0, male -> 1.
	data['Sex'] = data['Sex'].map({'female': 0, 'male': 1}).astype(int)

	# Fill missing fares with the median fare of the same passenger class.
	if data.Fare.isnull().any():
		for pclass in (1, 2, 3):
			median_fare = data[data.Pclass == pclass]['Fare'].dropna().median()
			data.loc[data.Fare.isnull() & (data.Pclass == pclass), 'Fare'] = median_fare

	# Impute missing ages with a random forest; the training file can also
	# use 'Survived' as a predictor, the test file cannot.
	if is_train:
		_impute_age(data, ['Age', 'Survived', 'Fare', 'Parch', 'SibSp', 'Pclass'])
	else:
		_impute_age(data, ['Age', 'Fare', 'Parch', 'SibSp', 'Pclass'])

	# Missing embarkation port: default to 'S' (the most common port).
	data.loc[data.Embarked.isnull(), 'Embarked'] = 'S'
	print('data[Embarked]:\n', data['Embarked'])
	embarked_data = pd.get_dummies(data.Embarked)
	print('embarked_data:\n', embarked_data)
	embarked_data = embarked_data.rename(columns=lambda x: 'Embarked ' + str(x))
	# BUGFIX: get_dummies only creates columns for ports actually present in
	# the file; guarantee all three one-hot columns exist so the feature
	# selection below never raises KeyError.
	for col in ('Embarked C', 'Embarked Q', 'Embarked S'):
		if col not in embarked_data:
			embarked_data[col] = 0
	data = pd.concat([data, embarked_data], axis=1)
	print('data.describe:\n', data.describe())
	data.to_csv('New_data.csv')

	x = data[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare',
			  'Embarked C', 'Embarked Q', 'Embarked S']]
	y = data['Survived'] if 'Survived' in data else None

	# Tile the features 5x, matching the original enlargement trick.
	x = np.tile(np.array(x), (5, 1))
	if is_train:
		y = np.tile(np.array(y), (5,))
		return x, y

	return x, data['PassengerId']


def write_result(c, c_type):
	"""Predict survival on 'Titanic.test.csv' with classifier `c` and write
	a submission file named 'Prediction_<c_type>.csv'.

	c_type: 1 = logistic regression, 2 = random forest, 3 = xgboost
	booster (whose input must be wrapped in an xgb.DMatrix first).
	"""
	file_name = 'Titanic.test.csv'
	x, passenger_id = load_data(file_name, False)

	# BUGFIX: the original wrote `if type == 3:`, comparing the *builtin*
	# `type` to 3 — always False, so a booster never got its DMatrix.
	if c_type == 3:
		x = xgb.DMatrix(x)

	# Threshold probabilistic outputs to hard 0/1 labels.
	y = np.asarray(c.predict(x))
	y[y > 0.5] = 1
	y[~(y > 0.5)] = 0

	# Context manager closes the file even on error; newline='' is the
	# csv-module convention to avoid blank lines on Windows.
	with open("Prediction_%d.csv" % c_type, "wt", newline='') as predictions_file:
		writer = csv.writer(predictions_file)
		writer.writerow(['PassengerId', 'Survived'])
		# Cast to int so the file contains "1"/"0", not "1.0"/"0.0".
		writer.writerows(zip(passenger_id, y.astype(int)))


if __name__ == '__main__':
	# Load the training data and hold out 70% of it for evaluation.
	features, labels = load_data('Titanic.train.csv', True)
	x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.7, random_state=1)

	# --- Logistic regression baseline ---
	logreg = LogisticRegression(penalty='l2')
	logreg.fit(x_train, y_train)
	lr_rate = show_accuracy(logreg.predict(x_test), y_test, 'Logistic regression')
	write_result(logreg, 1)

	# --- Random forest ---
	forest = RandomForestClassifier(n_estimators=100)
	forest.fit(x_train, y_train)
	rfc_rate = show_accuracy(forest.predict(x_test), y_test, 'Random Forest')
	write_result(forest, 2)

	# --- XGBoost ---
	data_train = xgb.DMatrix(x_train, label=y_train)
	data_test = xgb.DMatrix(x_test, label=y_test)
	watch_list = [(data_test, 'eval'), (data_train, 'train')]
	param = {'max_depth': 6, 'eta': 0.8, 'silent': 1, 'objective': 'binary:logistic'}
	booster = xgb.train(param, data_train, num_boost_round=200, evals=watch_list)
	# Booster outputs probabilities; threshold them at 0.5 into 0/1 labels.
	prob = booster.predict(data_test)
	# write_result(booster, 3)
	prob[prob > 0.5] = 1
	prob[~(prob > 0.5)] = 0
	xgb_rate = show_accuracy(prob, y_test, 'XGBoost')

	print('Logistic regression prediction rate:%.4f%%' % lr_rate)
	print('Random Forest rate:%.4f%%' % rfc_rate)
	print('XGBoost prediction rate:%.4f%%' % xgb_rate)

  • 0
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值