Dataset download link:
https://github.com/fayduan/Kaggle_Titanic/blob/master/train.csv
These notes follow the blog post https://www.cnblogs.com/cxfly/p/8505851.html, updating the deprecated APIs it uses along the way.
import pandas as pd

titanic = pd.read_csv('/usr/local/app/train.csv')
# Fill missing ages with the median age
titanic['Age'] = titanic['Age'].fillna(titanic['Age'].median())
print(titanic['Sex'].unique())
# Encode Sex as 0/1
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
print(titanic['Embarked'].unique())
# Fill missing ports of embarkation with the most common value, then encode as integers
titanic['Embarked'] = titanic['Embarked'].fillna('S')
titanic.loc[titanic['Embarked'] == 'S', 'Embarked'] = 0
titanic.loc[titanic['Embarked'] == 'C', 'Embarked'] = 1
titanic.loc[titanic['Embarked'] == 'Q', 'Embarked'] = 2
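A quick sanity check (a short sketch using only what was computed above) confirms the fills and encodings took effect:

# No missing values should remain in the columns we filled,
# and Sex/Embarked should now contain only small integer codes
print(titanic['Age'].isnull().sum(), titanic['Embarked'].isnull().sum())
print(titanic['Sex'].unique(), titanic['Embarked'].unique())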
=======================================================================
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold  # cross-validation: split the data into folds and average the results

predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']  # features used for training
print(titanic[predictors])
alg = LinearRegression()
# Note: recent scikit-learn raises an error if random_state is passed without shuffle=True;
# leaving the folds unshuffled also keeps the concatenated predictions aligned with row order.
kf = KFold(n_splits=3)
predictions = []
for train, test in kf.split(titanic[predictors]):
    train_predictors = titanic[predictors].iloc[train, :]  # training features for this fold
    train_target = titanic['Survived'].iloc[train]         # training labels for this fold
    alg.fit(train_predictors, train_target)                # fit the linear model: X (training data), y (labels)
    test_prediction = alg.predict(titanic[predictors].iloc[test, :])  # predictions on the held-out fold
    predictions.append(test_prediction)
======================================================================
import numpy as np

# Map the regression outputs to 0/1 with a 0.5 threshold
predictions = np.concatenate(predictions, axis=0)
predictions[predictions > .5] = 1
predictions[predictions <= .5] = 0
# Accuracy for model evaluation: fraction of predictions that match the true labels
accuracy = sum(predictions == titanic['Survived']) / len(predictions)
print(accuracy)
=======================================================================
# Logistic regression baseline, evaluated with 3-fold cross-validation
# (an ensemble of multiple classifiers is built further below)
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

alg = LogisticRegression(random_state=1)
scores = cross_val_score(alg, titanic[predictors], titanic['Survived'], cv=3)
print(scores.mean())
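For a classifier, passing an integer cv makes cross_val_score use stratified folds under the hood; spelling that out explicitly (a sketch, equivalent to the call above) looks like this:

# cv=3 on a classifier is shorthand for stratified 3-fold splitting;
# writing it out makes the evaluation protocol explicit.
from sklearn.model_selection import StratifiedKFold
scores = cross_val_score(alg, titanic[predictors], titanic['Survived'],
                         cv=StratifiedKFold(n_splits=3))
print(scores.mean())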
========================================================================
# Feature engineering
## Extract as many candidate features as possible
# and compare how much each one helps.
# Feature extraction is a crucial part of data mining:
# the features used so far already existed in the data, but in real projects
# good features are often missing and have to be constructed by hand.
# 1. Combine several features into one
titanic['Familysize'] = titanic['SibSp'] + titanic['Parch']
titanic['NameLength'] = titanic['Name'].apply(len)
import re

# 2. Extract the passenger's title (Mr, Mrs, ...) from the Name column
def get_title(name):
    # A title is a word immediately followed by a period, e.g. "Braund, Mr. Owen Harris" -> "Mr"
    title_search = re.search(r'([A-Za-z]+)\.', name)
    if title_search:
        return title_search.group(1)
    return ""

titles = titanic['Name'].apply(get_title)
# Map each title to a numeric code
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Dr": 5, "Rev": 6, "Col": 7, "Major": 8, "Mlle": 9, "Countess": 10, "Ms": 11, "Lady": 12, "Jonkheer": 13, "Don": 14, "Mme": 15, "Capt": 16, "Sir": 17}
for k, v in title_mapping.items():
    titles[titles == k] = v
titanic['titles'] = titles
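An equivalent, more idiomatic pandas version of the mapping loop above (a sketch; Series.map turns titles missing from the dict into NaN, hence the defensive fillna):

# Same result as the loop: map titles to codes, defaulting unknown titles to 0
titanic['titles'] = titanic['Name'].apply(get_title).map(title_mapping).fillna(0).astype(int)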
========================================================================
# Feature selection
# Feature importance analysis:
# measure how much each feature contributes to the final result.
# One classic idea: replace a feature's values with noise (leaving every other
# column unchanged), measure the new error rate, and compare it with the original;
# the gap between the two error rates reflects that feature's importance.
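The noise-replacement scheme just described is essentially what scikit-learn calls permutation importance. A minimal, self-contained illustration (the RandomForestClassifier here is a hypothetical choice for demonstration only; the code below uses SelectKBest instead):

from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance

# Fit any classifier, then shuffle one feature at a time
# and measure how much the accuracy drops
rf = RandomForestClassifier(random_state=1).fit(titanic[predictors], titanic['Survived'])
result = permutation_importance(rf, titanic[predictors], titanic['Survived'],
                                n_repeats=5, random_state=1)
for name, imp in zip(predictors, result.importances_mean):
    print(name, imp)  # larger drop = more important feature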
import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif  # scores each feature's importance individually
import matplotlib.pyplot as plt

predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Familysize', 'NameLength', 'titles']
selector = SelectKBest(f_classif, k=5)
selector.fit(titanic[predictors], titanic['Survived'])
scores = -np.log10(selector.pvalues_)
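matplotlib is imported above but the scores are never displayed; a short sketch to plot them as a bar chart:

# Visualize each feature's score (higher = stronger relationship with Survived)
plt.bar(range(len(predictors)), scores)
plt.xticks(range(len(predictors)), predictors, rotation='vertical')
plt.show()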
# Ensemble of classifiers
# A common competition trick: combine several algorithms and average their
# predictions to reduce overfitting.
from sklearn.ensemble import GradientBoostingClassifier
import numpy as np

# GradientBoostingClassifier is another tree-based ensemble (distinct from random
# forests): it combines many weak learners into a strong classifier.
algorithms = [
    [GradientBoostingClassifier(random_state=1, n_estimators=25, max_depth=3),
     ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Familysize', 'NameLength', 'titles']
    ],
    [LogisticRegression(random_state=1),
     ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Familysize', 'NameLength', 'titles']
    ]
]
kf = KFold(n_splits=3)  # unshuffled folds keep the concatenated predictions in row order
predictions = []
for train, test in kf.split(titanic[predictors]):
    train_target = titanic['Survived'].iloc[train]
    full_test_predictions = []
    for alg, alg_predictors in algorithms:
        # Each algorithm trains on its own feature list
        alg.fit(titanic[alg_predictors].iloc[train, :], train_target)
        # predict_proba returns [P(died), P(survived)]; keep the survival probability
        test_prediction = alg.predict_proba(titanic[alg_predictors].iloc[test, :].astype(float))[:, 1]
        full_test_predictions.append(test_prediction)
    # Average the two algorithms' probabilities, then threshold at 0.5
    test_predictions = (full_test_predictions[0] + full_test_predictions[1]) / 2
    test_predictions[test_predictions > .5] = 1
    test_predictions[test_predictions <= .5] = 0
    predictions.append(test_predictions)
predictions = np.concatenate(predictions, axis=0)
accuracy = sum(predictions == titanic['Survived']) / len(predictions)
print(accuracy)
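A common refinement, sketched here rather than wired into the loop above, is to weight the stronger model more heavily instead of averaging equally; the 3:1 weights below are illustrative, not tuned:

# Inside the fold loop, replace the plain average with a weighted blend,
# e.g. 3 parts gradient boosting to 1 part logistic regression:
test_predictions = (full_test_predictions[0] * 3 + full_test_predictions[1]) / 4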