**Titanic Survival Prediction**

#Import the required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import sklearn



#1 Data overview
#1.1 Load and view train.csv
data_train = pd.read_csv("C:/Users/Administrator/Desktop/kaggle/Titanic-dataset/train.csv")
print(data_train)

#1.2 Basic dataset information and summary statistics
print(data_train.info())
print(data_train.describe())
'''
data_train.info()
PassengerId    891 non-null int64
Survived       891 non-null int64
Pclass         891 non-null int64
Name           891 non-null object
Sex            891 non-null object
Age            714 non-null float64
SibSp          891 non-null int64
Parch          891 non-null int64
Ticket         891 non-null object
Fare           891 non-null float64
Cabin          204 non-null object
Embarked       889 non-null object

PassengerId: passenger ID
Survived: survival flag, 1 = survived, 0 = did not survive
Pclass: ticket class (1st, 2nd or 3rd)
Name: passenger name
Sex: sex, male or female
Age: age
SibSp: number of siblings/spouses aboard
Parch: number of parents/children aboard
Ticket: ticket number
Fare: ticket fare
Cabin: cabin number
Embarked: port of embarkation
'''
#1.3 data_train.info() shows that Age, Cabin and Embarked contain missing values, with Cabin missing the vast majority of its entries
'''
data_train.describe():
       PassengerId    Survived     ...           Parch        Fare
count   891.000000  891.000000     ...      891.000000  891.000000
mean    446.000000    0.383838     ...        0.381594   32.204208
std     257.353842    0.486592     ...        0.806057   49.693429
min       1.000000    0.000000     ...        0.000000    0.000000
25%     223.500000    0.000000     ...        0.000000    7.910400
50%     446.000000    0.000000     ...        0.000000   14.454200
75%     668.500000    1.000000     ...        0.000000   31.000000
max     891.000000    1.000000     ...        6.000000  512.329200
[8 rows x 7 columns]
#data_train.describe() covers only the 7 numeric columns; object columns such as Name, Sex, Ticket, Cabin and Embarked are not included
'''
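
# (Added sketch, not part of the original walkthrough: a quick way to quantify the
# missing values noted above is isnull().sum(), which counts nulls per column.
# Based on the info() output, expect roughly Age: 177, Cabin: 687, Embarked: 2.)
print(data_train.isnull().sum())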

#2 Exploratory data analysis
#2.1 Distribution of the individual attributes
fig=plt.figure()

plt.subplot2grid((3,3),(0,0))
data_train.Survived.value_counts().plot(kind='bar')
plt.title('Overall survival')
plt.xlabel("Survived (1) or not (0)")
plt.ylabel('Count')


plt.subplot2grid((3,3),(0,1))
data_train.Pclass.value_counts().plot(kind='bar')
plt.title('Passengers per class')
plt.xlabel("Pclass")
plt.ylabel('Count')


plt.subplot2grid((3,3),(0,2))
data_train.Sex.value_counts().plot(kind='bar')
plt.title('Passengers by sex')
plt.xlabel("Sex")
plt.ylabel('Count')

plt.subplot2grid((3,3),(1,0))
data_train.Age.dropna().plot(kind='kde')   #kernel density estimate of passenger ages
plt.title('Age distribution')
plt.xlabel("Age")
plt.ylabel('Density')

plt.subplot2grid((3,3),(1,1))
data_train.SibSp.value_counts().plot(kind='bar')
plt.title('Siblings/spouses aboard')
plt.xlabel("SibSp")
plt.ylabel('Count')

plt.subplot2grid((3,3),(1,2))
data_train.Parch.value_counts().plot(kind='bar')
plt.title('Parents/children aboard')
plt.xlabel("Parch")
plt.ylabel('Count')

plt.subplot2grid((3,3),(2,0))
data_train.Embarked.value_counts().plot(kind='bar')
plt.title('Port of embarkation')
plt.xlabel("Embarked")
plt.ylabel('Count')

plt.show()

#2.2 Relationship between each attribute and survival
fig1=plt.figure()
#Passenger class
pclass0=data_train.Pclass[data_train.Survived==0].value_counts()
pclass1=data_train.Pclass[data_train.Survived==1].value_counts()
dfpclass=pd.DataFrame({"Not survived": pclass0,"Survived": pclass1})
dfpclass.plot(kind='bar',stacked=True)
plt.title("Survival by passenger class")
plt.xlabel("Pclass")
plt.ylabel("Count")
plt.show()

fig2=plt.figure()
sex0=data_train.Sex[data_train.Survived==0].value_counts()
sex1=data_train.Sex[data_train.Survived==1].value_counts()
dfsex=pd.DataFrame({"Not survived": sex0,"Survived": sex1})
dfsex.plot(kind='bar',stacked=True)
plt.title("Survival by sex")
plt.xlabel("Sex")
plt.ylabel("Count")
plt.show()

fig3=plt.figure()
x=np.array(data_train.Age)
y=np.array(data_train.Survived)
plt.scatter(x,y)
plt.title("Age vs. survival")
plt.xlabel("Age")
plt.ylabel("Survived (1) or not (0)")
plt.show()

fig4=plt.figure()
sibsp0=data_train.SibSp[data_train.Survived==0].value_counts()
sibsp1=data_train.SibSp[data_train.Survived==1].value_counts()
dfsibsp=pd.DataFrame({"Not survived": sibsp0,"Survived": sibsp1})
dfsibsp.plot(kind='bar',stacked=True)
plt.title("Survival by number of siblings/spouses aboard")
plt.xlabel("SibSp")
plt.ylabel("Count")
plt.show()


fig5=plt.figure()
parch0=data_train.Parch[data_train.Survived==0].value_counts()
parch1=data_train.Parch[data_train.Survived==1].value_counts()
dfparch=pd.DataFrame({"Not survived": parch0,"Survived": parch1})
dfparch.plot(kind='bar',stacked=True)
plt.title("Survival by number of parents/children aboard")
plt.xlabel("Parch")
plt.ylabel("Count")
plt.show()

fig6=plt.figure()
embarked0=data_train.Embarked[data_train.Survived==0].value_counts()
embarked1=data_train.Embarked[data_train.Survived==1].value_counts()
dfembarked=pd.DataFrame({"Not survived": embarked0,"Survived": embarked1})
dfembarked.plot(kind='bar',stacked=True)
print("embarked0",embarked0)
plt.title("Survival by port of embarkation")
plt.xlabel("Embarked")
plt.ylabel("Count")
plt.show()

#Effect of the Cabin attribute on survival
#Distribution of Cabin values
print(data_train.Cabin.value_counts())
#Effect of having a Cabin record (vs. missing) on survival
fig7=plt.figure()
survived_nocabin=data_train.Survived[data_train.Cabin.isnull()].value_counts()
survived_cabin=data_train.Survived[data_train.Cabin.notnull()].value_counts()
dfcabin=pd.DataFrame({"No Cabin":survived_nocabin,"Has Cabin":survived_cabin}).transpose()
dfcabin.plot(kind="bar",stacked=True)
plt.title("Survival vs. presence of Cabin information")
plt.xlabel("Cabin information present")
plt.ylabel("Count")
plt.show()
print("dfcabin",dfcabin)

#3 Data preprocessing
#3.1 Handling missing Cabin and Age values
'''Cabin, Age and Embarked have missing values. Embarked is missing only 2 entries,
so the focus is on Cabin and Age. The missing Age values are filled by fitting a
RandomForestRegressor from scikit-learn on the rows where Age is known.
'''


from sklearn.ensemble import RandomForestRegressor

#Use RandomForestRegressor to fill in the missing Age values
#i.e. predict the missing ages from the other numeric features
def set_null_ages(df):
    feature=df[["Age","Fare","Parch","SibSp","Pclass"]]  #features used for the age model
    age_known=feature[feature.Age.notnull()].values
    age_unknown=feature[feature.Age.isnull()].values
    x=age_known[:,1:]
    y=age_known[:,0]

    train_result=RandomForestRegressor(random_state=0,n_estimators=2000,n_jobs=-1)
    train_result.fit(x,y)

    #predict the missing ages from the features of the rows where Age is unknown
    agepredict=train_result.predict(age_unknown[:,1:])
    df.loc[df.Age.isnull(),"Age"]=agepredict
    return df,train_result


def set_cabin_type(df):
    df.loc[(df.Cabin.notnull()), "Cabin"] = "Yes"
    df.loc[(df.Cabin.isnull()),"Cabin"]="No"    #this order matters: if the null rows were set to "No" first, Cabin would be non-null everywhere and the notnull rule would turn everything into "Yes"
    return df


data_train,train_result=set_null_ages(data_train)
data_train = set_cabin_type(data_train)
print("train_result:\n",train_result)
print("dfcabin",data_train)
#print("dfcabin",dfcabin)  #没有错,没有cabin的还是原来那么多

#3.2 Feature factorization (one-hot encoding): convert each categorical attribute into indicator columns, 1 for true and 0 for false
dummies_cabin=pd.get_dummies(data_train.Cabin,prefix="Cabin")
dummies_sex=pd.get_dummies(data_train["Sex"],prefix="Sex")
dummies_embarked=pd.get_dummies(data_train["Embarked"],prefix="Embarked")
dummies_pclass=pd.get_dummies(data_train["Pclass"],prefix="Pclass")
print("df0",data_train.columns)
data_train=pd.concat([data_train,dummies_cabin,dummies_embarked,dummies_pclass,dummies_sex],axis=1)  #remember to include data_train itself in the concat
print("df1",data_train.columns)
data_train.drop(["Name","Ticket","Cabin","Embarked","Pclass","Sex"],axis=1,inplace=True)
print("df2",data_train.columns)
#print(type())
#print("df2",df)

#3.3 Feature scaling (standardization)
import sklearn.preprocessing as processing

#Fit one scaler per column on the training data so the same transformation can be reused on the test set
age_scaler=processing.StandardScaler().fit(data_train["Age"].values.reshape(-1,1))
data_train["Age_scaler"]=age_scaler.transform(data_train["Age"].values.reshape(-1,1))
fare_scaler=processing.StandardScaler().fit(data_train["Fare"].values.reshape(-1,1))
data_train["Fare_scaler"]=fare_scaler.transform(data_train["Fare"].values.reshape(-1,1))
print("df3",data_train.columns)
print("df3.info")
print(data_train.info())
'''
Index(['PassengerId', 'Survived', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin_No',
       'Cabin_Yes', 'Embarked_C', 'Embarked_Q', 'Embarked_S', 'Pclass_1',
       'Pclass_2', 'Pclass_3', 'Sex_female', 'Sex_male', 'Age_scaler',
       'Fare_scaler'],
      dtype='object')
'''
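
# (Added note, a minimal sketch: StandardScaler applies z = (x - mean) / std, so the
# scaled column can be checked by hand against Age's mean and standard deviation.)
age_mean = data_train["Age"].mean()
age_std = data_train["Age"].std(ddof=0)   # StandardScaler uses the population std
print(((data_train["Age"] - age_mean) / age_std).head())
print(data_train["Age_scaler"].head())    # should match the line above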

#4 Model building
#4.1 Fit a logistic regression model on the processed training data
from sklearn import linear_model
data_train_character=data_train.filter(regex="Survived|SibSp|Parch|Cabin_*|Embarked_*|Pclass_*|Sex_*|Age_scaler|Fare_scaler")
data_train_matrix=data_train_character.values
print("data_train_character",data_train_character)
y=data_train_matrix[:,0]
x=data_train_matrix[:,1:]

data_train_logicalmodel=linear_model.LogisticRegression(C=1,penalty="l1",solver="liblinear",tol=1e-6)  #liblinear supports the L1 penalty
data_train_logicalmodel.fit(x,y)
print(data_train_logicalmodel)
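
# (Added sanity check, a minimal sketch: 5-fold cross-validation on the training
# features gives a rough idea of accuracy before predicting on the test set;
# the exact scores will vary.)
from sklearn.model_selection import cross_val_score
cv_model = linear_model.LogisticRegression(C=1, penalty="l1", solver="liblinear", tol=1e-6)
print(cross_val_score(cv_model, x, y, cv=5))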

#4.2 Apply the same preprocessing to the test set
data_test=pd.read_csv("C:/Users/Administrator/Desktop/kaggle/Titanic-dataset/test.csv")
test_feature=data_test[["Age","Fare","Parch","SibSp","Pclass"]]
#Fill the missing Age values in data_test using the age model fitted on the training data
data_test_age=test_feature[data_test.Age.isnull()].values
x=data_test_age[:,1:]
data_test_agepredict=train_result.predict(x)
data_test.loc[(data_test.Age.isnull()),"Age"]=data_test_agepredict
print("data_test.Age")
print(data_test.Age)
print("data_test.columns")
print(data_test.columns)
#Convert Cabin to Yes/No as for the training set
data_test=set_cabin_type(data_test)
#Fare has a single missing value in the test set; fill it with the mean fare
data_test.loc[data_test.Fare.isnull(),"Fare"]=data_test.Fare.mean()

#One-hot encode the categorical features
dummies_cabin=pd.get_dummies(data_test.Cabin,prefix="Cabin")
dummies_sex=pd.get_dummies(data_test["Sex"],prefix="Sex")
dummies_embarked=pd.get_dummies(data_test["Embarked"],prefix="Embarked")
dummies_pclass=pd.get_dummies(data_test["Pclass"],prefix="Pclass")
data_test=pd.concat([data_test,dummies_cabin,dummies_embarked,dummies_pclass,dummies_sex],axis=1)
#Drop the original categorical columns
data_test.drop(["Name","Ticket","Cabin","Embarked","Pclass","Sex"],axis=1,inplace=True)

#Standardize Age and Fare with the scalers fitted on the training data
data_test["Age_scaler"]=age_scaler.transform(data_test["Age"].values.reshape(-1,1))
data_test["Fare_scaler"]=fare_scaler.transform(data_test["Fare"].values.reshape(-1,1))
print("data_test",data_test)
print(data_test.info())
print(data_test.columns)
#Predict on the test data
data_test_character=data_test.filter(regex="SibSp|Parch|Cabin_*|Embarked_*|Pclass_*|Sex_*|Age_scaler|Fare_scaler")
xtest=data_test_character.values   #same feature columns, in the same order, as used for training
print("data_test_character.columns",data_test_character.columns)
print("xtest")
print(xtest)
predictions=data_train_logicalmodel.predict(xtest)
print("data_test",data_test)
print("data_test_character",data_test_character)

predictresult=pd.DataFrame({"PassengerId":data_test.PassengerId,"Survived":predictions.astype(np.int32)})
print(predictresult)
predictresult.to_csv("C:/Users/Administrator/Desktop/kaggle/Titanic-dataset/predictresult.csv",index=False)
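
# (Added check, a minimal sketch: the Kaggle test set has 418 passengers, so the
# submission should have 418 rows and the two columns PassengerId and Survived.)
print(predictresult.shape)   # expect (418, 2)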


