Kaggle Titanic 存活率预测-利用tenforflow建立多层神经网络及过拟合处理

几十遍调参和特征工程总结:
1.训练数据只有889条,其实并不利于神经网络大发挥。训练有上限,~80%, 在kaggle上排名top20%。也确保了下限,~70%,在kaggle上排名bot20%.
2.特征工程对训练效果的提高非常局限。因为神经网络本身就是一个自动特征工程过程。
3.建立了一个可以调节隐藏层神经元数和隐藏层层数的模型。2层25,6训练准确度可达96.9%,3层隐藏层的训练准确度在93%左右。
4.过拟合现象严重,训练效果可以达到96.9%的准确率(隐藏层[25,6]),但测试准确率在70%多。
5.加入了drop out,以减小过拟合现象,但效果有限。(训练数据少)
6.对所有输入数据进行了正则化处理。
7.Adam优化器效果明显优于Adagrad/Momentum/GD等其他优化器。
8.参数初始化对结果有一定影响。目前采用uniform分布。
9.损失函数采用均方误差 (Y-Pred)²,效果与二分类交叉熵损失sigmoid_cross_entropy无差异甚至更好。

经过不断尝试,最终测试准确度从74%提升至79.425%(top20%).在训练数据有限且只用MLP的情况下,还是属于一个比较好的成绩。


import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf

pd.set_option('display.max_columns',20)  # show up to 20 columns when printing a DataFrame
pd.set_option('display.width', 1000)      # allow up to 1000 characters per printed line


# ---- Load and clean the training data ----
df=pd.read_csv('train.csv')
print(df.info())

print('\n','===============================下一步,显示description===============================','\n')
#print(df.describe(include='all'))
#print(df.isna().sum())
print('\n','===============================下一步,把Ticket列去除===============================','\n')
# Drop the Ticket column; it is never used as a feature below.
df=df.drop('Ticket',axis=1)
#print(df)
print('\n','===============================下一步,把Embarked没有赋值的两行去除===============================','\n')
# Drop the rows with a missing Embarked value. inplace=True is not needed
# because the result is reassigned to df.
df=df.dropna(subset = ['Embarked'])
print(df.shape)
print(df.head())

# ---- Exploratory analysis: survival rate vs. each candidate feature ----
# (plots are built but plt.show() calls are commented out)
print('\n','===============================下一步,不同因素分析===============================','\n')
print('1.Pclass与Survived')
df[['Survived','Pclass']].groupby(['Pclass']).mean().plot.bar()
#plt.show()
print('     影响很大,1>2>3')

print('2.Name与Survived','\n','     待定')

print('3.Sex与Survived')
df[['Sex','Survived']].groupby(['Sex']).mean().plot.bar()
df[['Sex','Pclass','Survived']].groupby(['Sex','Pclass']).mean().plot.bar()
#plt.show()
print('     影响很大,female>male')

print('4.Age与Survived')
df[['Age','Survived']].groupby(['Age']).mean().plot.bar()
# Bucket Age into child / teen / adult / elderly bands for a readable plot.
bins1 = [0, 12, 18, 65, 100]
df['Age_group'] = pd.cut(df['Age'], bins1)
df[['Age_group','Survived']].groupby(['Age_group']).mean().plot.bar()
#plt.show()
print('     小孩和老人的存活率高')

print('5.Sibsp,Parch与Survived')
df[['SibSp','Survived']].groupby(['SibSp']).mean().plot.bar()
df[['Parch','Survived']].groupby(['Parch']).mean().plot.bar()
#plt.show()
print('     有亲属的存活率稍高,但不是越多越高')

print('6.Fare与Survived')
#print(df['Fare'].describe())
bins2 = [0, 50, 100, 150, 200, 550 ]
df['Fare_group']=pd.cut(df['Fare'],bins2)
df[['Fare_group','Survived']].groupby(['Fare_group']).mean().plot.bar()
#plt.show()
print('     票价低的存活率低')

print('7.Cabin与Survived')
# Reduce each cabin string to its deck letter (first character).
# NOTE(review): iloc[:,9] is assumed to be the Cabin column after the
# Ticket drop above — confirm against train.csv's column order.
for i in range(0,df.shape[0]):
    if isinstance(df.iloc[i,9],str):
        df.iloc[i, 9] = df.iloc[i, 9][0]
    else:
        pass

df['Cabin']=df['Cabin'].fillna(0)  # replace NaN (no cabin recorded) with 0
#print(df.head(10))
df[['Cabin','Survived']].groupby(['Cabin']).mean().plot.bar()
#plt.show()
print('     Cabin B,E,F存活率较高,无Cabin存活率低')

print('8. Embarked与Survived')
df[['Embarked','Survived']].groupby(['Embarked']).mean().plot.bar()
#plt.show()
print('     瑟堡>昆士敦>南开普敦')
print('\n','****不同因素分析完成****','\n')


print('9. Name length与Survived')
df['Name_length']=df['Name'].apply(len)
df[['Name_length','Survived']].groupby(['Name_length']).mean().plot.bar()
#plt.show()

print('\n','===============================下一步,特征工程(特征的数据化)===============================','\n')

print('1.转化sex,dummy处理')
# Label-encode Sex: male -> 0, female -> 1 (integer codes, not true one-hot
# dummies despite the printed label).
df.loc[df['Sex']=='male','Sex']=0
df.loc[df['Sex']=='female','Sex']=1

print('2.转化Cabin,dummy处理')
# Label-encode the deck letter extracted earlier; rows with no cabin keep
# the 0 that was filled in above.
df.loc[df['Cabin']=='A','Cabin']=1
df.loc[df['Cabin']=='B','Cabin']=2
df.loc[df['Cabin']=='C','Cabin']=3
df.loc[df['Cabin']=='D','Cabin']=4
df.loc[df['Cabin']=='E','Cabin']=5
df.loc[df['Cabin']=='F','Cabin']=6
df.loc[df['Cabin']=='G','Cabin']=7
df.loc[df['Cabin']=='T','Cabin']=8

print('3.转化Embarked,dummy处理')
# Label-encode the embarkation port.
df.loc[df['Embarked']=='S','Embarked']=1
df.loc[df['Embarked']=='C','Embarked']=2
df.loc[df['Embarked']=='Q','Embarked']=3
#print(df.head(20))
#print(df.describe(include='all'))


print('4.利用随机森林方法补全Age,并添加Age_level')
from sklearn.ensemble import RandomForestRegressor
age = df[['Age','Survived','Fare','Parch','SibSp','Pclass']] # frame used only to fit the age-imputation model

age_notnull = age.loc[age.Age.notnull()]   # rows with a known Age (regressor training data)
age_isnull = age.loc[age.Age.isnull()]     # rows whose Age must be predicted

X_rfr = age_notnull.values[:,1:]   # features: Survived, Fare, Parch, SibSp, Pclass
Y_rfr = age_notnull.values[:,0]    # target: the Age column
rfr = RandomForestRegressor(n_estimators=1000,n_jobs=-1)  # n_jobs=-1: use all CPU cores
rfr.fit(X_rfr,Y_rfr)
predictAges = rfr.predict(age_isnull.values[:,1:])

# P=[]
# for x in predictAges:
#     x=round(x)
#     P.append(x)
#rounded = [np.round(x) for x in predictAges]  # rounding the imputed ages was tried and dropped: it made the network's outputs round as well
df.loc[df.Age.isnull(),'Age'] = predictAges
# print(df.describe(include='all'))
# print(df.info())
# print(df.head(20))
df['Age_level']=np.nan

# Ordered, overlapping assignments: each later line OVERWRITES earlier ones
# for the rows it matches. Net effect: newborn (<=1) -> 0, infant (<=6) -> 1,
# child (<=15) -> 2, female (over 15) -> 3, adult male (>15) -> 4.
df.loc[df['Age']>15.0,'Age_level']=4  # adults (females are overwritten next)
df.loc[df['Sex']==1,'Age_level']=3  # females
df.loc[df['Age']<=15.0,'Age_level']=2  # children 6-15
df.loc[df['Age']<=6.0,'Age_level']=1  # small children 1-6
df.loc[df['Age']<=1.0,'Age_level']=0  # newborns under 1

print('5.把Name_length 分组')
df['Name_length_l']=np.nan
# Same overwrite trick: >=35 -> 2, 25<length<35 -> 1, <=25 -> 0.
df.loc[df['Name_length']>=35,'Name_length_l']=2  # long names
df.loc[df['Name_length']<35,'Name_length_l']=1  # medium names
df.loc[df['Name_length']<=25,'Name_length_l']=0  # short names


print('6.通过Name增加称呼人物Title')
# Title = the word between ', ' and '.', e.g. 'Braund, Mr. Owen' -> 'Mr'.
df['Title']=df['Name'].apply(lambda x:x.split(', ')[1]).apply(lambda x:x.split('.')[0])
df[['Title','Survived']].groupby(['Title']).mean().plot.bar()
# Label-encode every title present in the training set.
df.loc[df['Title']=='Capt','Title']=0
df.loc[df['Title']=='Col','Title']=1
df.loc[df['Title']=='Don','Title']=2
df.loc[df['Title']=='Dr','Title']=3
df.loc[df['Title']=='Jonkheer','Title']=4
df.loc[df['Title']=='Lady','Title']=5
df.loc[df['Title']=='Major','Title']=6
df.loc[df['Title']=='Master','Title']=7
df.loc[df['Title']=='Miss','Title']=8
df.loc[df['Title']=='Mlle','Title']=9
df.loc[df['Title']=='Mme','Title']=10
df.loc[df['Title']=='Mr','Title']=11
df.loc[df['Title']=='Mrs','Title']=12
df.loc[df['Title']=='Ms','Title']=13
df.loc[df['Title']=='Rev','Title']=14
df.loc[df['Title']=='Sir','Title']=15
df.loc[df['Title']=='the Countess','Title']=16



print('7.增加Mother')
# Mother = female, age >= 20, travelling with at least one parent/child
# (again via ordered overwrites).
df['Mother']=0
df.loc[df['Sex']==1,'Mother']=1
df.loc[df['Parch']==0,'Mother']=0
df.loc[df['Age']<20,'Mother']=0



# print('8.增加独身')


print(df.head())

#plt.show()


print('\n','****特征工程完成****','\n')



print('\n','===============================下一步,正则化===============================','\n')
# Standardization: x -> (x - mean) / std, using the population std (ddof=0).
# Each column's training statistics are kept in *_mean / *_vari variables
# because the test set must later be standardized with these SAME values.

Age_mean=df['Age'].mean()
Age_vari=df['Age'].std(ddof=0)
Fare_mean=df['Fare'].mean()
Fare_vari=df['Fare'].std(ddof=0)
Name_length_mean=df['Name_length'].mean()
Name_length_vari=df['Name_length'].std(ddof=0)
Pclass_mean=df['Pclass'].mean()
Pclass_vari=df['Pclass'].std(ddof=0)
Cabin_mean=df['Cabin'].mean()
Cabin_vari=df['Cabin'].std(ddof=0)
Embarked_mean=df['Embarked'].mean()
Embarked_vari=df['Embarked'].std(ddof=0)
SibSp_mean=df['SibSp'].mean()
SibSp_vari=df['SibSp'].std(ddof=0)
Title_mean=df['Title'].mean()
Title_vari=df['Title'].std(ddof=0)

# Vectorized standardization. FIX: the original used Series.apply with a
# lambda that re-evaluated df[col].mean() / df[col].std(ddof=0) for EVERY
# element — O(n^2) — and ignored the statistics computed just above. The
# values are identical because the column is unchanged while apply runs.
df['Age']=(df['Age']-Age_mean)/Age_vari
df['Fare']=(df['Fare']-Fare_mean)/Fare_vari
df['Name_length']=(df['Name_length']-Name_length_mean)/Name_length_vari
df['Pclass']=(df['Pclass']-Pclass_mean)/Pclass_vari
df['Cabin']=(df['Cabin']-Cabin_mean)/Cabin_vari
df['Embarked']=(df['Embarked']-Embarked_mean)/Embarked_vari
df['SibSp']=(df['SibSp']-SibSp_mean)/SibSp_vari
df['Title']=(df['Title']-Title_mean)/Title_vari

print('\n','===============================下一步,数据关系===============================','\n')
# Pairwise correlation heatmap of the engineered features.
sns.heatmap(df[['Pclass','Sex','Age','SibSp','Parch','Fare','Embarked','Age_level','Name_length_l','Cabin','Title','Mother']].corr(), annot=True, fmt = ".2f", cmap = "coolwarm")
#plt.show()


print('\n','===============================下一步,建模===============================','\n')
print(df.head(20))
print('1.输入输出数据')
# Feature matrix actually fed to the network — a hand-picked subset of the
# engineered columns (the full candidate list is kept in the comment).
X_data=df[['Pclass','Sex','SibSp','Parch','Fare','Embarked','Age_level','Name_length_l']]  #[['Pclass','Sex','Age','SibSp','Parch','Fare','Embarked','Age_level','Name_length_l','Cabin','Title','Mother']]
Y_data=df[['Survived']]
print(X_data.shape)
print(Y_data.shape)


print('2.超参数调节')
nn_struc=[25,15,10,]  # hidden-layer widths (input and output layers excluded)
lr=0.005              # Adam learning rate
iters=15000           # number of full-batch training steps
keep_prob_train=0.5   # dropout keep-probability used during training (1.0 at eval)


print('3.定义隐层结构')
def add_layer(input,in_size,out_size,keep_prob,activation=None):
    """Build one fully connected layer: activation(dropout(input @ W + b)).

    Args:
        input: 2-D tensor of shape [batch, in_size].
        in_size: fan-in — number of input units.
        out_size: number of units in this layer.
        keep_prob: dropout keep-probability placeholder (feed 1.0 at eval time).
        activation: optional activation fn (e.g. tf.nn.relu); None = linear.

    Returns:
        2-D tensor of shape [batch, out_size].
    """
    with tf.name_scope('Weight'):
        # He-style uniform init, scaled by sqrt(2 / fan_in).
        # BUG FIX: the original scaled EVERY layer by np.sqrt(2/nn_struc[-1])
        # — the width of the last hidden layer — which mis-scales all other
        # layers and couples this helper to the global nn_struc. The correct
        # fan-in for this layer is in_size.
        W=tf.Variable(tf.multiply(tf.random_uniform([in_size,out_size],minval=-0.5,maxval=0.5,name='W'),np.sqrt(2/in_size)))
    with tf.name_scope('Bias'):
        b=tf.Variable(tf.zeros([1,out_size],name='b'))
    out_put=tf.matmul(input,W)+b
    # NOTE(review): dropout is applied to the pre-activation here (and the
    # caller also applies it to the sigmoid output layer); conventionally it
    # follows the activation of hidden layers only — left as-is.
    out_put=tf.nn.dropout(out_put,keep_prob)
    if activation is not None:
        out_put=activation(out_put)
    return out_put


def n_mid_layers(input,in_size,nn_struc,keep_prob):
    """Stack len(nn_struc) hidden ReLU layers on top of `input`.

    nn_struc[i] gives the width of hidden layer i+1; each layer lives under
    its own name scope ('Layer1', 'Layer2', ...). Returns the output tensor
    of the last hidden layer.
    """
    layer_out=input
    fan_in=in_size
    for idx,width in enumerate(nn_struc,start=1):
        with tf.name_scope('Layer%d'%idx):
            layer_out=add_layer(layer_out,fan_in,width,keep_prob,activation=tf.nn.relu)
            fan_in=width
    return layer_out

print('4.定义输入placeholder')
# Graph inputs: features X, labels Y, and the dropout keep-probability
# (fed keep_prob_train while training, 1.0 while evaluating).
with tf.name_scope('Inputs'):
    X=tf.placeholder(tf.float32,[None,X_data.shape[1]],name='input_X')
    Y=tf.placeholder(tf.float32,[None,1],name='input_Y')
    keep_prob=tf.placeholder(tf.float32)


print('5.建立n层模型')
L_mid=n_mid_layers(X,X_data.shape[1],nn_struc,keep_prob)  # hidden stack; X.shape[1] would also work here

with tf.name_scope('Predition'):
    # Output layer: a single sigmoid unit giving P(Survived=1).
    Pred=add_layer(L_mid,nn_struc[-1],1,keep_prob,activation=tf.nn.sigmoid)

print('6.定义预测结果和准确率')
Pred_final=tf.cast(tf.greater_equal(Pred,0.5),tf.float32)  # threshold the probability at 0.5 -> hard 0/1 prediction
Accuracy=tf.reduce_mean(tf.cast(tf.equal(Pred_final,Y),tf.float32),0)  # fraction of correct predictions

print('7.计算损失函数')
with tf.name_scope('Loss'):
    Loss=tf.reduce_mean(tf.square(Y-Pred),axis=0,name='Loss')    # mean squared error
    # #Loss=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=Y,logits=Pred),axis=0,name='Loss')      # cross-entropy alternative; performed no better (NOTE(review): it expects raw LOGITS, but Pred is already sigmoid-activated)

    # a=tf.multiply(Y,tf.log(Pred))       # hand-rolled binary cross-entropy attempt — never worked
    # b=tf.multiply(tf.subtract(1.0,Y),tf.log(tf.subtract(1.0,Pred)))
    # a=tf.multiply(-1.0,a)
    # b=tf.multiply(-1.0,b)
    # Loss=tf.reduce_mean(tf.add(a,b),axis=0)

print('8.反向传播和优化器')
with tf.name_scope('Train'):
    train_step=tf.train.AdamOptimizer(lr).minimize(Loss)
    #train_step=tf.train.AdagradOptimizer(lr).minimize(Loss)
    #train_step=tf.train.MomentumOptimizer(lr,0.9).minimize(Loss)
    #train_step=tf.train.GradientDescentOptimizer(lr).minimize(Loss)

print('9.定义全局参数初始化')
init=tf.global_variables_initializer()

print('10.运行模型')
sess=tf.Session()
Writer=tf.summary.FileWriter("output",sess.graph)  # TensorBoard graph dump

# Full-batch training: every step feeds the entire training set.
sess.run(init)
for i in range(iters+1):
    sess.run(train_step,feed_dict={X:X_data,Y:Y_data,keep_prob:keep_prob_train})
    if i%500==0:
        # Report training accuracy with dropout disabled (keep_prob=1).
        print(sess.run(Accuracy, feed_dict={X: X_data, Y: Y_data,keep_prob:1}))
    if i==iters:
        # Final diagnostics: sample probabilities, hard predictions, labels,
        # loss, and training accuracy.
        print(sess.run(Pred[:3],feed_dict={X:X_data,keep_prob:1}))
        print(sess.run(Pred_final[:3], feed_dict={X: X_data,keep_prob:1}))
        print(sess.run(Y[:3], feed_dict={X: X_data, Y: Y_data,keep_prob:1}))
        print('Loss=', sess.run(Loss, feed_dict={X: X_data, Y: Y_data,keep_prob:1}))
        print('准确度=',sess.run(Accuracy, feed_dict={X: X_data, Y: Y_data,keep_prob:1}))



print('\n','===============================下一步,测试数据===============================','\n')

# Load the Kaggle test set and repeat the same cleaning / feature
# engineering that was applied to the training frame above.
dtest=pd.read_csv('test.csv')
print('1.数据处理和清洗')

#print(dtest.describe(include='all'))
#print(df.describe(include='all'))

print('     1.1 Fare 填充')
# Fill the missing Fare value(s) in test.csv with the raw TRAINING-set mean.
# BUG FIX: the original used df['Fare'].mean(), but by this point df['Fare']
# has already been standardized (its mean is ~0), so the missing fare was
# effectively filled with 0 and later re-standardized into a large negative
# value. Fare_mean holds the pre-standardization training mean.
dtest['Fare']=dtest['Fare'].fillna(Fare_mean)
#print(dtest.loc[150:154,'Fare'])

print('     1.2 转化sex,dummy处理 ')
# Same label encoding as the training set: male -> 0, female -> 1.
dtest.loc[dtest['Sex']=='male','Sex']=0
dtest.loc[dtest['Sex']=='female','Sex']=1

print('     1.3 转化cabin,dummy处理 ')
# Reduce each cabin string to its deck letter (first character).
# NOTE(review): iloc[:,9] is assumed to be the Cabin column in test.csv's
# native column order — confirm.
for i in range(0,dtest.shape[0]):
    if isinstance(dtest.iloc[i,9],str):
        dtest.iloc[i, 9] = dtest.iloc[i, 9][0]
    else:
        pass

dtest['Cabin']=dtest['Cabin'].fillna(0)  # replace NaN (no cabin recorded) with 0
# NOTE(review): unlike the training mapping there is no 'T'->8 entry here;
# presumably test.csv contains no deck-T cabin — confirm.
dtest.loc[dtest['Cabin']=='A','Cabin']=1
dtest.loc[dtest['Cabin']=='B','Cabin']=2
dtest.loc[dtest['Cabin']=='C','Cabin']=3
dtest.loc[dtest['Cabin']=='D','Cabin']=4
dtest.loc[dtest['Cabin']=='E','Cabin']=5
dtest.loc[dtest['Cabin']=='F','Cabin']=6
dtest.loc[dtest['Cabin']=='G','Cabin']=7

print('     1.4 转化Embarked,dummy处理')
dtest.loc[dtest['Embarked']=='S','Embarked']=1
dtest.loc[dtest['Embarked']=='C','Embarked']=2
dtest.loc[dtest['Embarked']=='Q','Embarked']=3

print('     1.5 填补Age及增加Age_level')
# Impute missing ages with a random forest, as for the training set, but
# with Sex in place of Survived (the test set has no Survived column).
age = dtest[['Age','Sex','Fare','Parch','SibSp','Pclass']]

age_notnull = age.loc[age.Age.notnull()]   # rows with a known Age (regressor training data)
age_isnull = age.loc[age.Age.isnull()]     # rows whose Age must be predicted

X_rfr = age_notnull.values[:,1:]   # features: Sex, Fare, Parch, SibSp, Pclass
Y_rfr = age_notnull.values[:,0]    # target: the Age column
rfr = RandomForestRegressor(n_estimators=1000,n_jobs=-1)  # n_jobs=-1: use all CPU cores
rfr.fit(X_rfr,Y_rfr)
predictAges = rfr.predict(age_isnull.values[:,1:])
dtest.loc[dtest.Age.isnull(),'Age'] = predictAges

dtest['Age_level']=np.nan
# Same ordered overwrites as the training set: newborn (<=1) -> 0,
# infant (<=6) -> 1, child (<=15) -> 2, female -> 3, adult male -> 4.
dtest.loc[dtest['Age']>15.0,'Age_level']=4  # adults (females overwritten next)
dtest.loc[dtest['Sex']==1,'Age_level']=3  # females
dtest.loc[dtest['Age']<=15.0,'Age_level']=2  # children 6-15
dtest.loc[dtest['Age']<=6.0,'Age_level']=1  # small children 1-6
dtest.loc[dtest['Age']<=1.0,'Age_level']=0  # newborns under 1

print('     1.6. 增加Name_length及Name_length_l')
dtest['Name_length']=dtest['Name'].apply(len)

dtest['Name_length_l']=np.nan
# Overwrite trick again: >=35 -> 2, 25<length<35 -> 1, <=25 -> 0.
dtest.loc[dtest['Name_length']>=35,'Name_length_l']=2
dtest.loc[dtest['Name_length']<35,'Name_length_l']=1
dtest.loc[dtest['Name_length']<=25,'Name_length_l']=0

print('     1.7通过Name增加称呼人物Title')
# Extract the title between ', ' and '.' from the name, as before.
dtest['Title']=dtest['Name'].apply(lambda x:x.split(', ')[1]).apply(lambda x:x.split('.')[0])
dtest[['Title','Pclass']].groupby(['Title']).mean().plot.bar()

print('     1.8.增加Mother')
dtest['Mother']=0
dtest.loc[dtest['Sex']==1,'Mother']=1
dtest.loc[dtest['Parch']==0,'Mother']=0
dtest.loc[dtest['Age']<20,'Mother']=0


# Encode the test-set titles with the SAME integer codes used for the
# training set ('Dona' appears only in the test set and reuses code 2).
dtest.loc[dtest['Title']=='Col','Title']=1
dtest.loc[dtest['Title']=='Dona','Title']=2
dtest.loc[dtest['Title']=='Dr','Title']=3
dtest.loc[dtest['Title']=='Master','Title']=7
dtest.loc[dtest['Title']=='Miss','Title']=8
dtest.loc[dtest['Title']=='Mr','Title']=11
dtest.loc[dtest['Title']=='Mrs','Title']=12
dtest.loc[dtest['Title']=='Ms','Title']=13
dtest.loc[dtest['Title']=='Rev','Title']=14

print('     1.9. 正则化,用train数据的μ和方差')
# Standardize the test features with the TRAINING-set mean/std saved
# earlier, so train and test share one scale. FIX: vectorized arithmetic
# replaces the original per-element Series.apply (identical values, O(n)
# instead of O(n^2)). The step label is renumbered 1.8 -> 1.9 because the
# previous step already used 1.8.
dtest['Age']=(dtest['Age']-Age_mean)/Age_vari
dtest['Fare']=(dtest['Fare']-Fare_mean)/Fare_vari
dtest['Name_length']=(dtest['Name_length']-Name_length_mean)/Name_length_vari
dtest['Pclass']=(dtest['Pclass']-Pclass_mean)/Pclass_vari
dtest['Cabin']=(dtest['Cabin']-Cabin_mean)/Cabin_vari
dtest['Embarked']=(dtest['Embarked']-Embarked_mean)/Embarked_vari
dtest['SibSp']=(dtest['SibSp']-SibSp_mean)/SibSp_vari
dtest['Title']=(dtest['Title']-Title_mean)/Title_vari


#print(dtest.head(20))
#print(dtest.isna().sum())
#print(df.describe(include='all'))

print('2.预测结果')
# Use the SAME feature columns, in the same order, as X_data at train time.
X_test=dtest[['Pclass','Sex','SibSp','Parch','Fare','Embarked','Age_level','Name_length_l']]

# Run the trained graph on the test features; keep_prob=1 disables dropout.
Y_Prediction_test=sess.run(Pred_final,feed_dict={X:X_test,keep_prob:1})
print(Y_Prediction_test)




  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值