# Binary classification: Titanic survival prediction (二分类问题: 泰坦尼克号获救预测)

# -*- coding: utf-8 -*-


# @Time    : 2018/12/13 10:46
# @Author  : WenZhao
# @Email   : 46546924@qq.com
# @File    : tt.py
# @Software: PyCharm
'''
    泰坦尼克号预测(线性回归二分类)
    1.pandas数据处理
    2.numpy数据作为tensorflow的输入
    3.在线性回归的基础上增加sigmoid函数实现二分类
    4.交叉熵
    5.构造batch训练(batch越多构造训练结果越稳定)
    6.训练速度
    7.训练结果的可视化
'''

import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ---- Training-data preprocessing -------------------------------------------
# Load the Kaggle Titanic training set and turn the raw columns into a purely
# numeric 12-feature matrix plus a 0/1 target column vector.
data = pd.read_csv('./data/tt/train.csv')
data = data[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp',
             'Parch', 'Fare', 'Cabin', 'Embarked']]

# Impute missing ages with the column mean.
data['Age'] = data['Age'].fillna(data['Age'].mean())
# Encode the Cabin strings as integer codes (factorize maps NaN to -1).
data['Cabin'] = pd.factorize(data.Cabin)[0]
# Any remaining missing values (e.g. Embarked) become 0.
data.fillna(0, inplace=True)

# Sex -> 1 for male, 0 otherwise.
data['Sex'] = [1 if s == 'male' else 0 for s in data.Sex]

# One-hot encode passenger class, then drop the original column.
for cls, col in ((1, 'p1'), (2, 'p2'), (3, 'p3')):
    data[col] = np.array(data['Pclass'] == cls).astype(np.int32)
del data['Pclass']
# One-hot encode port of embarkation the same way.
for port, col in (('S', 'e1'), ('C', 'e2'), ('Q', 'e3')):
    data[col] = np.array(data['Embarked'] == port).astype(np.int32)
del data['Embarked']

# Feature matrix (n, 12) and label vector reshaped to an (n, 1) column.
data_train = data[['Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin',
                   'p1', 'p2', 'p3', 'e1', 'e2', 'e3']].values
data_target = data['Survived'].values.reshape(len(data), 1)

# ---- Model graph -----------------------------------------------------------
# A single-unit logistic-regression model: sigmoid(x @ W + b).
x = tf.placeholder("float", shape=[None, 12])  # 12 numeric input features
y = tf.placeholder("float", shape=[None, 1])   # 0/1 survival label

W = tf.Variable(tf.random_normal([12, 1]))
b = tf.Variable(tf.random_normal([1]))
logits = tf.matmul(x, W) + b

# Hard 0/1 prediction: sigmoid probability above 0.5 means "survived".
pred = tf.cast(tf.sigmoid(logits) > 0.5, tf.float32)

# Numerically stable cross-entropy computed from the raw logits.
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits))

# Plain SGD with a small fixed learning rate.
train_step = tf.train.GradientDescentOptimizer(0.0003).minimize(loss)

# Fraction of samples where the hard prediction matches the label.
accuracy = tf.reduce_mean(tf.cast(tf.equal(pred, y), tf.float32))

# ---- Test-data preprocessing -----------------------------------------------
# Apply the same feature engineering to the held-out test set as was applied
# to the training set above.
data_test = pd.read_csv('./data/tt/test.csv')
data_test = data_test[['Pclass', 'Sex', 'Age', 'SibSp',
                       'Parch', 'Fare', 'Cabin', 'Embarked']]
# Fix: impute missing ages with the TRAINING-set mean (not the test-set mean)
# so the model sees the same statistics it was trained on.
data_test['Age'] = data_test['Age'].fillna(data['Age'].mean())
# NOTE(review): factorize() assigns integer codes independently of the
# training run, so the same cabin string can map to different integers in
# train and test. The train-time categories were already discarded above, so
# this cannot be fixed here — consider keeping the train mapping upstream.
data_test['Cabin'] = pd.factorize(data_test.Cabin)[0]
# Remaining missing values (e.g. the one missing Fare) become 0.
data_test.fillna(0, inplace=True)
data_test['Sex'] = [1 if s == 'male' else 0 for s in data_test.Sex]
# One-hot encode passenger class and port of embarkation, as for training.
data_test['p1'] = np.array(data_test['Pclass'] == 1).astype(np.int32)
data_test['p2'] = np.array(data_test['Pclass'] == 2).astype(np.int32)
data_test['p3'] = np.array(data_test['Pclass'] == 3).astype(np.int32)
del data_test['Pclass']
data_test['e1'] = np.array(data_test['Embarked'] == 'S').astype(np.int32)
data_test['e2'] = np.array(data_test['Embarked'] == 'C').astype(np.int32)
data_test['e3'] = np.array(data_test['Embarked'] == 'Q').astype(np.int32)
del data_test['Embarked']
# Fix: make the feature-column order explicit so it is guaranteed to match
# data_train, instead of relying on the incidental insertion order.
data_test = data_test[['Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin',
                       'p1', 'p2', 'p3', 'e1', 'e2', 'e3']]

# Ground-truth labels for the test passengers as an (n, 1) int32 column.
# Fix: infer the row count (-1) rather than hard-coding 418.
test_label = pd.read_csv('./data/tt/gender.csv')
test_label = np.reshape(test_label.Survived.values.astype(np.int32), (-1, 1))

# ---- Training loop ---------------------------------------------------------
sess = tf.Session()
sess.run(tf.global_variables_initializer())

loss_train = []  # training loss, sampled every 1000 epochs
train_acc = []   # accuracy on the last mini-batch, same cadence
test_acc = []    # accuracy on the held-out test set, same cadence

BATCH = 100
# Fix: ceiling division, so a training-set size divisible by BATCH does not
# produce an empty trailing batch (the original `len//100 + 1` would).
n_batches = (len(data_target) + BATCH - 1) // BATCH

for i in range(25000):
    # Reshuffle the training set at the start of every epoch.
    index = np.random.permutation(len(data_target))
    data_train = data_train[index]
    data_target = data_target[index]
    for n in range(n_batches):
        batch_xs = data_train[n * BATCH:(n + 1) * BATCH]
        batch_ys = data_target[n * BATCH:(n + 1) * BATCH]
        sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})

    if i % 1000 == 0:
        loss_temp = sess.run(loss, feed_dict={x: batch_xs, y: batch_ys})
        # Fix: record the evaluated scalar, not the `loss` tensor object.
        loss_train.append(loss_temp)
        train_acc_temp = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys})
        train_acc.append(train_acc_temp)
        test_acc_temp = sess.run(accuracy, feed_dict={x: data_test, y: test_label})
        test_acc.append(test_acc_temp)
        print(loss_temp, train_acc_temp, test_acc_temp)

# Fix: release the session's resources once training is done.
sess.close()

# ---- Visualise the accuracy curves -----------------------------------------
plt.plot(train_acc, 'b-', label='train_acc')
plt.plot(test_acc, 'r--', label='test_acc')
plt.title('train and test accuracy')
plt.legend()
plt.show()

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值