Logistic Regression

Using the Titanic dataset downloaded from Kaggle.

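As a quick refresher: logistic regression passes a linear combination of the input features through the sigmoid function to get the probability of the positive class, which here is survival. In the code below, combine_inputs computes the linear part and inference applies the sigmoid:

    \sigma(z) = \frac{1}{1 + e^{-z}}, \qquad P(y = 1 \mid x) = \sigma(w^\top x + b)
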
import tensorflow as tf

# Initialize the logistic regression parameters (weights and bias)
W = tf.Variable(tf.zeros([5, 1]), name="weights")
b = tf.Variable(0.0, name="bias")

# The previous (linear) inference is now only used to combine the inputs
def combine_inputs(X):
    return tf.matmul(X, W) + b

# The new inference applies the sigmoid function to the combined inputs
def inference(X):
    return tf.sigmoid(combine_inputs(X))

# With a sigmoid output, the standard loss function is cross-entropy
def loss(X, Y):
    return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=combine_inputs(X), labels=Y))
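# For reference, the per-example sigmoid cross-entropy computed above is
#   -Y * log(sigmoid(z)) - (1 - Y) * log(1 - sigmoid(z)),
# where z = combine_inputs(X) are the raw logits; sigmoid_cross_entropy_with_logits
# works from the logits rather than the sigmoid output for numerical stability.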

# Predict and evaluate the model
def evaluate(sess, X, Y):
    predicted = tf.cast(inference(X) > 0.5, tf.float32)
    print(sess.run(tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32))))
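# In evaluate above, a predicted probability greater than 0.5 counts as a positive
# ("survived") prediction, and accuracy is the fraction of predictions matching Y.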

# Train with a gradient descent optimizer
def train(tol_loss):
    learning_rate = 0.01
    return tf.train.GradientDescentOptimizer(learning_rate).minimize(tol_loss)
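# minimize() computes the gradients of the total loss with respect to W and b
# and applies a single gradient descent update each time the returned op is run.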

# Read the CSV file
def read_csv(batch_size, file_name, record_defaults):

    filename_queue = tf.train.string_input_producer([file_name])
    reader = tf.TextLineReader(skip_header_lines=1)

    key, value = reader.read(filename_queue)

    decoded = tf.decode_csv(value, record_defaults=record_defaults)  # decode each text line into a tuple of column tensors; record_defaults sets each column's dtype and default value

    return tf.train.shuffle_batch(decoded, batch_size=batch_size, capacity=batch_size * 50,
                                  min_after_dequeue=batch_size)  # dequeue shuffled batches of batch_size rows
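# Note on read_csv above: shuffle_batch returns one tensor per CSV column, each of
# shape [batch_size]; rows are only produced once the queue runner threads are
# started (tf.train.start_queue_runners in the session below).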



def inputs():
    passenger_id, survived, pclass, name, sex, age, sibsp, parch, ticket, fare,\
    cabin, embarked = read_csv(100, "/home/hadoop/PycharmProjects/tens/train.csv",
                                                                 [[0.0], [0.0], [0], [""], [""], [0.0], [0.0], [0.0],
                                                                 [""], [0.0], [""], [""]])

    is_first_class = tf.to_float(tf.equal(pclass, [1]))
    is_second_class = tf.to_float(tf.equal(pclass, [2]))
    is_third_class = tf.to_float(tf.equal(pclass, [3]))

    gender = tf.to_float(tf.equal(sex, ["female"]))

    features =  tf.transpose(tf.stack([is_first_class, is_second_class, is_third_class, gender, age]))
    survived = tf.reshape(survived, [100,1])

    return features, survived
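# In inputs above, Pclass is expanded into three 0/1 indicator columns and Sex into
# a single "is female" indicator, so each row of features is
# [is_first_class, is_second_class, is_third_class, gender, age] and the feature
# matrix has shape [100, 5]; the survival labels are reshaped to [100, 1] to match
# the model output.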


with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    X, Y = inputs()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    tol_loss = loss(X, Y)
    train_op = train(tol_loss)

    train_step = 1001
    for step in range(train_step):
        sess.run(train_op)
        if step % 100 == 0:
            print "%d loss" %step,  sess.run(tol_loss)

    evaluate(sess, X, Y)
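    # Minimal sketch: score one hypothetical passenger with the trained model --
    # a 30-year-old female travelling first class, using the same feature order
    # as in inputs() above.
    example = tf.constant([[1.0, 0.0, 0.0, 1.0, 30.0]])
    print("survival probability:", sess.run(inference(example)))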
    coord.request_stop()
    coord.join(threads)

Results (loss printed every 100 steps, then accuracy on a batch of training data):

0 loss 0.660088
100 loss 0.649859
200 loss 0.678345
300 loss 0.645126
400 loss 0.6176
500 loss 0.615742
600 loss 0.562361
700 loss 0.562884
800 loss 0.500807
900 loss 0.598788
1000 loss 0.564039
0.81