TensorFlow implementation of linear logistic regression:
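For reference, the model that the code below builds is a single sigmoid unit trained with the binary cross-entropy loss; written out with the same w and b that appear in the code:

\hat{y} = \sigma(xw + b) = \frac{1}{1 + e^{-(xw + b)}}

loss = -\frac{1}{N}\sum_{i=1}^{N}\left[ y_i \log \hat{y}_i + (1 - y_i)\log(1 - \hat{y}_i) \right]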

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

def generate(sample_size, mean, cov, diff, regression):
    num_classes = 2
    samples_per_class = int(sample_size / 2)
    # draw the first class from a multivariate normal distribution
    x0 = np.random.multivariate_normal(mean, cov, samples_per_class)
    y0 = np.zeros(samples_per_class)

    # shift the mean by each offset in diff to create the remaining class(es)
    for ci, d in enumerate(diff):
        x1 = np.random.multivariate_normal(mean + d, cov, samples_per_class)
        y1 = (ci + 1) * np.ones(samples_per_class)

        x0 = np.concatenate((x0, x1))
        y0 = np.concatenate((y0, y1))
    if not regression:
        # one-hot encode the labels when regression-style targets are not wanted
        class_ind = [y0 == class_number for class_number in range(num_classes)]
        y0 = np.asarray(np.column_stack(class_ind), dtype=np.float32)
    x, y = x0, y0
    return x, y
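# For example, generate(100, mean, cov, [3.0], True) returns x with shape
# (100, 2) and y with shape (100,): the first 50 labels are 0 and the last 50
# are 1 (the samples are not shuffled).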

np.random.seed(10)
num_classes = 2
mean = np.random.randn(num_classes)
cov = np.eye(num_classes)
x, y = generate(100, mean, cov, [3.0], True)
# visualize the generated samples
colors = ['r' if l == 0 else 'b' for l in y]
plt.scatter(x[:, 0], x[:, 1], c=colors)
plt.xlabel("Scaled age")
plt.ylabel("Tumor size")
plt.show()

# build the network structure (TensorFlow 1.x graph-style API)
input_dim = 2
lab_dim = 1
input_feature = tf.placeholder(tf.float32, [None, input_dim])
input_labels = tf.placeholder(tf.float32, [None, lab_dim])
# define the trainable parameters
w = tf.Variable(tf.random_normal([input_dim, lab_dim]), name='weight')
b = tf.Variable(tf.zeros([lab_dim]), name='bias')

# sigmoid output, per-sample binary cross-entropy, and squared error (for reporting only)
output = tf.nn.sigmoid(tf.matmul(input_feature, w) + b)
cross_entropy = -(input_labels * tf.log(output) + (1 - input_labels) * tf.log(1 - output))
ser = tf.square(output - input_labels)

loss = tf.reduce_mean(cross_entropy)
err = tf.reduce_mean(ser)
optimizer = tf.train.AdamOptimizer(0.04)
train = optimizer.minimize(loss)
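# Note (not in the original code): the hand-written cross-entropy above can
# produce NaN when output saturates at exactly 0 or 1. A numerically safer
# equivalent keeps the pre-sigmoid logits and uses TensorFlow's fused op:
#   logits = tf.matmul(input_feature, w) + b
#   loss = tf.reduce_mean(
#       tf.nn.sigmoid_cross_entropy_with_logits(labels=input_labels, logits=logits))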

# training hyperparameters
maxEpochs = 50
minibatchSize = 25
# launch the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # feed the data to the model, one minibatch at a time
    for epoch in range(maxEpochs):
        sumerr = 0
        for i in range(len(y) // minibatchSize):
            x1 = x[i * minibatchSize: (i + 1) * minibatchSize, :]
            y1 = np.reshape(y[i * minibatchSize: (i + 1) * minibatchSize], [-1, 1])
            _, lossval, outputval, errval = sess.run([train, loss, output, err],
                                                     feed_dict={input_feature: x1, input_labels: y1})
            sumerr = sumerr + errval
        # average the squared error over the number of minibatches in this epoch
        print("Epoch:", '%04d' % (epoch + 1), 'cost:', '{:.9f}'.format(lossval),
              'err=', sumerr / (len(y) // minibatchSize))
    # generate a fresh batch of samples and plot them with the learned boundary
    train_x, train_y = generate(100, mean, cov, [3.0], True)
    colors = ['r' if l == 0 else 'b' for l in train_y]
    plt.scatter(train_x[:, 0], train_x[:, 1], c=colors)
    x = np.linspace(-1, 8, 200)
    # decision boundary: w[0]*x1 + w[1]*x2 + b = 0  =>  x2 = -(w[0]/w[1])*x1 - b/w[1]
    y = -x * (sess.run(w)[0] / sess.run(w)[1]) - sess.run(b) / sess.run(w)[1]
    plt.plot(x, y, label='Decision boundary')
    plt.legend()
    plt.show()
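The code above targets the TensorFlow 1.x graph API. If only TensorFlow 2.x is installed (an assumption about your environment, not something the original post covers), the usual workaround is to run it through the compatibility layer by replacing the import:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()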





Visualization of the results:
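The scatter plot shows the two simulated classes (red for label 0, blue for label 1), and the straight line is the learned decision boundary, i.e. the set of points where the sigmoid output equals 0.5:

\sigma(w_1 x_1 + w_2 x_2 + b) = 0.5 \iff w_1 x_1 + w_2 x_2 + b = 0 \iff x_2 = -\frac{w_1}{w_2} x_1 - \frac{b}{w_2}

which is exactly the line computed from sess.run(w) and sess.run(b) at the end of the script.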
