6. TensorFlow: linear logistic regression for classification

#coding:utf-8
"""
生成随机数据
进行线性回归
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle

# Simulate data points: samples_per_class points around `mean` get label 0;
# for each offset d in `diff`, another cluster around `mean + d` gets label ci + 1.
def generate(sample_size, mean, cov, diff):
    samples_per_class = int(sample_size / 2)

    X0 = np.random.multivariate_normal(mean, cov, samples_per_class)
    Y0 = np.zeros(samples_per_class)

    for ci, d in enumerate(diff):
        X1 = np.random.multivariate_normal(mean + d, cov, samples_per_class)
        Y1 = (ci + 1) * np.ones(samples_per_class)

        X0 = np.concatenate((X0, X1))
        Y0 = np.concatenate((Y0, Y1))

    X, Y = shuffle(X0, Y0)

    return X, Y
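# For example, generate(1000, mean, cov, [3.0]) with a 2-D mean and identity
# covariance returns ~500 points around `mean` labeled 0 and ~500 points around
# `mean + 3.0` labeled 1, shuffled together (matching the call below).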


input_dim = 2
np.random.seed(10)
num_classes = 2  # binary classification
mean = np.random.randn(input_dim)  # base cluster mean in the 2-D feature space
cov = np.eye(input_dim)            # identity covariance
# dataset
X, Y = generate(1000, mean, cov, [3.0])
print(len(X), len(Y))
colors = ['r' if l == 0 else 'b' for l in Y[:]]
plt.scatter(X[:, 0], X[:, 1], c=colors)
plt.xlabel("Scaled age (in yrs)")
plt.ylabel("Tumor size (in cm)")
plt.show()

tf.reset_default_graph()
x = tf.placeholder(tf.float32, shape=[None, input_dim])
y = tf.placeholder(tf.float32, shape=[None, 1])

w = tf.Variable(initial_value=tf.random_normal([input_dim, 1]), name="weight") # 2 x 1
b = tf.Variable(initial_value=tf.random_normal([1]), name="bias")  # 1

logits = tf.matmul(x, w) + b
output = tf.nn.sigmoid(logits)
cost = tf.reduce_mean(-(y * tf.log(output) + (1-y) * tf.log(1 - output)))
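# Note (alternative, not used in the run below): computing sigmoid and log
# separately can be numerically unstable for large logits; the fused op is
# equivalent and safer:
# cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits))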
optimizer = tf.train.AdamOptimizer(learning_rate=0.04).minimize(cost)

epochs = 50
batch_size = 64
display_step = 1
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(epochs):
        total_batch = len(Y) // batch_size  # integer division so range() also works on Python 3
        epoch_loss = 0
        for i in range(total_batch):
            # print("batch:", i)
            batch_x = X[i*batch_size:(i+1)*batch_size, :]
            batch_y = Y[i*batch_size:(i+1)*batch_size]
            batch_y = np.reshape(batch_y, [-1, 1])
            _, loss = sess.run([optimizer, cost], feed_dict={x:batch_x, y:batch_y})
            epoch_loss += loss
        epoch_loss /= total_batch
        if epoch % display_step == 0:
            print("epoch: %d, loss: %.4f" % (epoch, epoch_loss))
    print("done..")
    # Visualize the result on a fresh sample
    train_X, train_Y = generate(100, mean, cov, [3.0])
    colors = ['r' if l == 0 else 'b' for l in train_Y[:]]
    plt.scatter(train_X[:, 0], train_X[:, 1], c=colors)
    # plt.scatter(train_X[:, 0], train_X[:, 1], c=train_Y)
    # plt.colorbar()

    # Decision boundary: x1*w1 + x2*w2 + b = 0  =>  x2 = -x1*w1/w2 - b/w2
    # To draw it, pick x1 values and solve for x2 with the trained weights
    # (x1 and x2 are the x/y axes of the 2-D plot).
    line_x = np.linspace(-1, 8, 200)
    line_y = -line_x * (sess.run(w)[0] / sess.run(w)[1]) - sess.run(b) / sess.run(w)[1]
    plt.plot(line_x, line_y, label='predict line')
    plt.xlabel("x1")
    plt.ylabel("x2")
    plt.legend()
    plt.show()
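
To check the fit numerically as well as visually, a minimal sketch (an addition not reflected in the log below) can threshold the sigmoid output at 0.5 on a fresh sample drawn with the same `generate` helper; it would sit inside the `with tf.Session()` block:

    # Evaluation sketch: predict class 1 when sigmoid(logits) > 0.5
    test_X, test_Y = generate(200, mean, cov, [3.0])
    probs = sess.run(output, feed_dict={x: test_X})
    preds = (probs.reshape(-1) > 0.5).astype(np.float64)
    print("test accuracy: %.4f" % np.mean(preds == test_Y))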

Output:

/usr/local/bin/python2.7 /Users/ming/Downloads/zhangming/tf_demo/6.tf_liner_logistic.py
(1000, 1000)
2018-11-18 22:03:53.440080: I tensorflow/core/platform/cpu_feature_guard.cc:140] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
epoch: 0, loss: 1.9323
epoch: 1, loss: 1.0213
epoch: 2, loss: 0.8418
epoch: 3, loss: 0.6548
epoch: 4, loss: 0.5172
epoch: 5, loss: 0.4096
epoch: 6, loss: 0.3318
epoch: 7, loss: 0.2774
epoch: 8, loss: 0.2391
epoch: 9, loss: 0.2114
epoch: 10, loss: 0.1907
epoch: 11, loss: 0.1746
epoch: 12, loss: 0.1618
epoch: 13, loss: 0.1512
epoch: 14, loss: 0.1424
epoch: 15, loss: 0.1348
epoch: 16, loss: 0.1283
epoch: 17, loss: 0.1226
epoch: 18, loss: 0.1175
epoch: 19, loss: 0.1129
epoch: 20, loss: 0.1088
epoch: 21, loss: 0.1052
epoch: 22, loss: 0.1018
epoch: 23, loss: 0.0987
epoch: 24, loss: 0.0959
epoch: 25, loss: 0.0933
epoch: 26, loss: 0.0909
epoch: 27, loss: 0.0887
epoch: 28, loss: 0.0866
epoch: 29, loss: 0.0847
epoch: 30, loss: 0.0829
epoch: 31, loss: 0.0812
epoch: 32, loss: 0.0796
epoch: 33, loss: 0.0781
epoch: 34, loss: 0.0767
epoch: 35, loss: 0.0754
epoch: 36, loss: 0.0741
epoch: 37, loss: 0.0730
epoch: 38, loss: 0.0718
epoch: 39, loss: 0.0708
epoch: 40, loss: 0.0698
epoch: 41, loss: 0.0688
epoch: 42, loss: 0.0679
epoch: 43, loss: 0.0670
epoch: 44, loss: 0.0662
epoch: 45, loss: 0.0654
epoch: 46, loss: 0.0647
epoch: 47, loss: 0.0640
epoch: 48, loss: 0.0633
epoch: 49, loss: 0.0626
done..

Process finished with exit code 0
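
The script targets the TensorFlow 1.x API (placeholders, sessions, tf.train.AdamOptimizer), and the log above comes from a Python 2.7 run. If only TensorFlow 2.x is installed, one way to run it largely unchanged (an assumption about your environment, not something verified by the run above) is to switch the import to the 1.x compatibility module:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores placeholders, sessions and tf.train optimizers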
 
