"Deep Learning with TensorFlow" Study Notes, Part 5

Linear Logistic Regression

Binary and Multiclass Classification
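For reference, the code below implements the two standard losses (these are the usual textbook definitions, not formulas quoted from the book): a sigmoid output with binary cross-entropy for the two-class case, and a softmax output with categorical cross-entropy for the multiclass case.

\sigma(z) = \frac{1}{1+e^{-z}}, \qquad L_{\mathrm{binary}} = -\bigl[\, y \log \hat{y} + (1-y)\log(1-\hat{y}) \,\bigr]

\mathrm{softmax}(z)_k = \frac{e^{z_k}}{\sum_j e^{z_j}}, \qquad L_{\mathrm{multi}} = -\sum_k y_k \log \hat{y}_k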

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from matplotlib.colors import colorConverter, ListedColormap

# Multiclass: one-hot encoding helper
from sklearn.preprocessing import OneHotEncoder
def onehot(y, start, end):
    # Fit the encoder on the full label range [start, end) so every class
    # gets a column, then transform the (N, 1) integer label array.
    ohe = OneHotEncoder()
    a = np.arange(start, end)
    b = np.reshape(a, [-1, 1])
    ohe.fit(b)
    c = ohe.transform(y).toarray()
    return c
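
# Quick sanity check of onehot (illustrative input, not from the book):
# each integer label in the (N, 1) column becomes a one-hot row.
#
# onehot(np.array([[0], [2], [1]]), 0, 3)
# -> array([[1., 0., 0.],
#           [0., 0., 1.],
#           [0., 1., 0.]])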

# def generate(sample_size, mean, cov, diff, regression):
#     num_classes = 2
#     sample_per_class = int(sample_size/2)
#
#     # Class 0 is centered at `mean`; each entry d in `diff` adds one more
#     # class centered at mean+d.
#     X0 = np.random.multivariate_normal(mean, cov, sample_per_class)
#     Y0 = np.zeros(sample_per_class)
#     for ci, d in enumerate(diff):
#         X1 = np.random.multivariate_normal(mean+d, cov, sample_per_class)
#         Y1 = (ci+1)*np.ones(sample_per_class)
#
#         X0 = np.concatenate((X0,X1))
#         Y0 = np.concatenate((Y0,Y1))
#
#     if not regression:
#         # One-hot encode: stack one boolean column per class.
#         class_ind = [Y0==class_number for class_number in range(num_classes)]
#         Y0 = np.asarray(np.column_stack(class_ind), dtype=np.float32)
#
#     return X0,Y0

# Binary classification
# np.random.seed(10)
# num_classes = 2
# mean = np.random.randn(num_classes)
# cov = np.eye(num_classes)
# X,Y = generate(100, mean, cov, [3.],True)
#
# colors = ['r' if i == 0 else 'b' for i in Y[:]]
#
# # plt.scatter(X[:,0], X[:,1],c=colors)
# # # plt.show()
# # print(X,Y)
#
# lab_dim = 1
# input_dim = 2
# input_features = tf.placeholder(tf.float32,[None, input_dim])
# input_labels = tf.placeholder(tf.float32, [None, lab_dim])
#
# W = tf.Variable(tf.random_normal([input_dim, lab_dim]),name='weight')
# b = tf.Variable(tf.zeros([lab_dim]),name='bias')
#
# # Sigmoid output with a hand-written binary cross-entropy loss; the squared
# # error `ser` is only used for reporting.
# output = tf.nn.sigmoid(tf.matmul(input_features, W) + b)
# cross_entropy = -(input_labels * tf.log(output) + (1-input_labels) * tf.log(1-output))
# ser = tf.square(input_labels - output)
# loss = tf.reduce_mean(cross_entropy)
# err = tf.reduce_mean(ser)
# train = tf.train.AdamOptimizer(0.04).minimize(loss)
#
# maxEpochs = 50
# minibatchsize = 25
#
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#
#     for epoch in range(maxEpochs):
#         sumerr = 0
#         num_batches = np.int32(len(Y)/minibatchsize)
#         for i in range(num_batches):
#             x1 = X[i*minibatchsize:(i+1)*minibatchsize,:]
#             y1 = np.reshape(Y[i*minibatchsize:(i+1)*minibatchsize],[-1,1])
#             _, lossval, outputval, errval = sess.run([train,loss,output,err],{input_features:x1, input_labels:y1})
#             sumerr = sumerr + errval
#
#         # Average the accumulated per-batch error over the number of batches.
#         print("Epoch:","%04d" % (epoch+1)," cost=","{:.9f}".format(lossval), " err=",sumerr/num_batches)
#
#
#     plt.scatter(X[:,0],X[:,1],c=colors)
#     x = np.linspace(-1,8,200)
#     y = -x*(sess.run(W)[0]/sess.run(W)[1])-sess.run(b)/sess.run(W)[1]
#     plt.plot(x,y,label='Fitted line')
#     plt.legend()
#     plt.show()
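
# A note on the hand-written loss above: tf.log(output) produces NaN once the
# sigmoid saturates at exactly 0 or 1. A numerically stabler variant (a sketch
# using TensorFlow's fused op, not the book's code) keeps the raw logits:
#
# logits = tf.matmul(input_features, W) + b
# output = tf.nn.sigmoid(logits)
# cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=input_labels, logits=logits)
# loss = tf.reduce_mean(cross_entropy)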

# Multiclass classification
def generate(sample_size, num_classes, diff, regression):
    np.random.seed(10)
    mean = np.random.randn(2)
    cov = np.eye(2)
    sample_per_class = int(sample_size/num_classes)

    # Class 0 is centered at `mean`; each entry d in `diff` adds one more
    # class centered at mean+d (d broadcasts across the two coordinates).
    X0 = np.random.multivariate_normal(mean, cov, sample_per_class)
    Y0 = np.zeros(sample_per_class)
    for ci, d in enumerate(diff):
        X1 = np.random.multivariate_normal(mean+d, cov, sample_per_class)
        Y1 = (ci+1)*np.ones(sample_per_class)

        X0 = np.concatenate((X0,X1))
        Y0 = np.concatenate((Y0,Y1))

    if not regression:
        # One-hot encode the integer labels for classification.
        Y0 = np.reshape(Y0, [-1, 1])
        Y0 = onehot(Y0.astype(np.int32), 0, num_classes)
    X, Y = shuffle(X0, Y0)
    return X, Y

np.random.seed(10)

input_dim = 2
num_classes = 3

X, Y = generate(2000, num_classes, [[3.], [3., 0]], False)
aa = [np.argmax(i) for i in Y]   # recover integer class labels from one-hot rows
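
# Quick shape check (illustrative): sample_per_class is int(2000/3) = 666,
# so generate returns 3*666 = 1998 samples rather than a full 2000.
assert X.shape == (1998, 2) and Y.shape == (1998, 3)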

lab_dim = num_classes

input_features = tf.placeholder(tf.float32, [None, input_dim])
input_labels = tf.placeholder(tf.float32, [None, lab_dim])

W = tf.Variable(tf.random_normal([input_dim, lab_dim]), name="weight")
b = tf.Variable(tf.zeros([lab_dim]), name="bias")
output = tf.matmul(input_features, W) + b

z = tf.nn.softmax(output)

a1 = tf.argmax(z, axis=1)              # predicted class
b1 = tf.argmax(input_labels, axis=1)   # true class
err = tf.count_nonzero(a1 - b1)        # misclassified samples in the batch

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=input_labels, logits=output)
loss = tf.reduce_mean(cross_entropy)

train = tf.train.AdamOptimizer(0.04).minimize(loss)

maxEpochs = 50
minibatchSize = 25

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for epoch in range(maxEpochs):
        sumerr = 0
        num_batches = np.int32(len(Y)/minibatchSize)
        for i in range(num_batches):
            x1 = X[i*minibatchSize:(i+1)*minibatchSize,:]
            y1 = Y[i*minibatchSize:(i+1)*minibatchSize,:]

            _, lossval, outputval, errval = sess.run([train,loss,output,err],{input_features:x1, input_labels:y1})
            sumerr = sumerr + (errval/minibatchSize)   # per-batch error rate

        # Average the per-batch error rate over the number of batches.
        print("Epoch:",'%04d'%(epoch+1)," cost=","{:.9f}".format(lossval),"err=",sumerr/num_batches)

    colors = ['r' if i == 0 else 'b' if i == 1 else 'y' for i in aa]

    plt.scatter(X[:, 0], X[:, 1], c=colors)

    # x = np.linspace(-1,8,200)
    # y = -x*(sess.run(W)[0][0]/sess.run(W)[1][0]) - sess.run(b)[0]/sess.run(W)[1][0]
    # plt.plot(x, y, label='first line', lw=3)
    # y = -x*(sess.run(W)[0][1]/sess.run(W)[1][1]) - sess.run(b)[1]/sess.run(W)[1][1]
    # plt.plot(x, y, label='second line', lw=2)
    # y = -x*(sess.run(W)[0][2]/sess.run(W)[1][2]) - sess.run(b)[2]/sess.run(W)[1][2]
    # plt.plot(x, y, label='third line', lw=1)
    # plt.legend()
    # plt.show()
    # print(sess.run(W), sess.run(b))


    # train_X, train_Y = generate(200, num_classes, [[3.],[3.,0]], False)
    # aa = [np.argmax(i) for i in train_Y]
    # colors = ['r' if i == 0 else 'b' if i==1 else 'y' for i in aa[:]]
    # plt.scatter(train_X[:,0], train_X[:,1], c=colors)

    nb_of_xs = 200
    xs1 = np.linspace(-2.5, 8, num=nb_of_xs)
    xs2 = np.linspace(-2.5, 8, num=nb_of_xs)
    xx, yy = np.meshgrid(xs1, xs2)

    # Predicted class for every point on the grid.
    classification_plane = np.zeros((nb_of_xs, nb_of_xs))

    for i in range(nb_of_xs):
        for j in range(nb_of_xs):
            classification_plane[i,j] = sess.run(a1, {input_features:[[xx[i,j], yy[i,j]]]})
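
    # The double loop above issues one sess.run call per grid point (40,000
    # calls). An equivalent but much faster sketch evaluates the whole grid
    # in a single call:
    #
    # grid = np.c_[xx.ravel(), yy.ravel()]            # (nb_of_xs**2, 2)
    # preds = sess.run(a1, {input_features: grid})
    # classification_plane = preds.reshape(xx.shape)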

    cmap = ListedColormap([
        colorConverter.to_rgba('r', alpha=0.3),
        colorConverter.to_rgba('b', alpha=0.3),
        colorConverter.to_rgba('y', alpha=0.3)
    ])
    plt.contourf(xx, yy, classification_plane, cmap=cmap)
    plt.show()