Neural Network Classification (with Regularization)

To get started with TensorFlow, I highly recommend this course on the MOOC site:

人工智能——Tensorflow笔记

The task here: separate the blue points from the red points with a single curve.

# -*- coding: utf-8 -*-
"""
Created on Thu Aug  1 20:06:08 2019

@author: ushop
"""

'''
Regularization mitigates overfitting:
    Regularization adds a model-complexity term to the loss function.
    Penalizing the weights W weakens the influence of noise in the data
    (the bias b is generally NOT regularized).
    loss = loss(y, y_) + REGULARIZER * loss(W)

    loss(y, y_): loss over the model's predictions (cross entropy, MSE, ...)
    REGULARIZER: weight of the W penalty within the total loss

    L1: loss_l1(W) = sum(|Wi|)
        loss(W) = tf.contrib.layers.l1_regularizer(REGULARIZER)(W)
    L2: loss_l2(W) = sum(Wi^2)
        loss(W) = tf.contrib.layers.l2_regularizer(REGULARIZER)(W)

    #Add each weight's penalty term to the 'losses' collection
    tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(REGULARIZER)(W))

    #Total loss = prediction loss (e.g. cross entropy cem) + sum of all penalties
    loss = cem + tf.add_n(tf.get_collection('losses'))
'''
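#A quick numeric check of the two penalty formulas above (illustrative values,
#not from this script): for W = [0.5, -1.2] and REGULARIZER = 0.01,
#  L1: 0.01 * (|0.5| + |-1.2|)    = 0.01 * 1.70 = 0.0170
#  L2: 0.01 * (0.5^2 + (-1.2)^2) = 0.01 * 1.69 = 0.0169
#(Note: tf.contrib's L2 regularizer is backed by tf.nn.l2_loss, which includes
#a factor of 1/2, so TensorFlow would actually report 0.00845 for the L2 case.)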
import tensorflow as tf
import pylab
import numpy as np

BATCH_SIZE = 30
seed = 2

#Seeded random number generator so the run is reproducible
rdm = np.random.RandomState(seed)
#Draw a 300x2 matrix of samples; each row is one input point [x0, x1]
X = rdm.randn(300,2)
#Ground-truth label: 1 if the point lies inside the circle x0^2 + x1^2 < 2, else 0
Y_ = [int(X0*X0+X1*X1<2) for (X0,X1) in X]
#Map labels to colors: 1 -> 'red', 0 -> 'blue'
Y_c = [['red' if y else 'blue']for y in Y_]
#print(Y_c)
#np.squeeze drops the singleton dimension of Y_c
#print(np.squeeze(Y_c))
#Reshape X to (-1, 2) and Y_ to (-1, 1); -1 lets NumPy infer the row count
X = np.vstack(X).reshape(-1,2)
Y_ = np.vstack(Y_).reshape(-1,1)
#print(X,Y_)
#Scatter-plot the points (x0, x1) of dataset X, colored according to Y_c
pylab.scatter(X[:,0], X[:,1], c = np.squeeze(Y_c))
pylab.show()
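#The resulting scatter shows red points inside the circle x0^2 + x1^2 = 2
#and blue points outside it; this is the shape the network must learn.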

#Define the network's inputs, parameters and outputs, i.e. the forward pass
def get_weight(shape, regularizer):
    w = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    #Register this weight's L2 penalty in the 'losses' collection
    tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    b = tf.Variable(tf.constant(0.01,shape=shape))
    return b

#Placeholders for the inputs x and the labels y_
x = tf.placeholder(tf.float32, shape=(None,2))
y_ = tf.placeholder(tf.float32, shape=(None,1))

#Hidden layer: 2 inputs -> 11 ReLU units
w1 = get_weight([2,11], 0.01)
b1 = get_bias([11])
y1 = tf.nn.relu(tf.matmul(x, w1) + b1)

w2 = get_weight([11, 1], 0.01)
b2 = get_bias([1])
#No activation on the output layer
y = tf.matmul(y1, w2) + b2

#Loss functions: plain MSE, and a total loss that adds the collected L2 penalties
loss_mse = tf.reduce_mean(tf.square(y - y_))
loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))

#Training op WITHOUT regularization: minimize the plain MSE loss
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss_mse)

with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 40000
    for i in range(STEPS):
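        #Cycle through the 300 samples BATCH_SIZE at a time; the modulo wraps
        #the start index around so every step trains on a full batch.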
        start = (i*BATCH_SIZE) % 300
        end = start + BATCH_SIZE
        sess.run(train_step, feed_dict={x:X[start:end], y_:Y_[start:end]})
        if i % 2000 == 0:
            loss_mse_v = sess.run(loss_mse, feed_dict={x:X,y_:Y_})
            print("After %d steps, loss is %f" %(i,loss_mse_v))
   
    #Visualize the learned decision boundary

    #Generate 2-D grid coordinates covering [-3, 3] x [-3, 3] with step 0.01
    xx,yy = np.mgrid[-3:3:0.01, -3:3:0.01]
    #Flatten xx and yy and stack them into a 2-column matrix of grid points
    grid = np.c_[xx.ravel(),yy.ravel()]
    #Feed the grid points through the network; probs holds the raw outputs
    probs = sess.run(y, feed_dict={x:grid})
    #Reshape probs to match xx so it can be drawn as a contour
    probs = probs.reshape(xx.shape)
    
    #print("w1:\n",sess.run(w1))
    #print("b1:\n",sess.run(b1))
    #print("w2:\n",sess.run(w2))
    #print("b2:\n",sess.run(b2))
    
#Plot the data again and overlay the contour where the network output equals 0.5;
#this curve is the decision boundary the (unregularized) network has learned.
pylab.scatter(X[:,0],X[:,1],c=np.squeeze(Y_c))
pylab.contour(xx, yy, probs, levels=[.5])
pylab.show()
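#Without the penalty term the boundary tends to chase individual noise points
#and comes out jagged; the regularized run below should yield a smoother curve.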

#Training op WITH regularization: minimize loss_total (MSE + L2 penalties)
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss_total)

with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 40000
    for i in range(STEPS):
        start = (i*BATCH_SIZE) % 300
        end = start + BATCH_SIZE
        sess.run(train_step, feed_dict={x:X[start:end],y_:Y_[start:end]})
       
        if i % 2000 == 0:
            loss_total_v = sess.run(loss_total, feed_dict={x:X, y_:Y_})
            print("After %d stpes loss_total is %f" % (i,loss_total_v))
        
    #Recompute the decision boundary, now for the regularized network
    xx,yy = np.mgrid[-3:3:0.01, -3:3:0.01]
    grid = np.c_[xx.ravel(), yy.ravel()]
    probs = sess.run(y, feed_dict={x:grid})
    probs = probs.reshape(xx.shape)
        

    #print("w1:\n",sess.run(w1))
    #print("b1:\n",sess.run(b1))
    #print("w2:\n",sess.run(w2))
    #print("b2:\n",sess.run(b2))

#Plot the regularized model's decision boundary for comparison
pylab.scatter(X[:,0],X[:,1],c=np.squeeze(Y_c))
pylab.contour(xx, yy, probs, levels=[.5])
pylab.show()

[Figure: classification result without regularization]
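A closing note: the tf.contrib module used above exists only in TensorFlow 1.x. Below is a minimal sketch of the same two-layer network with the same L2 penalty written against the TF 2.x Keras API. This is my own translation rather than part of the original script; the layer sizes, regularization weight and learning rate mirror the code above.

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(11, activation='relu', input_shape=(2,),
                          kernel_regularizer=tf.keras.regularizers.l2(0.01)),
    tf.keras.layers.Dense(1,
                          kernel_regularizer=tf.keras.regularizers.l2(0.01)),
])
#Keras collects the penalty terms in model.losses and adds them to the training
#loss automatically, playing the role of tf.add_n(tf.get_collection('losses')).
#(Keras's l2 computes 0.01 * sum(w^2), without the 1/2 factor of tf.nn.l2_loss.)
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4), loss='mse')
#model.fit(X, Y_, batch_size=30, epochs=4000)  # ~40000 mini-batch steps, as above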
