Prelude to Deep Learning: Regularization

Without further ado, straight to the code:


```python
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

BATCH_SIZE = 30  # number of samples fed to the network per training step
seed = 2

# Seeded generator so the random data is reproducible
rdm = np.random.RandomState(seed)
# 300 points (x0, x1) drawn from a standard normal distribution
X = rdm.randn(300, 2)
# Label 1 for points inside the circle x0^2 + x1^2 = 2, 0 otherwise
Y_ = [int(x0 * x0 + x1 * x1 < 2) for (x0, x1) in X]

# Color for each point: 'red' inside the circle, 'blue' outside
Y_c = [['red' if y else 'blue'] for y in Y_]

# Reshape the features to (n, 2) and the labels to (n, 1)
X = np.vstack(X).reshape(-1, 2)
Y_ = np.vstack(Y_).reshape(-1, 1)

print(X)
print(Y_)
print(Y_c)

# Visualize the raw data, colored by class
plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
plt.show()


def get_weight(shape, regularizer):
    # Create a weight variable and register its L2 penalty in the
    # 'losses' collection so it can be summed into the total loss later
    w = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w


def get_bias(shape):
    # Create a bias variable initialized to a small constant
    b = tf.Variable(tf.constant(0.01, shape=shape))
    return b
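
# Note on what the regularizer returns (my reading of the TF1 semantics,
# worth double-checking): tf.contrib.layers.l2_regularizer(s)(w) evaluates
# to s * tf.nn.l2_loss(w) = s * sum(w ** 2) / 2. For example, w = [3.0, 4.0]
# with s = 0.01 gives a penalty of 0.01 * (9 + 16) / 2 = 0.125. One such
# scalar is appended to the 'losses' collection per get_weight call.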


# Placeholders for the features and labels
x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))

# Hidden layer: 2 inputs -> 11 ReLU units
w1 = get_weight([2, 11], 0.01)
b1 = get_bias([11])
y1 = tf.nn.relu(tf.matmul(x, w1) + b1)

# Output layer: 11 units -> 1 linear output
w2 = get_weight([11, 1], 0.01)
b2 = get_bias([1])
y = tf.matmul(y1, w2) + b2

loss_mse = tf.reduce_mean(tf.square(y - y_))
loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))
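
# The regularized objective assembled here:
#   loss_total = MSE + sum of all per-layer L2 penalties
# tf.get_collection('losses') retrieves every penalty tensor that
# get_weight registered, and tf.add_n sums them into a single scalar.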

# First run: minimize the plain MSE, i.e. train WITHOUT regularization
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss_mse)

with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    STEPS = 40000  # total number of training steps
    for i in range(STEPS):
        # Slide a BATCH_SIZE-sample window over the 300-point dataset
        start = (i * BATCH_SIZE) % 300
        end = start + BATCH_SIZE
        # Feed the slice from start to end into the network and train
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
        # Print the loss on the full dataset every 2000 steps
        if i % 2000 == 0:
            loss_mse_v = sess.run(loss_mse, feed_dict={x: X, y_: Y_})
            print("After %d training step(s), loss on all data is %g" % (i, loss_mse_v))

    # Evaluate the network on a dense grid to trace the decision boundary
    xx, yy = np.mgrid[-3:3:0.01, -3:3:0.01]
    grid = np.c_[xx.ravel(), yy.ravel()]
    probs = sess.run(y, feed_dict={x: grid})
    probs = probs.reshape(xx.shape)

    # Print the trained parameters
    print("\n")
    print("w1:\n", sess.run(w1))
    print("b1:\n", sess.run(b1))
    print("w2:\n", sess.run(w2))
    print("b2:\n", sess.run(b2))

    # Plot the decision boundary learned without regularization
    plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
    plt.contour(xx, yy, probs, levels=[.5])
    plt.show()

# Second run: minimize loss_total, i.e. train WITH L2 regularization
# (note the optimizer also changes, from plain gradient descent to Adam)
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss_total)
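
# Why this helps: the gradient of each L2 term is proportional to w
# itself, so every update also shrinks the weights toward zero; smaller
# weights make the learned function flatter, which is what produces the
# smoother decision boundary shown below.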

with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    STEPS = 40000  # total number of training steps
    for i in range(STEPS):
        start = (i * BATCH_SIZE) % 300
        end = start + BATCH_SIZE
        # Feed the slice from start to end into the network and train
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
        # Print the loss on the full dataset every 2000 steps
        if i % 2000 == 0:
            loss_v = sess.run(loss_total, feed_dict={x: X, y_: Y_})
            print("After %d training step(s), loss on all data is %g" % (i, loss_v))

    # Evaluate the regularized network on the same grid
    xx, yy = np.mgrid[-3:3:0.01, -3:3:0.01]
    grid = np.c_[xx.ravel(), yy.ravel()]
    probs = sess.run(y, feed_dict={x: grid})
    probs = probs.reshape(xx.shape)

    # Print the trained parameters
    print("\n")
    print("w1:\n", sess.run(w1))
    print("b1:\n", sess.run(b1))
    print("w2:\n", sess.run(w2))
    print("b2:\n", sess.run(b2))

# Plot the decision boundary learned with regularization
plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
plt.contour(xx, yy, probs, levels=[.5])
plt.show()

```

The resulting plots are shown below.

The raw data:

[figure]

The model trained without regularization:

[figure]

The model trained with regularization:

[figure]

As you can see, with regularization the resulting decision boundary comes out noticeably smoother and rounder.
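
For reference, the listing above uses TensorFlow 1.x APIs (`tf.placeholder`, sessions, `tf.contrib`), which no longer exist in TensorFlow 2.x. Below is a minimal sketch of the same idea on TF 2.x with Keras, assuming the same 2-11-1 architecture, penalty scale 0.01, batch size 30, and Adam at 1e-4; treat it as an illustration rather than a drop-in replacement:

```python
import numpy as np
import tensorflow as tf  # assumes TensorFlow 2.x

# Same synthetic data as above
rdm = np.random.RandomState(2)
X = rdm.randn(300, 2)
Y_ = np.array([int(x0 * x0 + x1 * x1 < 2) for (x0, x1) in X]).reshape(-1, 1)

# 2 -> 11 (ReLU) -> 1 network with an L2 penalty on both weight matrices;
# Keras adds the penalty terms to the loss automatically during training
reg = tf.keras.regularizers.l2(0.01)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(11, activation='relu', kernel_regularizer=reg,
                          input_shape=(2,)),
    tf.keras.layers.Dense(1, kernel_regularizer=reg),
])
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4), loss='mse')
model.fit(X, Y_, batch_size=30, epochs=200, verbose=0)
```

One caveat: as far as I know, `tf.keras.regularizers.l2(s)` computes `s * sum(w ** 2)`, while the `tf.contrib` regularizer above scales `tf.nn.l2_loss`, which carries an extra factor of 1/2, so the effective penalty strengths differ by a constant factor.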
