Building a Neural Network Module (Code Walkthrough)

Building the Modules

Forward propagation (forward()): builds the network, i.e. defines the network architecture.

def forward(x, regularizer):
    w =
    b =
    y =
    return y

# weight helper; the regularization term is collected for the loss
def get_weight(shape, regularizer):
    w = tf.Variable()
    # add the L1 penalty on w to the 'losses' collection; regularization mitigates overfitting
    tf.add_to_collection('losses', tf.contrib.layers.l1_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    b = tf.Variable()
    return b
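
What l1_regularizer actually contributes to the 'losses' collection is the absolute sum of the weights scaled by the regularizer coefficient. A minimal NumPy sketch of that computation (the weight values here are made up for illustration):

    import numpy as np

    REGULARIZER = 0.001                          # same coefficient used below
    w = np.array([[0.5, -1.2], [2.0, -0.3]])     # hypothetical weight matrix
    l1_penalty = REGULARIZER * np.abs(w).sum()   # what l1_regularizer(REGULARIZER)(w) computes
    print(l1_penalty)                            # 0.004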

Backward propagation (backward()): trains the network and optimizes its parameters.

def backward():
    x = tf.placeholder()
    y_ = tf.placeholder()
    y = forward(x,REGULARIZER)
    global_step = tf.Variable(0,trainable=False)
    loss=

# loss, with regularization
loss can be the mean squared error between y and y_:
loss_mse = tf.reduce_mean(tf.square(y - y_))
or the cross-entropy between y and y_:
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
cem = tf.reduce_mean(ce)

With regularization added:
loss = (loss_mse or cem) + tf.add_n(tf.get_collection('losses'))
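
To make the cross-entropy option concrete: tf.argmax(y_, 1) converts one-hot rows into class indices, which is the label format sparse_softmax_cross_entropy_with_logits expects. A minimal, self-contained sketch with made-up logits and a one-hot label:

    import tensorflow as tf

    y  = tf.constant([[2.0, 1.0, 0.1]])   # hypothetical logits for one sample
    y_ = tf.constant([[1.0, 0.0, 0.0]])   # one-hot label: class 0
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    with tf.Session() as sess:
        print(sess.run(cem))               # ~0.417, i.e. -log(softmax(y)[0])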

# exponentially decaying learning rate
learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                           global_step,
                                           total_samples / BATCH_SIZE,   # decay_steps: batches per epoch
                                           LEARNING_RATE_DECAY,
                                           staircase=True)
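
With staircase=True the decayed rate is LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // decay_steps), so the rate drops once per epoch rather than after every step. A quick pure-Python sketch of the schedule, using the hyperparameters from the full code below (300 samples, batch size 50):

    LEARNING_RATE_BASE = 0.01
    LEARNING_RATE_DECAY = 0.99
    decay_steps = 300 // 50                       # 6 batches per epoch

    for global_step in (0, 6, 60, 600):
        lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // decay_steps)
        print(global_step, lr)                    # 0.01, 0.0099, ~0.00904, ~0.00366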

# define the training step; passing global_step lets minimize() increment it automatically
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

# exponential moving average of the trainable variables
ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
ema_op = ema.apply(tf.trainable_variables())
with tf.control_dependencies([train_step, ema_op]):
    train_op = tf.no_op(name='train')
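
The averaged (shadow) values can then be read back through the ema object. A brief sketch, assuming a trainable variable w1 such as the one built by get_weight:

    shadow_w1 = ema.average(w1)                  # tensor tracking the moving average of w1
    # when restoring a checkpoint for inference, load shadow values in place of raw ones:
    saver = tf.train.Saver(ema.variables_to_restore())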

# session
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    for i in range(STEPS):
        # run train_op (not train_step) so the moving-average update runs too
        sess.run(train_op, feed_dict={x: , y_: })
        if i % rounds == 0:  # every so many rounds, print the current loss
            print()
if __name__=='__main__':
    backward()

Full Code with Explanations

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
seed = 2
def generateds():
    # seed the RNG so the generated data is reproducible
    rdm = np.random.RandomState(seed)
    # a 300x2 matrix: 300 coordinate pairs (x0, x1) as the input data
    X = rdm.randn(300,2)
    # label each row of X: 1 if the sum of squares of its two coordinates is < 2, else 0
    Y_ = [int(x0*x0 + x1*x1 < 2) for (x0, x1) in X]
    # map each label to a color: 'red' for 1, 'blue' otherwise, so the plot is easy to read
    Y_c = [['red' if y else 'blue'] for y in Y_]
    # reshape X and Y_: -1 lets the row count be inferred from the column count,
    # so X ends up with 2 columns and Y_ with 1
    X = np.vstack(X).reshape(-1,2)
    Y_ = np.vstack(Y_).reshape(-1,1)
    return X,Y_,Y_c
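
A quick sanity check of what generateds() returns, with the shapes implied by the reshape calls above:

    X, Y_, Y_c = generateds()
    print(X.shape)     # (300, 2): the input points
    print(Y_.shape)    # (300, 1): the 0/1 labels
    print(len(Y_c))    # 300: one color per point, for the scatter plot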

# define the network parameters and the forward propagation
def get_weight(shape,regularizer):
    w = tf.Variable(tf.random_normal(shape),dtype=tf.float32)
    # add the L1 penalty on w to the 'losses' collection; regularization mitigates overfitting
    tf.add_to_collection('losses',tf.contrib.layers.l1_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    b = tf.Variable(tf.constant(0.01,shape=shape))
    return b

def forward(x,regularizer):
    # hidden layer: 2 inputs -> 11 units, ReLU activation
    w1 = get_weight([2,11],regularizer)
    b1 = get_bias([11])
    y1 = tf.nn.relu(tf.matmul(x,w1)+b1)

    # output layer: 11 units -> 1 raw output (no activation)
    w2 = get_weight([11, 1],regularizer)
    b2 = get_bias([1])
    y = tf.matmul(y1,w2)+b2
    return y

STEPS = 60000
BATCH_SIZE = 50
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
REGULARIZER = 0.001

def backward():
    x = tf.placeholder(tf.float32,shape=(None,2))
    y_ = tf.placeholder(tf.float32,shape=(None,1))

    X,Y_,Y_c = generateds()

    y = forward(x,REGULARIZER)

    global_step = tf.Variable(0,trainable=False)

    # exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               300/BATCH_SIZE,   # decay_steps: batches per epoch
                                               LEARNING_RATE_DECAY,
                                               staircase=True)


    # loss: MSE plus the regularization terms collected in 'losses'
    loss_mse = tf.reduce_mean(tf.square(y-y_))
    loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))
    # training step; pass global_step so it increments and the learning rate actually decays
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_total,global_step=global_step)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        for i in range(STEPS):
            start = (i*BATCH_SIZE)%300
            end = start+BATCH_SIZE
            sess.run(train_step,feed_dict={x:X[start:end],y_:Y_[start:end]})
            if i%2000 == 0:  # every 2000 rounds, print the current loss
                loss_v = sess.run(loss_total,feed_dict={x:X,y_:Y_})
                print("After %d steps, loss is: %f"%(i,loss_v))
        # evaluate the network on a dense grid to visualize the decision boundary
        xx,yy = np.mgrid[-3:3:.01,-3:3:.01]
        grid = np.c_[xx.ravel(),yy.ravel()]   # flatten the grid into (N, 2) input points
        probs = sess.run(y,feed_dict={x:grid})
        probs = probs.reshape(xx.shape)

    plt.scatter(X[:,0],X[:,1],c=np.squeeze(Y_c))
    plt.contour(xx,yy,probs,levels=[.5])   # the 0.5 level curve is the learned boundary
    plt.show()

if __name__=='__main__':
    backward()

Results

[result images from the original post]
