lesson20 Neural Network Building Boilerplate

 

https://www.bilibili.com/video/av22530538/?p=20

# lesson20: Neural network building boilerplate
# The boilerplate for a modular neural network:
# Forward propagation builds the network, i.e. designs its structure (forward.py)
# def forward(x, regularizer):
#     w =
#     b =
#     y =
#     return y

# def get_weight(shape, regularizer):
#     w = tf.Variable()
#     tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
#     return w

# def get_bias(shape):
#     b = tf.Variable(   )
#     return b

# The boilerplate for a modular neural network:
# Backpropagation trains the network, i.e. optimizes its parameters (backward.py)
# def backward():
#     x = tf.placeholder(   )
#     y_ = tf.placeholder(   )
#     y = forward.forward(x, REGULARIZER)
#     global_step = tf.Variable(0, trainable=False)
#     loss =
     
# ================= Regularization:
# loss can be the mean squared error between y and y_:
# loss_mse = tf.reduce_mean(tf.square(y - y_))
# or the cross entropy:
# ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
# cem = tf.reduce_mean(ce)

# With regularization added:
# loss = (gap between y and y_) + tf.add_n(tf.get_collection('losses'))
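# A minimal runnable sketch of the regularized loss (TF 1.x assumed; the shapes
# and the 0.01 regularizer weight here are illustrative, not from the video):
import tensorflow as tf
y_ = tf.placeholder(tf.float32, shape=(None, 1))
y = tf.placeholder(tf.float32, shape=(None, 1))
w = tf.Variable(tf.random_normal([2, 1]), dtype=tf.float32)
# Each weight's L2 penalty goes into the 'losses' collection...
tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.01)(w))
loss_mse = tf.reduce_mean(tf.square(y - y_))
# ...and tf.add_n sums everything in that collection into the total loss.
loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))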

# =============== Exponentially decaying learning rate:
# learning_rate = tf.train.exponential_decay(
#     LEARNING_RATE_BASE,
#     global_step,
#     total number of samples / BATCH_SIZE,
#     LEARNING_RATE_DECAY,
#     staircase=True)

# train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,
#             global_step=global_step)
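# A runnable sketch of the schedule (illustrative constants; TF 1.x assumed):
import tensorflow as tf
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(
    0.1,             # LEARNING_RATE_BASE
    global_step,
    10,              # decay steps, e.g. 300 samples / BATCH_SIZE of 30
    0.99,            # LEARNING_RATE_DECAY
    staircase=True)  # True: the rate drops stepwise, once per 10 global steps
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(learning_rate))  # 0.1 at step 0; 0.1 * 0.99 after 10 steps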

# =============== Moving average:
# ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
# ema_op = ema.apply(tf.trainable_variables())
# with tf.control_dependencies([train_step, ema_op]):
#     train_op = tf.no_op(name='train')
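# A minimal sketch of the moving-average hook (the 0.99 decay is illustrative):
import tensorflow as tf
w = tf.Variable(0.0, name='w')
train_step = tf.assign_add(w, 1.0)  # stand-in for an optimizer step
ema = tf.train.ExponentialMovingAverage(0.99)
ema_op = ema.apply(tf.trainable_variables())
# Grouping both ops means one sess.run(train_op) updates w and its shadow copy.
with tf.control_dependencies([train_step, ema_op]):
    train_op = tf.no_op(name='train')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run([w, ema.average(w)]))  # the shadow average lags behind w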
    

# with tf.Session() as sess:
#     init_op = tf.global_variables_initializer()
#     sess.run(init_op)

#     for i in range(STEPS):
#         sess.run(train_step, feed_dict={x: , y_: })
#         if i % (printing interval) == 0:
#             print

# if __name__ == '__main__':
#     backward()
    
    
# Dataset generation: generateds.py
# Forward propagation: forward.py
# Backpropagation: backward.py (a wiring sketch follows below)
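# How the modular version wires together (a sketch assuming the generateds.py
# and forward.py listed below are saved as separate files alongside backward.py):
import tensorflow as tf
import generateds
import forward

REGULARIZER = 0.01
x = tf.placeholder(tf.float32, shape=(None, 2))
X, Y_, Y_c = generateds.generateds()  # dataset comes from generateds.py
y = forward.forward(x, REGULARIZER)   # graph construction comes from forward.py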
 


# ============================================
# generateds.py
# Import modules and generate the simulated dataset
import numpy as np 
import matplotlib.pyplot as plt
seed = 2
def generateds():
    # Seed the random number generator
    rdm = np.random.RandomState(seed)
    # Return a 300x2 matrix of random numbers: 300 coordinate points (x0, x1)
    # that serve as the input dataset
    X = rdm.randn(300, 2)
    # For each row of X, assign label 1 if the sum of squares of its two
    # coordinates is less than 2, otherwise 0; these are the labels (ground truth)
    Y_ = [int(x0*x0 + x1*x1 < 2) for (x0, x1) in X]
    # Map each label to a color, 1 -> 'red' and 0 -> 'blue', so the plot is easy to read
    Y_c = [['red' if y else 'blue'] for y in Y_]
    # Reshape X and Y_: -1 lets the row count be inferred from the column count,
    # so X ends up with two columns and Y_ with one
    X = np.vstack(X).reshape(-1, 2)
    Y_ = np.vstack(Y_).reshape(-1, 1)
    return X,Y_,Y_c

# X, Y_, Y_c = generateds()
# print(X)
# print(Y_)
# print(Y_c)
# Use plt.scatter to plot column 0 against column 1 of X, i.e. each row's
# (x0, x1), colored by the matching entry of Y_c (c is short for color)
# plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
# plt.show()

#===============================================
#forward.py
import tensorflow as tf

# Define the network's inputs, parameters and outputs, and the forward pass
def get_weight(shape, regularizer):
    w = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    b = tf.Variable(tf.constant(0.01, shape=shape))
    return b

def forward(x, regularizer):
    w1 = get_weight([2,11], regularizer)
    b1 = get_bias([11])
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)
    
    w2 = get_weight([11,1], regularizer)
    b2 = get_bias([1])
    y = tf.matmul(y1, w2) + b2  # no activation on the output layer
    return y
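
# A quick smoke test of the forward pass (a sketch; the 0.01 regularizer
# weight is illustrative):
if __name__ == '__main__':
    x = tf.placeholder(tf.float32, shape=(None, 2))
    print(forward(x, 0.01).get_shape())  # (?, 1): one raw score per input point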


#==============================================
# backward.py  the backpropagation (training) process
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# import generateds  # the standalone modules are inlined below instead
# import forward

STEPS = 40000
BATCH_SIZE = 30
LEARNING_RATE_BASE = 0.001
LEARNING_RATE_DECAY = 0.999
REGULARIZER = 0.01

seed = 2
def generateds():
    # Seed the random number generator
    rdm = np.random.RandomState(seed)
    # Return a 300x2 matrix of random numbers: 300 coordinate points (x0, x1)
    # that serve as the input dataset
    X = rdm.randn(300, 2)
    # For each row of X, assign label 1 if the sum of squares of its two
    # coordinates is less than 2, otherwise 0; these are the labels (ground truth)
    Y_ = [int(x0*x0 + x1*x1 < 2) for (x0, x1) in X]
    # Map each label to a color, 1 -> 'red' and 0 -> 'blue', so the plot is easy to read
    Y_c = [['red' if y else 'blue'] for y in Y_]
    # Reshape X and Y_: -1 lets the row count be inferred from the column count,
    # so X ends up with two columns and Y_ with one
    X = np.vstack(X).reshape(-1,2)
    Y_ = np.vstack(Y_).reshape(-1,1)
    return X,Y_,Y_c

# Define the network's inputs, parameters and outputs, and the forward pass
def get_weight(shape, regularizer):
    w = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    b = tf.Variable(tf.constant(0.01, shape=shape))
    return b

def forward(x, regularizer):
    w1 = get_weight([2,11], regularizer)
    b1 = get_bias([11])
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)
    
    w2 = get_weight([11,1], regularizer)
    b2 = get_bias([1])
    y = tf.matmul(y1, w2) + b2  # no activation on the output layer
    return y

def backward():
    x = tf.placeholder(tf.float32, shape=(None, 2))
    y_ = tf.placeholder(tf.float32, shape=(None, 1))
    
    X, Y_, Y_c = generateds()
    y = forward(x, REGULARIZER)
    
    global_step = tf.Variable(0, trainable=False)
    
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        300/BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True
    )
    
    # Define the loss function: MSE plus the L2 penalties collected under 'losses'
    loss_mse = tf.reduce_mean(tf.square(y - y_))
    loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))
    

    # Define the training method (regularized loss; pass global_step so the
    # learning-rate schedule actually advances)
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_total, global_step=global_step)
    
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        for i in range(STEPS):
            start = (i*BATCH_SIZE)%300
            end = start + BATCH_SIZE
            sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
            if i%2000 == 0:
                loss_v = sess.run(loss_total, feed_dict={x:X, y_:Y_})
                print("After %d steps,loss is :%f",i,loss_v)
        
        
        # Evaluate the trained network on a dense grid so the decision
        # boundary can be drawn as a contour
        xx, yy = np.mgrid[-3:3:.01, -3:3:.01]
        grid = np.c_[xx.ravel(), yy.ravel()]
        probs = sess.run(y, feed_dict={x: grid})
        probs = probs.reshape(xx.shape)
    
    # Plot the data points, colored by label, and the 0.5 level set of the output
    plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
    plt.contour(xx, yy, probs, levels=[.5])
    plt.show()


if __name__=='__main__':
    backward()
Sample run: the total loss falls from roughly 1.2-3.1 at step 0 to about
0.08-0.24 by step 2000, and levels off near 0.063 by step 38000.

 
