The following code uses the TensorFlow library and a modular engineering approach to lay out the boilerplate for building a neural network (i.e., the essential functions and parameters). Completing the blanks below yields a simple working network.
Note: Python 3.6 (TensorFlow 1.x API).
# Modular neural-network boilerplate:
import tensorflow as tf

# Forward propagation builds the network: design the network structure (forward.py)
def forward(x, regularizer):
    # Example structure (an assumption; adapt to your task): one hidden ReLU layer.
    # INPUT_NODE, LAYER1_NODE, OUTPUT_NODE are network-specific size constants.
    w1 = get_weight([INPUT_NODE, LAYER1_NODE], regularizer)
    b1 = get_bias([LAYER1_NODE])
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)
    w2 = get_weight([LAYER1_NODE, OUTPUT_NODE], regularizer)
    b2 = get_bias([OUTPUT_NODE])
    y = tf.matmul(y1, w2) + b2  # raw output (logits); no activation on the last layer
    return y
def get_weight(shape, regularizer):
    # Truncated-normal initialization is one common choice from the notes.
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w
# shape is the shape of b, i.e., the number of biases in the layer
def get_bias(shape):
    b = tf.Variable(tf.constant(0.01, shape=shape))  # small constant init, as in the notes
    return b
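
As a quick sanity check of the completed forward pass, it can be run on random input. A minimal sketch, assuming hypothetical layer sizes (784/500/10 are the MNIST-style values from the notes) and using random data only to verify output shapes:

import numpy as np

INPUT_NODE, LAYER1_NODE, OUTPUT_NODE = 784, 500, 10  # hypothetical sizes
x = tf.placeholder(tf.float32, shape=(None, INPUT_NODE))
y = forward(x, 0.01)  # 0.01: an arbitrary example regularization weight
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(y, feed_dict={x: np.random.rand(2, INPUT_NODE)}).shape)  # (2, 10)
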
# Backward propagation trains the network, optimizing its parameters (backward.py)
import forward
def backward():
    # Placeholder shapes are task-specific; None keeps the batch size flexible.
    x = tf.placeholder(tf.float32, shape=(None, INPUT_NODE))
    y_ = tf.placeholder(tf.float32, shape=(None, OUTPUT_NODE))
    # REGULARIZER, like the other ALL-CAPS names, is a hyperparameter constant.
    y = forward.forward(x, REGULARIZER)
    # counter for the number of training steps (not trainable)
    global_step = tf.Variable(0, trainable=False)
    # Define the loss; the notes list the options below (a concrete example
    # follows the docstring).
    '''
    Regularization:
    loss can be one of:
      mean squared error:  loss_mse = tf.reduce_mean(tf.square(y - y_))
      cross entropy:       ce  = tf.nn.sparse_softmax_cross_entropy_with_logits(
                                     logits=y, labels=tf.argmax(y_, 1))
                           cem = tf.reduce_mean(ce)
    With regularization, also add the collected penalty terms:
      loss = <distance between y and y_> + tf.add_n(tf.get_collection('losses'))
    '''
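    # A minimal concrete choice (an assumption, not fixed by the notes): mean
    # squared error plus the L2 penalties that get_weight() collected in 'losses'.
    loss = tf.reduce_mean(tf.square(y - y_)) + tf.add_n(tf.get_collection('losses'))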
    # To use an exponentially decaying learning rate, add:
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        NUM_EXAMPLES / BATCH_SIZE,  # NUM_EXAMPLES = training-set size; decays once per epoch
        LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    # Moving average of the weights:
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    # Group the train and EMA-update ops into a single no-op training target.
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        for i in range(STEPS):
            # xs, ys: the next training batch from your data pipeline
            sess.run(train_op, feed_dict={x: xs, y_: ys})
            if i % REPORT_INTERVAL == 0:  # REPORT_INTERVAL: hypothetical logging period
                print()  # e.g., print the current step and the loss value
# If this file is run as the main module, execute backward()
if __name__ == '__main__':
backward()
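
The notes' modular pattern is usually completed by a third module that evaluates the trained network. A minimal sketch of that step, assuming backward() also saved checkpoints with a tf.train.Saver to a hypothetical MODEL_SAVE_PATH (the template above omits the saver); restoring through ema.variables_to_restore() evaluates with the smoothed weights rather than the raw ones:

# test.py -- evaluation sketch (hypothetical; assumes checkpoints were saved)
import tensorflow as tf
import forward

MOVING_AVERAGE_DECAY = 0.99   # must match the value used in backward.py
MODEL_SAVE_PATH = './model/'  # hypothetical checkpoint directory

def test(test_x, test_y):
    x = tf.placeholder(tf.float32, shape=(None, test_x.shape[1]))
    y_ = tf.placeholder(tf.float32, shape=(None, test_y.shape[1]))
    y = forward.forward(x, None)  # no regularization at evaluation time

    # Load the EMA shadow values in place of the raw trainable variables.
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    saver = tf.train.Saver(ema.variables_to_restore())

    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
            accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
            print(sess.run(accuracy, feed_dict={x: test_x, y_: test_y}))
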
Source: Peking University, "Artificial Intelligence Practice: TensorFlow Notes"