上一篇文章,实现了网络的输入,这次继续完成网络的训练,网络采用VGG16的结构。其中为了方便,keep_prob无论是训练还是测试,都设成了1,大家应该根据需要feed进不同的值。网络的输入使用TFRecord.createBatch(),即上一篇文章中产生数据的方法。
1.定义网络参数
import tensorflow as tf
import numpy as np
import TFRecord
#定义网络参数
learning_rate = 0.001
display_step = 5
epochs = 10
keep_prob = 0.5
2.定义各种类型的层
#定义卷积操作
def conv_op(input_op, name, kh, kw, n_out, dh, dw):
input_op = tf.convert_to_tensor(input_op)
n_in = input_op.get_shape()[-1].value
with tf.name_scope(name) as scope:
kernel = tf.get_variable(scope+"w",
shape = [kh, kw, n_in, n_out],
dtype = tf.float32,
initializer = tf.contrib.layers.xavier_initializer_conv2d())
conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding = 'SAME')
bias_init_val = tf.constant(0.0, shape = [n_out], dtype = tf.float32)
biases = tf.Variable(bias_init_val, trainable = True, name = 'b')
z = tf.nn.bias_add(conv, biases)
activation = tf.nn.relu(z, name = scope)
return activation
#定义全连接操作
def fc_op(input_op, name, n_out):
n_in = input_op.get_shape()[-1].value
with tf.name_scope(name) as scope:
kernel = tf.get_variable(scope&