Network architecture: conv block + max-pool, conv block + max-pool, ..., conv block + max-pool, FC layer, FC layer, ..., FC layer, softmax
VGG.py (adds L2 regularization and batch-norm layers; BN ordering: x -> BN -> ReLU -> conv/FC)
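The x -> BN -> ReLU -> conv ordering could be implemented like this (a minimal sketch: the helper name bn_relu_conv and the tf.layers calls are assumptions, not the file's actual code, and it relies on the `import tensorflow as tf` below):

def bn_relu_conv(x, out_channels, is_training):
    # Pre-activation ordering: batch-normalize and activate the input first,
    # then apply the 3x3 convolution (VGG-style kernel).
    x = tf.layers.batch_normalization(x, training=is_training)
    x = tf.nn.relu(x)
    return tf.layers.conv2d(x, out_channels, kernel_size=3, padding="same")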
import tensorflow as tf
"""
(1)构造函数__init__参数
input_sz: 输入层placeholder的4-D shape,如mnist是[None,28,28,1]
fc_layers: 全连接层每一层大小,接在卷积层后面。如mnist可以为[128,84,10],[10]
conv_info: 卷积层、池化层。
如vgg16可以这样写:[(2,64),(2,128),(3,256),(3,512),(3,512)],表示2+2+3+3+3=13个卷积层,4个池化层,以及channels
(2)train函数:训练一步
batch_input: 输入的batch
batch_output: label
learning_rate:学习率
返回:正确率和loss值(float) 格式:{"accuracy":accuracy,"loss":loss}
(3)forward:训练后用于测试
(4)save(save_path,steps)保存模型
(5)restore(path):从文件夹中读取最后一个模型
(6)loss函数使用cross-entrop one-hot版本:y*log(y_net)
(7)optimizer使用adamoptimier
"""
class VGG: # VGG classifier
    sess = None
    # Tensors
    input = None
    output = None
    desired_out = None
    loss = None
    iscorrect = None
    accuracy = None
    optimizer = None
    # Hyperparameters
    learning_rate = None
    MOMENTUM = 0.9
    WEIGHT_DECAY = 1e-4  # L2 regularization coefficient
    ACTIVATE = None
    CONV_PADDING = "SAME"
    MAX_POOL_PADDING = "SAME"
    CONV_WEIGHT_INITIALIZER = tf.truncated_normal_initializer(stddev=0.1)
    CONV_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
    FC_WEIGHT_INITIALIZER = tf.truncated_normal_initializer(stddev=0.1)
    FC_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
    def getParameterNum(self): # total number of trainable parameters
        param_tot = 0
        for var in tf.trainable_variables():
            param = 1
            for dim in var.get_shape():
                param *= dim.value
            param_tot += param
        return param_tot
    def HyperParameter(self):
        with tf.variable_scope("HyperParameter"):
            self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
            # weight_decay = tf.constant(self.WEIGHT_DECAY, tf.float32, name="weight_decay")
            # momentum = tf.constant(self.MOMENTUM, tf.float32, name="momentum")
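    # A minimal sketch, not part of the original file: the method name buildLoss
    # and the wiring below are assumptions showing how notes (6) and (7) could
    # be realized (one-hot cross-entropy + L2 weight decay, AdamOptimizer).
    def buildLoss(self):
        with tf.variable_scope("loss"):
            cross_entropy = -tf.reduce_mean(tf.reduce_sum(
                self.desired_out * tf.log(self.output + 1e-10), axis=1))
            l2 = self.WEIGHT_DECAY * tf.add_n(
                [tf.nn.l2_loss(v) for v in tf.trainable_variables()])
            self.loss = cross_entropy + l2
            self.iscorrect = tf.equal(tf.argmax(self.output, 1),
                                      tf.argmax(self.desired_out, 1))
            self.accuracy = tf.reduce_mean(tf.cast(self.iscorrect, tf.float32))
            self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)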
    def train(self, batch_input, batch_output, learning_rate):
        _, accuracy, loss = self.sess.run([self.optimizer, self.accuracy, self.loss],
            feed_dict={self.input: batch_input, self.desired_out: batch_output, self.learning_rate: learning_rate})
        return {"accuracy": accuracy, "loss": loss}
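    # Sketches of interface items (3)-(5) from the header docstring; these
    # bodies are assumptions built on standard TF1 checkpoint APIs, not the
    # file's original implementation.
    def forward(self, batch_input):
        # Inference only: run the trained network on a batch.
        return self.sess.run(self.output, feed_dict={self.input: batch_input})
    def save(self, save_path, steps):
        # Write a checkpoint tagged with the current step count.
        tf.train.Saver().save(self.sess, save_path, global_step=steps)
    def restore(self, path):
        # Load the most recent checkpoint found in the directory `path`.
        tf.train.Saver().restore(self.sess, tf.train.latest_checkpoint(path))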