def P_Net(inputs, label=None, bbox_target=None, landmark_target=None, training=True):
    """Build the MTCNN PNet (proposal network) graph.

    Args:
        inputs: 4-D NHWC image tensor. In training mode the spatial size is
            assumed to collapse to 1x1 after the conv stack (the original
            notes a [batch, 1, 1, 2] classification output) — i.e. 12x12
            crops; TODO confirm against the caller.
        label: per-sample class labels, used only when ``training`` is True.
        bbox_target: bounding-box regression targets (training only).
        landmark_target: facial-landmark regression targets (training only).
        training: if True, return losses and accuracy; otherwise return the
            raw per-location predictions for a single test image.

    Returns:
        training=True:  (cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy)
        training=False: (cls_pro_test, bbox_pred_test, landmark_pred_test)
    """
    # Defaults shared by every slim.conv2d below: PReLU activation, Xavier
    # weight init, zero bias init, L2 weight decay of 5e-4, 'valid' padding.
    with slim.arg_scope([slim.conv2d],
                        activation_fn=prelu,
                        weights_initializer=slim.xavier_initializer(),
                        biases_initializer=tf.zeros_initializer(),
                        weights_regularizer=slim.l2_regularizer(0.0005),
                        padding='valid'):
        # Shape prints are kept as debug output, one after each layer.
        print(inputs.get_shape())

        # conv1: 10 filters, 3x3, stride 1.
        feat = slim.conv2d(inputs, 10, 3, stride=1, scope='conv1')
        _activation_summary(feat)
        print(feat.get_shape())

        # pool1: 2x2 max pooling, stride 2, SAME padding.
        feat = slim.max_pool2d(feat, kernel_size=[2, 2], stride=2,
                               scope='pool1', padding='SAME')
        _activation_summary(feat)
        print(feat.get_shape())

        # conv2: 16 filters, 3x3.
        feat = slim.conv2d(feat, num_outputs=16, kernel_size=[3, 3],
                           stride=1, scope='conv2')
        _activation_summary(feat)
        print(feat.get_shape())

        # conv3: 32 filters, 3x3.
        feat = slim.conv2d(feat, num_outputs=32, kernel_size=[3, 3],
                           stride=1, scope='conv3')
        _activation_summary(feat)
        print(feat.get_shape())

        # Face classification head: batch*H*W*2, softmax over the two classes.
        # (A sigmoid single-channel variant was considered and dropped.)
        conv4_1 = slim.conv2d(feat, num_outputs=2, kernel_size=[1, 1],
                              stride=1, scope='conv4_1',
                              activation_fn=tf.nn.softmax)
        _activation_summary(conv4_1)
        print(conv4_1.get_shape())

        # Bounding-box regression head: batch*H*W*4, linear output.
        bbox_pred = slim.conv2d(feat, num_outputs=4, kernel_size=[1, 1],
                                stride=1, scope='conv4_2', activation_fn=None)
        _activation_summary(bbox_pred)
        print(bbox_pred.get_shape())

        # Landmark regression head: batch*H*W*10, linear output.
        landmark_pred = slim.conv2d(feat, num_outputs=10, kernel_size=[1, 1],
                                    stride=1, scope='conv4_3',
                                    activation_fn=None)
        _activation_summary(landmark_pred)
        print(landmark_pred.get_shape())

        if training:
            # Training crops reduce H and W to 1, so drop those axes to get
            # [batch, 2] class probabilities, then compute the OHEM losses.
            cls_prob = tf.squeeze(conv4_1, [1, 2], name='cls_prob')
            cls_loss = cls_ohem(cls_prob, label)

            # [batch, 4] box offsets.
            bbox_pred = tf.squeeze(bbox_pred, [1, 2], name='bbox_pred')
            bbox_loss = bbox_ohem(bbox_pred, bbox_target, label)

            # [batch, 10] landmark coordinates.
            landmark_pred = tf.squeeze(landmark_pred, [1, 2], name="landmark_pred")
            landmark_loss = landmark_ohem(landmark_pred, landmark_target, label)

            # Classification accuracy plus the summed regularization penalty.
            accuracy = cal_accuracy(cls_prob, label)
            L2_loss = tf.add_n(slim.losses.get_regularization_losses())
            return cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy
        else:
            # At test time batch_size == 1: drop only the batch axis and keep
            # the full per-location prediction maps.
            cls_pro_test = tf.squeeze(conv4_1, axis=0)
            bbox_pred_test = tf.squeeze(bbox_pred, axis=0)
            landmark_pred_test = tf.squeeze(landmark_pred, axis=0)
            return cls_pro_test, bbox_pred_test, landmark_pred_test