Implementing the RPN: the neural-network part
The RPN framework:
- Diagram (figure omitted)
- The upper branch predicts the regression corrections for every anchor (4*9 values per feature-map location)
- The lower branch predicts the foreground/background classification scores for the 9 anchors at each location (2*9 values); a quick arithmetic check of these counts follows this list
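A quick arithmetic check of the two branch widths (the 8x8 feature-map size is an assumption, chosen to match the 64x64 input and three stride-2 blocks used later): for an h x w feature map with k = 9 anchors per location, the classification conv needs 2*k channels and the regression conv 4*k channels.
h, w, k = 8, 8, 9                       # assumed feature-map size and anchors per location
num_anchors = h * w * k                 # total anchors tied to this feature map
print(num_anchors)                      # 576
print((h, w, 2 * k), (h, w, 4 * k))     # per-branch conv output shapes: (8, 8, 18) (8, 8, 36)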
rpn_match
- Anchors with label 1: an anchor whose maximum IoU with the ground-truth bounding boxes exceeds the threshold Vt1 (0.7)
- Anchors with label -1: an anchor whose maximum IoU with the ground-truth bounding boxes is below the threshold Vt2 (0.3)
- Anchors with label 0: an anchor whose maximum IoU with the ground-truth bounding boxes lies between Vt2 and Vt1
- The total number of negative and positive anchors kept for training is a manually chosen constant; a labeling sketch follows this list
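A minimal NumPy sketch of the labeling rule above, not the full Mask R-CNN target-building code; compute_iou, build_rpn_match and the (y1, x1, y2, x2) box layout are illustrative assumptions.
import numpy as np

def compute_iou(anchors, gt_boxes):
    # anchors: (N, 4), gt_boxes: (M, 4), both as (y1, x1, y2, x2) -> IoU matrix of shape (N, M)
    y1 = np.maximum(anchors[:, None, 0], gt_boxes[None, :, 0])
    x1 = np.maximum(anchors[:, None, 1], gt_boxes[None, :, 1])
    y2 = np.minimum(anchors[:, None, 2], gt_boxes[None, :, 2])
    x2 = np.minimum(anchors[:, None, 3], gt_boxes[None, :, 3])
    inter = np.maximum(y2 - y1, 0) * np.maximum(x2 - x1, 0)
    area_a = (anchors[:, 2] - anchors[:, 0]) * (anchors[:, 3] - anchors[:, 1])
    area_g = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] - gt_boxes[:, 1])
    return inter / (area_a[:, None] + area_g[None, :] - inter)

def build_rpn_match(anchors, gt_boxes, vt1=0.7, vt2=0.3):
    iou = compute_iou(anchors, gt_boxes)
    max_iou = iou.max(axis=1)                           # best IoU of each anchor over all GT boxes
    rpn_match = np.zeros(len(anchors), dtype=np.int32)  # 0 = neutral
    rpn_match[max_iou > vt1] = 1                        # positive
    rpn_match[max_iou < vt2] = -1                       # negative
    # in practice the positives and negatives are then subsampled so that
    # their total equals the fixed per-image constant mentioned above
    return rpn_match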
rpn_bbox
- Input_rpn_bbox stores the offsets between each anchor and its matched ground-truth box; the RPN network likewise predicts offsets, not absolute coordinates!
- Only positive anchors get a corresponding Input_rpn_bbox entry (see the delta sketch below)
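A minimal sketch of the offsets stored in Input_rpn_bbox for one positive anchor and its matched ground-truth box, assuming the (y1, x1, y2, x2) layout and the standard (dy, dx, log(dh), log(dw)) parameterization; box_deltas and the example coordinates are illustrative.
import numpy as np

def box_deltas(anchor, gt):
    # anchor and gt are (y1, x1, y2, x2); the RPN is trained to regress exactly these deltas
    ay1, ax1, ay2, ax2 = anchor
    gy1, gx1, gy2, gx2 = gt
    ah, aw = ay2 - ay1, ax2 - ax1
    gh, gw = gy2 - gy1, gx2 - gx1
    acy, acx = ay1 + 0.5 * ah, ax1 + 0.5 * aw   # anchor center
    gcy, gcx = gy1 + 0.5 * gh, gx1 + 0.5 * gw   # ground-truth center
    return np.array([(gcy - acy) / ah, (gcx - acx) / aw,
                     np.log(gh / ah), np.log(gw / aw)])

print(box_deltas(np.array([10, 10, 50, 50]), np.array([12, 8, 52, 56])))
# approximately [0.05, 0.05, 0.0, 0.182]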
ResNet
- block
- Make sure the skip-connection branch and the output of the block's last layer have the same height, width, and channel count, so the element-wise addition is possible
- Architecture
import keras.layers as KL
from keras.models import Model
import keras.backend as K
import tensorflow as tf
Build the residual block
def building_block(filters, block):
    # block 0 of each stage downsamples with stride 2; the other blocks keep the size
    if block != 0:
        stride = 1
    else:
        stride = 2

    def f(x):
        # main path: 1x1 -> 3x3 -> 1x1 bottleneck
        y = KL.Conv2D(filters, (1, 1), strides=stride)(x)
        y = KL.BatchNormalization(axis=3)(y)
        y = KL.Activation("relu")(y)
        y = KL.Conv2D(filters, (3, 3), padding="same")(y)
        y = KL.BatchNormalization(axis=3)(y)
        y = KL.Activation("relu")(y)
        y = KL.Conv2D(4 * filters, (1, 1))(y)
        y = KL.BatchNormalization(axis=3)(y)
        # shortcut: block 0 needs a 1x1 projection so height, width and channels match for the add
        if block == 0:
            shortcut = KL.Conv2D(4 * filters, (1, 1), strides=stride)(x)
            shortcut = KL.BatchNormalization(axis=3)(shortcut)
        else:
            shortcut = x
        y = KL.Add()([y, shortcut])
        y = KL.Activation("relu")(y)
        return y

    return f
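A small sanity check of a single block (the 32x32x64 input size is just an assumption): block 0 of a stage halves the spatial size with its stride-2 convs and expands the channels to 4*filters, and the projection shortcut makes both paths match for the Add.
inp = KL.Input((32, 32, 64))
out = building_block(64, 0)(inp)
print(Model(inp, out).output_shape)   # (None, 16, 16, 256)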
Build the ResNet feature extractor
def resNet_featureExtractor(inputs):
    x = KL.Conv2D(64, (3, 3), padding="same")(inputs)
    x = KL.BatchNormalization(axis=3)(x)
    x = KL.Activation("relu")(x)

    filters = 64
    # number of blocks per stage; in each stage the first block (block 0) is the
    # downsampling/projection block, the rest are identity blocks
    blocks = [3, 6, 4]
    for i, block_num in enumerate(blocks):
        for block_id in range(block_num):
            x = building_block(filters, block_id)(x)
        filters *= 2
    return x
x = KL.Input((64, 64, 3))
y = resNet_featureExtractor(x)
model = Model([x], [y])
model.summary()
__________________________________________________________________________________________________
Total params: 6,902,656
Trainable params: 6,875,136
Non-trainable params: 27,520
__________________________________________________________________________________________________
from keras.utils.vis_utils import plot_model
plot_model(model,to_file="images/rpn_resnet_model.png",show_shapes=True)
With the CNN backbone built, implement the RPN head that follows it:
def rpn_net(inputs, k):
    # 3x3 conv shared by both branches
    shared_map = KL.Conv2D(256, (3, 3), padding="same")(inputs)
    shared_map = KL.Activation("linear")(shared_map)
    # lower branch: foreground/background classification, 2 scores per anchor
    rpn_class = KL.Conv2D(2 * k, (1, 1))(shared_map)
    rpn_class = KL.Lambda(lambda x: tf.reshape(x, [tf.shape(x)[0], -1, 2]))(rpn_class)
    rpn_class = KL.Activation("linear")(rpn_class)
    rpn_prob = KL.Activation("softmax")(rpn_class)
    # upper branch: bounding-box regression, 4 offsets per anchor
    y = KL.Conv2D(4 * k, (1, 1))(shared_map)
    y = KL.Activation("linear")(y)
    rpn_bbox = KL.Lambda(lambda x: tf.reshape(x, [tf.shape(x)[0], -1, 4]))(y)
    return rpn_class, rpn_prob, rpn_bbox
x = KL.Input((64, 64, 3))
fp = resNet_featureExtractor(x)
rpn_class, rpn_prob, rpn_bbox = rpn_net(fp, 9)
rpn_model = Model([x], [rpn_class, rpn_prob, rpn_bbox])
rpn_model.summary()
__________________________________________________________________________________________________
Total params: 9,276,086
Trainable params: 9,248,566
Non-trainable params: 27,520
__________________________________________________________________________________________________
plot_model(rpn_model,to_file="images/rpn_model.png",show_shapes=True)
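As a final sanity check with a random input: the backbone has three stride-2 blocks, so a 64x64 image yields an 8x8 feature map and 8*8*9 = 576 anchors, which should appear as the second dimension of all three outputs.
import numpy as np

dummy = np.random.rand(1, 64, 64, 3).astype("float32")
cls_logits, cls_prob, bbox_deltas = rpn_model.predict(dummy)
print(cls_logits.shape, cls_prob.shape, bbox_deltas.shape)
# expected: (1, 576, 2) (1, 576, 2) (1, 576, 4)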