Preface: During my summer internship I did some work on object detection, and I studied the field roughly in the order of its development history. But I always felt that reading papers was just armchair theory, and that training models was merely calling a module's fit function on data someone else had prepared; there was nothing of my own in that process. It so happened that I had just read the Faster RCNN paper, and Faster RCNN is a classic algorithm that uses anchors, an RPN, and end-to-end training; I also wanted to learn TensorFlow, and, even more conveniently, I had some data from a real project at hand. So over roughly a month, scraping together fragments of time and limited resources, I got a Faster RCNN network running and obtained satisfying results.
Of course this is not a completely independent reproduction: while building the network I referred to this author's article, dug deeper into the implementation details, and additionally recorded the problems that came up during training together with how I solved them.
1. Utility functions in utils.py
First import the required packages. The wandhG array stores the width and height of the 9 anchor priors, generated by clustering the ground-truth boxes of the training set (clustering to generate the anchor priors; a minimal sketch of such clustering follows the code below). The input image size is 512*512 and can be adjusted; set a smaller image size if you want faster computation.
import numpy as np
import cv2
from xml.dom.minidom import parse
import tensorflow as tf
# box width and height
wandhG = np.array([[ 45.5       ,  48.47058824],
                   [ 48.5       , 105.17647059],
                   [ 91.5       ,  76.23529412],
                   [ 60.        , 103.52941177],
                   [112.25      ,  48.        ],
                   [ 75.        ,  96.        ],
                   [ 24.        ,  26.82352941],
                   [107.        ,  61.17647059],
                   [ 87.        ,  26.35294118]], dtype=np.float32)
image_height = 512
image_width = 512
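By the way, these priors do not have to be taken on faith. Below is a minimal sketch of how priors like wandhG can be generated: k-means over the (w, h) pairs of the gt boxes, with 1 - IoU as the distance metric in the YOLOv2 style. Here gt_wh is a hypothetical (N, 2) array of gt box sizes already scaled to the input resolution; this is an illustration, not the exact script used for wandhG.
def kmeans_anchors(gt_wh, k=9, iters=100):
    '''cluster gt box sizes into k anchor priors; gt_wh is a hypothetical (N, 2) array'''
    def iou_wh(wh, centers):
        # IoU of boxes that share a common centre point
        inter = np.minimum(wh[:, None, 0], centers[None, :, 0]) * \
                np.minimum(wh[:, None, 1], centers[None, :, 1])
        union = wh[:, None, 0] * wh[:, None, 1] + \
                centers[None, :, 0] * centers[None, :, 1] - inter
        return inter / union
    centers = gt_wh[np.random.choice(len(gt_wh), k, replace=False)]
    for _ in range(iters):
        assign = np.argmax(iou_wh(gt_wh, centers), axis=1)   # highest IoU = nearest centre
        centers = np.array([gt_wh[assign == i].mean(axis=0) if np.any(assign == i)
                            else centers[i] for i in range(k)])
    return centers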
The load_gt_boxes function parses the annotation file of an image. It can parse both xml files produced by labelimg and YOLO-format txt files, and finally returns the top-left and bottom-right coordinates of the multiple gt boxes in one image. (Note that the txt branch below ignores the class label, the first field of each line, and returns only the coordinates; a small sanity check follows the function.)
def load_gt_boxes(path):
    '''
    Load the ground-truth bounding boxes of one image: [xmin, ymin, xmax, ymax]
    '''
    ## Alternative branch: parse a labelimg-style xml file
    # dom_tree = parse(path)
    # root_node = dom_tree.documentElement              # root element
    # size = root_node.getElementsByTagName('size')     # image size info
    # width = int(size[0].getElementsByTagName('width')[0].childNodes[0].data)
    # height = int(size[0].getElementsByTagName('height')[0].childNodes[0].data)
    # depth = int(size[0].getElementsByTagName('depth')[0].childNodes[0].data)
    # boxes = []
    # for obj in root_node.getElementsByTagName('object'):   # every annotated object
    #     # name = obj.getElementsByTagName('name')[0].childNodes[0].data
    #     bndbox = obj.getElementsByTagName('bndbox')[0]
    #     xmin = int(bndbox.getElementsByTagName('xmin')[0].childNodes[0].data)
    #     ymin = int(bndbox.getElementsByTagName('ymin')[0].childNodes[0].data)
    #     xmax = int(bndbox.getElementsByTagName('xmax')[0].childNodes[0].data)
    #     ymax = int(bndbox.getElementsByTagName('ymax')[0].childNodes[0].data)
    #     boxes.append([xmin, ymin, xmax, ymax])
    # return np.array(boxes)

    ## Parse a YOLO-format txt file: each line is "label x_center y_center w h",
    ## with all four coordinates normalized to [0, 1]. The label is ignored here.
    boxes = []
    with open(path, 'r') as f:
        for line in f.readlines():
            data = line.split(' ')
            x_center = np.float64(data[1]) * image_width
            y_center = np.float64(data[2]) * image_height
            w = np.float64(data[3]) * image_width
            h = np.float64(data[4]) * image_height
            xmin = x_center - w / 2
            xmax = x_center + w / 2
            ymin = y_center - h / 2
            ymax = y_center + h / 2
            boxes.append([xmin, ymin, xmax, ymax])
    return boxes
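A quick sanity check of the txt branch (using the 512*512 size set above): the line "0 0.5 0.5 0.25 0.25" describes a box centred in the image with width and height a quarter of the image size, so the corners should come out at (192, 192) and (320, 320).
import tempfile, os
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write("0 0.5 0.5 0.25 0.25\n")
    tmp_path = f.name
print(load_gt_boxes(tmp_path))   # expected: [[192.0, 192.0, 320.0, 320.0]]
os.remove(tmp_path)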
The plot_boxes_on_image function draws the box coordinates onto the image and returns the image in RGB format. (Useful for testing whether the coordinate parsing is correct; a usage example follows the function.)
def plot_boxes_on_image(image_with_boxes, boxes, thickness=2, color=[255, 0, 0]):
    '''plot boxes on image'''
    boxes = np.array(boxes).astype(np.int32)
    for box in boxes:
        # draw each box from its top-left (xmin, ymin) to its bottom-right (xmax, ymax)
        cv2.rectangle(image_with_boxes, pt1=(box[0], box[1]), pt2=(box[2], box[3]),
                      color=color, thickness=thickness)
    # OpenCV loads images as BGR; convert to RGB for display
    image_with_boxes = cv2.cvtColor(image_with_boxes, cv2.COLOR_BGR2RGB)
    return image_with_boxes
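Typical usage (the file names demo.jpg and demo.txt below are placeholders; matplotlib is only used for display):
import matplotlib.pyplot as plt
image = cv2.imread('demo.jpg')                      # hypothetical image path
image = cv2.resize(image, (image_width, image_height))
boxes = load_gt_boxes('demo.txt')                   # hypothetical annotation path
plt.imshow(plot_boxes_on_image(image, boxes))
plt.show()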
compute_iou computes the intersection over union of two boxes. IoU measures how much a predicted box and a gt box overlap and how close they are: the closer the IoU is to 1, the closer the predicted box is to the gt box.
def compute_iou(box1, box2):
    """
    Compute the IoU (Intersection Over Union) of two boxes
    given as [xmin, ymin, xmax, ymax]
    :param box1:
    :param box2:
    :return: iou
    """
    w_1 = box1[2] - box1[0]
    h_1 = box1[3] - box1[1]
    w_2 = box2[2] - box2[0]
    h_2 = box2[3] - box2[1]
    # span of the union of the two boxes along each axis
    x = [box1[0], box1[2], box2[0], box2[2]]
    y = [box1[1], box1[3], box2[1], box2[3]]
    delta_x = np.max(x) - np.min(x)
    delta_y = np.max(y) - np.min(y)
    # overlap along each axis: total width/height minus the union span
    w_in = w_1 + w_2 - delta_x
    h_in = h_1 + h_2 - delta_y
    if w_in <= 0 or h_in <= 0:   # no overlap on at least one axis
        iou = 0
    else:
        area_in = w_in * h_in
        area_un = w_1 * h_1 + w_2 * h_2 - area_in
        iou = area_in / area_un
    return iou
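Two hand-checkable cases: identical boxes must give 1.0, and two 10*10 boxes offset horizontally by half a width share a 5*10 strip, so the IoU is 50/(100 + 100 - 50) = 1/3.
print(compute_iou([0, 0, 10, 10], [0, 0, 10, 10]))   # 1.0
print(compute_iou([0, 0, 10, 10], [5, 0, 15, 10]))   # 0.333...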
The regression_box_shift function computes, for proposal boxes that detect an object with score above positive_threshold and whose IoU with a gt box exceeds the IoU threshold, the transform from the proposal box to the ground-truth box: tx and ty are the coordinate offsets, tw and th are the width and height scaling factors. Pay close attention to the direction of the transform; otherwise, during training and testing you will find the proposals drifting farther and farther away from the target boxes, the scores getting lower and lower, and the loss blowing up. (A round-trip check follows the function.)
def regression_box_shift(p, g):
    """
    Compute the transform t that maps proposal box p onto ground-truth box g.
    Note: the offsets are measured from the top-left corner, which matches
    the decoding done in output_decode below.
    :param p: proposal box [xmin, ymin, xmax, ymax]
    :param g: ground truth [xmin, ymin, xmax, ymax]
    :return: t
    """
    w_p = p[2] - p[0]
    h_p = p[3] - p[1]
    w_g = g[2] - g[0]
    h_g = g[3] - g[1]
    tx = (g[0] - p[0]) / w_p   # corner offsets, in units of the proposal's size
    ty = (g[1] - p[1]) / h_p
    tw = np.log(w_g / w_p)     # log-scale width/height ratios
    th = np.log(h_g / h_p)
    t = [tx, ty, tw, th]
    return t
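To convince yourself that the direction is right, you can invert the transform the same way output_decode does below and check the round trip; apply_box_shift is a small helper written just for this check.
def apply_box_shift(p, t):
    '''inverse of regression_box_shift: apply transform t to proposal p'''
    w_p, h_p = p[2] - p[0], p[3] - p[1]
    xmin = p[0] + t[0] * w_p
    ymin = p[1] + t[1] * h_p
    xmax = xmin + np.exp(t[2]) * w_p
    ymax = ymin + np.exp(t[3]) * h_p
    return [xmin, ymin, xmax, ymax]

p, g = [100, 100, 200, 180], [120, 90, 260, 200]
t = regression_box_shift(p, g)
print(apply_box_shift(p, t))   # recovers g: [120.0, 90.0, 260.0, 200.0]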
The output_decode function decodes the predicted boxes and scores. In this Faster RCNN architecture the image goes through 4 max-pooling layers in the backbone, so the final feature map is one sixteenth of the input size on each side, i.e. 512/16 = 32. Every pixel of the feature map corresponds to a 16*16 grid cell of the original input image. The function first computes the centre coordinates of every grid cell in the input image and the coordinates of the 9 anchor boxes centred on it; it then combines the anchor priors with the predicted transforms to obtain the predicted box of every anchor, and finally filters by the score threshold to get the final predicted boxes and their corresponding scores.
def output_decode(pred_bboxes, pred_scores, score_thresh=0.5):
    # centre of every 16x16 grid cell on the 32x32 feature map
    grid_x, grid_y = tf.range(32, dtype=tf.int32), tf.range(32, dtype=tf.int32)
    grid_x, grid_y = tf.meshgrid(grid_x, grid_y)
    grid_x, grid_y = tf.expand_dims(grid_x, -1), tf.expand_dims(grid_y, -1)
    grid_xy = tf.stack([grid_x, grid_y], axis=-1)        # [32, 32, 1, 2]
    center_xy = grid_xy * 16 + 8
    center_xy = tf.cast(center_xy, tf.float32)
    # top-left corners of the 9 anchors at every grid centre: [1, 32, 32, 9, 2]
    anchor_xymin = center_xy - 0.5 * wandhG
    anchor_xymin = tf.expand_dims(anchor_xymin, axis=0)
    # apply the predicted transforms (the inverse of regression_box_shift)
    xy_min = pred_bboxes[..., 0:2] * wandhG[:, 0:2] + anchor_xymin
    xy_max = tf.exp(pred_bboxes[..., 2:4]) * wandhG[:, 0:2] + xy_min
    pred_bboxes = tf.concat([xy_min, xy_max], axis=-1)
    # keep only anchors whose foreground score exceeds the threshold
    pred_scores = pred_scores[..., 1]
    score_mask = pred_scores > score_thresh
    pred_bboxes = tf.reshape(pred_bboxes[score_mask], shape=[-1, 4]).numpy()
    pred_scores = tf.reshape(pred_scores[score_mask], shape=[-1, ]).numpy()
    return pred_bboxes, pred_scores
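As a quick shape sanity check with random tensors (a sketch, not real network output): pred_bboxes is assumed to have shape [1, 32, 32, 9, 4] and pred_scores shape [1, 32, 32, 9, 2], matching the 32*32 feature map with 9 anchors per cell described above.
dummy_bboxes = tf.random.normal([1, 32, 32, 9, 4], stddev=0.1)
dummy_scores = tf.random.uniform([1, 32, 32, 9, 2])
boxes, scores = output_decode(dummy_bboxes, dummy_scores, score_thresh=0.5)
print(boxes.shape, scores.shape)   # (M, 4) and (M,) for the M anchors kept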