The SORT Algorithm

Reference link: https://blog.csdn.net/c20081052/article/details/93488032

I read the SORT paper a while ago and have long been meaning to write this source-code walkthrough; I finally found the time to do it.

For an interpretation of the paper itself, see: SIMPLE ONLINE AND REALTIME TRACKING (SORT) paper reading notes.

 

Paper: https://arxiv.org/abs/1602.00763

GitHub: https://github.com/abewley/sort


The figures used below are reproduced from HaoBBNuanMM, in order to better illustrate the flow of the source code.

 


OK, on to the source-code analysis. The downloaded project contains several files; this post focuses on sort.py.

sort.py


  
  
  1. """
  2. SORT: A Simple, Online and Realtime Tracker
  3. Copyright (C) 2016 Alex Bewley alex@dynamicdetection.com
  4. This program is free software: you can redistribute it and/or modify
  5. it under the terms of the GNU General Public License as published by
  6. the Free Software Foundation, either version 3 of the License, or
  7. (at your option) any later version.
  8. This program is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. GNU General Public License for more details.
  12. You should have received a copy of the GNU General Public License
  13. along with this program. If not, see <http://www.gnu.org/licenses/>.
  14. """
  15. from __future__ import print_function
  16. from numba import jit #是python的一个JIT库,通过装饰器来实现运行时的加速
  17. import os.path
  18. import numpy as np
  19. import matplotlib.pyplot as plt
  20. import matplotlib.patches as patches #用于绘制常见图像(如矩形,椭圆,圆形,多边形)
  21. from skimage import io
  22. from sklearn.utils.linear_assignment_ import linear_assignment
  23. import glob
  24. import time
  25. import argparse
  26. from filterpy.kalman import KalmanFilter #filterpy包含了一些常用滤波器的库
  27. @jit #用了jit装饰器,可加速for循环的计算
  28. def iou( bb_test,bb_gt):
  29. """
  30. Computes IOU between two bboxes in the form [x1,y1,x2,y2]
  31. """
  32. xx1 = np.maximum(bb_test[ 0], bb_gt[ 0])
  33. yy1 = np.maximum(bb_test[ 1], bb_gt[ 1])
  34. xx2 = np.minimum(bb_test[ 2], bb_gt[ 2])
  35. yy2 = np.minimum(bb_test[ 3], bb_gt[ 3])
  36. w = np.maximum( 0., xx2 - xx1)
  37. h = np.maximum( 0., yy2 - yy1)
  38. wh = w * h
  39. o = wh / ((bb_test[ 2]-bb_test[ 0])*(bb_test[ 3]-bb_test[ 1]) #IOU=(bb_test和bb_gt框相交部分面积)/(bb_test框面积+bb_gt框面积 - 两者相交面积)
  40. + (bb_gt[ 2]-bb_gt[ 0])*(bb_gt[ 3]-bb_gt[ 1]) - wh)
  41. return(o)
  42. def convert_bbox_to_z( bbox): #将bbox由[x1,y1,x2,y2]形式转为 [框中心点x,框中心点y,框面积s,宽高比例r]^T
  43. """
  44. Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
  45. [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
  46. the aspect ratio
  47. """
  48. w = bbox[ 2]-bbox[ 0]
  49. h = bbox[ 3]-bbox[ 1]
  50. x = bbox[ 0]+w/ 2.
  51. y = bbox[ 1]+h/ 2.
  52. s = w*h #scale is just area
  53. r = w/ float(h)
  54. return np.array([x,y,s,r]).reshape(( 4, 1)) #将数组转为4行一列形式,即[x,y,s,r]^T
  55. def convert_x_to_bbox( x,score=None): #将[x,y,s,r]形式的bbox,转为[x1,y1,x2,y2]形式
  56. """
  57. Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
  58. [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
  59. """
  60. w = np.sqrt(x[ 2]*x[ 3]) #w=sqrt(w*h * w/h)
  61. h = x[ 2]/w #h=w*h/w
  62. if(score== None): #如果检测框不带置信度
  63. return np.array([x[ 0]-w/ 2.,x[ 1]-h/ 2.,x[ 0]+w/ 2.,x[ 1]+h/ 2.]).reshape(( 1, 4)) #返回[x1,y1,x2,y2]
  64. else: #如果加测框带置信度
  65. return np.array([x[ 0]-w/ 2.,x[ 1]-h/ 2.,x[ 0]+w/ 2.,x[ 1]+h/ 2.,score]).reshape(( 1, 5)) #返回[x1,y1,x2,y2,score]
  66. class KalmanBoxTracker( object):
  67. """
  68. This class represents the internel state of individual tracked objects observed as bbox.
  69. """
  70. count = 0
  71. def __init__( self,bbox):
  72. """
  73. Initialises a tracker using initial bounding box. 使用初始边界框初始化跟踪器
  74. """
  75. #define constant velocity model #定义匀速模型
  76. self.kf = KalmanFilter(dim_x= 7, dim_z= 4) #状态变量是7维, 观测值是4维的,按照需要的维度构建目标
  77. self.kf.F = np.array([[ 1, 0, 0, 0, 1, 0, 0],[ 0, 1, 0, 0, 0, 1, 0],[ 0, 0, 1, 0, 0, 0, 1],[ 0, 0, 0, 1, 0, 0, 0],[ 0, 0, 0, 0, 1, 0, 0],[ 0, 0, 0, 0, 0, 1, 0],[ 0, 0, 0, 0, 0, 0, 1]])
  78. self.kf.H = np.array([[ 1, 0, 0, 0, 0, 0, 0],[ 0, 1, 0, 0, 0, 0, 0],[ 0, 0, 1, 0, 0, 0, 0],[ 0, 0, 0, 1, 0, 0, 0]])
  79. self.kf.R[ 2:, 2:] *= 10.
  80. self.kf.P[ 4:, 4:] *= 1000. #give high uncertainty to the unobservable initial velocities 对未观测到的初始速度给出高的不确定性
  81. self.kf.P *= 10. # 默认定义的协方差矩阵是np.eye(dim_x),将P中的数值与10, 1000相乘,赋值不确定性
  82. self.kf.Q[- 1,- 1] *= 0.01
  83. self.kf.Q[ 4:, 4:] *= 0.01
  84. self.kf.x[: 4] = convert_bbox_to_z(bbox) #将bbox转为 [x,y,s,r]^T形式,赋给状态变量X的前4位
  85. self.time_since_update = 0
  86. self. id = KalmanBoxTracker.count
  87. KalmanBoxTracker.count += 1
  88. self.history = []
  89. self.hits = 0
  90. self.hit_streak = 0
  91. self.age = 0
  92. def update( self,bbox):
  93. """
  94. Updates the state vector with observed bbox.
  95. """
  96. self.time_since_update = 0
  97. self.history = []
  98. self.hits += 1
  99. self.hit_streak += 1
  100. self.kf.update(convert_bbox_to_z(bbox))
  101. def predict( self):
  102. """
  103. Advances the state vector and returns the predicted bounding box estimate.
  104. """
  105. if((self.kf.x[ 6]+self.kf.x[ 2])<= 0):
  106. self.kf.x[ 6] *= 0.0
  107. self.kf.predict()
  108. self.age += 1
  109. if(self.time_since_update> 0):
  110. self.hit_streak = 0
  111. self.time_since_update += 1
  112. self.history.append(convert_x_to_bbox(self.kf.x))
  113. return self.history[- 1]
  114. def get_state( self):
  115. """
  116. Returns the current bounding box estimate.
  117. """
  118. return convert_x_to_bbox(self.kf.x)
  119. def associate_detections_to_trackers( detections,trackers,iou_threshold = 0.3): #用于将检测与跟踪进行关联
  120. """
  121. Assigns detections to tracked object (both represented as bounding boxes)
  122. Returns 3 lists of matches, unmatched_detections and unmatched_trackers
  123. """
  124. if( len(trackers)== 0): #如果跟踪器为空
  125. return np.empty(( 0, 2),dtype= int), np.arange( len(detections)), np.empty(( 0, 5),dtype= int)
  126. iou_matrix = np.zeros(( len(detections), len(trackers)),dtype=np.float32) # 检测器与跟踪器IOU矩阵
  127. for d,det in enumerate(detections):
  128. for t,trk in enumerate(trackers):
  129. iou_matrix[d,t] = iou(det,trk) #计算检测器与跟踪器的IOU并赋值给IOU矩阵对应位置
  130. matched_indices = linear_assignment(-iou_matrix) # 参考:https://blog.csdn.net/herr_kun/article/details/86509591 加上负号是因为linear_assignment求的是最小代价组合,而我们需要的是IOU最大的组合方式,所以取负号
  131. unmatched_detections = [] #未匹配上的检测器
  132. for d,det in enumerate(detections):
  133. if(d not in matched_indices[:, 0]): #如果检测器中第d个检测结果不在匹配结果索引中,则d未匹配上
  134. unmatched_detections.append(d)
  135. unmatched_trackers = [] #未匹配上的跟踪器
  136. for t,trk in enumerate(trackers):
  137. if(t not in matched_indices[:, 1]): #如果跟踪器中第t个跟踪结果不在匹配结果索引中,则t未匹配上
  138. unmatched_trackers.append(t)
  139. #filter out matched with low IOU 过滤掉那些IOU较小的匹配对
  140. matches = [] #存放过滤后的匹配结果
  141. for m in matched_indices: #遍历粗匹配结果
  142. if(iou_matrix[m[ 0],m[ 1]]<iou_threshold): #m[0]是检测器ID, m[1]是跟踪器ID,如它们的IOU小于阈值则将它们视为未匹配成功
  143. unmatched_detections.append(m[ 0])
  144. unmatched_trackers.append(m[ 1])
  145. else:
  146. matches.append(m.reshape( 1, 2)) #将过滤后的匹配对维度变形成1x2形式
  147. if( len(matches)== 0): #如果过滤后匹配结果为空,那么返回空的匹配结果
  148. matches = np.empty(( 0, 2),dtype= int)
  149. else: #如果过滤后匹配结果非空,则按0轴方向继续添加匹配对
  150. matches = np.concatenate(matches,axis= 0)
  151. return matches, np.array(unmatched_detections), np.array(unmatched_trackers) #其中跟踪器数组是5列的(最后一列是ID)
  152. class Sort( object):
  153. def __init__( self,max_age=1,min_hits=3):
  154. """
  155. Sets key parameters for SORT
  156. """
  157. self.max_age = max_age
  158. self.min_hits = min_hits
  159. self.trackers = []
  160. self.frame_count = 0
  161. def update( self,dets): #输入的是检测结果[x1,y1,x2,y2,score]形式
  162. """
  163. Params:
  164. dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
  165. Requires: this method must be called once for each frame even with empty detections. #每一帧都得调用一次,即便检测结果为空
  166. Returns the a similar array, where the last column is the object ID. #返回相似的数组,最后一列是目标ID
  167. NOTE: The number of objects returned may differ from the number of detections provided. #返回的目标数量可能与提供的检测数量不同
  168. """
  169. self.frame_count += 1 #帧计数
  170. #get predicted locations from existing trackers.
  171. trks = np.zeros(( len(self.trackers), 5)) # 根据当前所有卡尔曼跟踪器的个数创建二维零矩阵,维度为:卡尔曼跟踪器ID个数x 5 (这5列内容为bbox与ID)
  172. to_del = [] #存放待删除
  173. ret = [] #存放最后返回的结果
  174. for t,trk in enumerate(trks): #循环遍历卡尔曼跟踪器列表
  175. pos = self.trackers[t].predict()[ 0] #用卡尔曼跟踪器t 预测 对应物体在当前帧中的bbox
  176. trk[:] = [pos[ 0], pos[ 1], pos[ 2], pos[ 3], 0]
  177. if(np. any(np.isnan(pos))): #如果预测的bbox为空,那么将第t个卡尔曼跟踪器删除
  178. to_del.append(t)
  179. trks = np.ma.compress_rows(np.ma.masked_invalid(trks)) #将预测为空的卡尔曼跟踪器所在行删除,最后trks中存放的是上一帧中被跟踪的所有物体在当前帧中预测的非空bbox
  180. for t in reversed(to_del): #对to_del数组进行倒序遍历
  181. self.trackers.pop(t) #从跟踪器中删除 to_del中的上一帧跟踪器ID
  182. matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets,trks) #对传入的检测结果 与 上一帧跟踪物体在当前帧中预测的结果做关联,返回匹配的目标矩阵matched, 新增目标的矩阵unmatched_dets, 离开画面的目标矩阵unmatched_trks
  183. #update matched trackers with assigned detections
  184. for t,trk in enumerate(self.trackers): # 对卡尔曼跟踪器做遍历
  185. if(t not in unmatched_trks): #如果上一帧中的t还在当前帧画面中(即不在当前预测的离开画面的矩阵unmatched_trks中)
  186. d = matched[np.where(matched[:, 1]==t)[ 0], 0] #说明卡尔曼跟踪器t是关联成功的,在matched矩阵中找到与其关联的检测器d
  187. trk.update(dets[d,:][ 0]) #用关联的检测结果d来更新卡尔曼跟踪器(即用后验来更新先验)
  188. #create and initialise new trackers for unmatched detections #对于新增的未匹配的检测结果,创建并初始化跟踪器
  189. for i in unmatched_dets: #新增目标
  190. trk = KalmanBoxTracker(dets[i,:]) #将新增的未匹配的检测结果dets[i,:]传入KalmanBoxTracker
  191. self.trackers.append(trk) #将新创建和初始化的跟踪器trk 传入trackers
  192. i = len(self.trackers)
  193. for trk in reversed(self.trackers): #对新的卡尔曼跟踪器集进行倒序遍历
  194. d = trk.get_state()[ 0] #获取trk跟踪器的状态 [x1,y1,x2,y2]
  195. if((trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits)):
  196. ret.append(np.concatenate((d,[trk. id+ 1])).reshape( 1,- 1)) # +1 as MOT benchmark requires positive
  197. i -= 1
  198. #remove dead tracklet
  199. if(trk.time_since_update > self.max_age):
  200. self.trackers.pop(i)
  201. if( len(ret)> 0):
  202. return np.concatenate(ret)
  203. return np.empty(( 0, 5))
  204. def parse_args():
  205. """Parse input arguments."""
  206. parser = argparse.ArgumentParser(description= 'SORT demo')
  207. parser.add_argument( '--display', dest= 'display', help= 'Display online tracker output (slow) [False]',action= 'store_true')
  208. args = parser.parse_args()
  209. return args
  210. if __name__ == '__main__':
  211. # all train
  212. sequences = [ 'PETS09-S2L1', 'TUD-Campus', 'TUD-Stadtmitte', 'ETH-Bahnhof', 'ETH-Sunnyday', 'ETH-Pedcross2', 'KITTI-13', 'KITTI-17', 'ADL-Rundle-6', 'ADL-Rundle-8', 'Venice-2']
  213. args = parse_args()
  214. display = args.display
  215. phase = 'train'
  216. total_time = 0.0
  217. total_frames = 0
  218. colours = np.random.rand( 32, 3) #used only for display
  219. if(display):
  220. if not os.path.exists( 'mot_benchmark'):
  221. print( '\n\tERROR: mot_benchmark link not found!\n\n Create a symbolic link to the MOT benchmark\n (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
  222. exit()
  223. plt.ion() #用于动态绘制显示图像
  224. fig = plt.figure()
  225. if not os.path.exists( 'output'):
  226. os.makedirs( 'output')
  227. for seq in sequences:
  228. mot_tracker = Sort() #create instance of the SORT tracker 创建Sort 跟踪实例
  229. seq_dets = np.loadtxt( 'data/%s/det.txt'%(seq),delimiter= ',') #load detections #加载检测结果
  230. with open( 'output/%s.txt'%(seq), 'w') as out_file:
  231. print( "Processing %s."%(seq))
  232. for frame in range( int(seq_dets[:, 0]. max())): #确定视频序列总帧数,并进行for循环
  233. frame += 1 #detection and frame numbers begin at 1 #由于视频序列帧数是从1开始的,因此加1
  234. dets = seq_dets[seq_dets[:, 0]==frame, 2: 7] #提取检测结果中的[x1,y1,w,h,score]到dets
  235. dets[:, 2: 4] += dets[:, 0: 2] #convert to [x1,y1,w,h] to [x1,y1,x2,y2] 将dets中的第2,3列的数加上第0,1列的数后赋值给2,3列;
  236. total_frames += 1 #总帧数累计
  237. if(display): #如果要求显示结果
  238. ax1 = fig.add_subplot( 111, aspect= 'equal')
  239. fn = 'mot_benchmark/%s/%s/img1/%06d.jpg'%(phase,seq,frame) #原图像路径名
  240. im =io.imread(fn) #加载图像
  241. ax1.imshow(im) #显示图像
  242. plt.title(seq+ ' Tracked Targets')
  243. start_time = time.time()
  244. trackers = mot_tracker.update(dets) #sort跟踪器更新
  245. cycle_time = time.time() - start_time #sort跟踪器耗时
  246. total_time += cycle_time #sort跟踪器总共耗费时间
  247. for d in trackers:
  248. print( '%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1'%(frame,d[ 4],d[ 0],d[ 1],d[ 2]-d[ 0],d[ 3]-d[ 1]),file=out_file) #打印: frame,ID,x1,y1,x2,y2,1,-1,-1,-1
  249. if(display): #如果显示,将目标检测框画上
  250. d = d.astype(np.int32)
  251. ax1.add_patch(patches.Rectangle((d[ 0],d[ 1]),d[ 2]-d[ 0],d[ 3]-d[ 1],fill= False,lw= 3,ec=colours[d[ 4]% 32,:]))
  252. ax1.set_adjustable( 'box-forced')
  253. if(display):
  254. fig.canvas.flush_events()
  255. plt.draw()
  256. ax1.cla()
  257. print( "Total Tracking took: %.3f for %d frames or %.1f FPS"%(total_time,total_frames,total_frames/total_time))
  258. if(display):
  259. print( "Note: to get real runtime results run without the option: --display")
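Taken together, the typical calling pattern is: create one Sort instance per sequence and feed it the detections of every frame. Below is a minimal sketch of that pattern (it is not part of sort.py; it assumes the listing above can be imported as sort and that numpy, filterpy and the other dependencies are installed), using two hand-made frames of detections:

  import numpy as np
  from sort import Sort  # assumes sort.py (the listing above) is importable

  mot_tracker = Sort(max_age=1, min_hits=3)

  # two fake frames, each with two detections in [x1, y1, x2, y2, score] form
  frames = [
      np.array([[10., 10., 50., 80., 0.9], [200., 120., 260., 210., 0.8]]),
      np.array([[12., 11., 52., 82., 0.9], [202., 121., 263., 213., 0.8]]),
  ]

  for frame_idx, dets in enumerate(frames, start=1):
      tracks = mot_tracker.update(dets)  # must be called once per frame, even when dets is empty
      for x1, y1, x2, y2, track_id in tracks:
          print(frame_idx, int(track_id), x1, y1, x2, y2)

Each returned row is [x1, y1, x2, y2, track ID]; with the default min_hits=3, tracks are still reported in the very first frames because of the frame_count <= min_hits branch in Sort.update.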

The code operates offline on pre-computed detection results for each image sequence; the detector used is Faster R-CNN, and the detections are stored in data/<sequence name>/det.txt.
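The main loop expects each row of det.txt to be comma-separated in the MOT challenge detection layout (frame, id, x, y, w, h, score, ...): it slices columns 2:7 and then turns width/height into the bottom-right corner. A small sketch of that preprocessing step on made-up rows:

  import numpy as np

  # made-up rows in the det.txt layout: frame, id, x, y, w, h, score, -1, -1, -1
  seq_dets = np.array([
      [1, -1, 100., 50., 40., 90., 0.95, -1, -1, -1],
      [1, -1, 300., 60., 35., 85., 0.80, -1, -1, -1],
      [2, -1, 102., 52., 40., 90., 0.93, -1, -1, -1],
  ])

  frame = 1
  dets = seq_dets[seq_dets[:, 0] == frame, 2:7]  # -> [[x, y, w, h, score], ...]
  dets[:, 2:4] += dets[:, 0:2]                   # -> [[x1, y1, x2, y2, score], ...]
  print(dets)
  # [[100.  50. 140. 140.   0.95]
  #  [300.  60. 335. 145.   0.8 ]]

After this conversion the array is exactly what Sort.update expects.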

 

 

Reference (this article is really well written): 【算法分析】SORT/Deep SORT 物体跟踪算法解析 ([Algorithm Analysis] SORT / Deep SORT object tracking explained)
