KITTI tracking evaluation code in Python

#!/usr/bin/env python
# encoding: utf-8
"""
    function that does the evaluation
    input:
      - result_sha (sha key where the results are located)
      - mail (messenger object for output messages sent via email and to cout)
    output:
      - True if at least one of the sub-benchmarks could be processed successfully
      - False otherwise
    data:
      - at this point the submitted files are located in results/<result_sha>/data
      - the results shall be saved as follows
        -> summary statistics of the method: results/<result_sha>/stats_task.txt
           here task refers to the sub-benchmark (e.g., um_lane, uu_road etc.)
           file contents: numbers for main table, format: %.6f (single space separated)
           note: only files with successful sub-benchmark evaluation must be created
        -> detailed results/graphics/plots: results/<result_sha>/subdir
           with appropriate subdir and file names (all subdir's need to be created)
"""

import sys,os,copy,math
from munkres import Munkres
from collections import defaultdict
try:
    from ordereddict import OrderedDict # can be installed using pip
except ImportError:
    from collections import OrderedDict # only included from python 2.7 on

import mailpy

class tData:
    """
        Utility class to load data.
    """
    
    def __init__(self,frame=-1,obj_type="unset",truncation=-1,occlusion=-1,\
                 obs_angle=-10,x1=-1,y1=-1,x2=-1,y2=-1,w=-1,h=-1,l=-1,\
                 X=-1000,Y=-1000,Z=-1000,yaw=-10,score=-1000,track_id=-1):
        """
            Constructor, initializes the object given the parameters.
        """
        
        # init object data
        self.frame      = frame
        self.track_id   = track_id
        self.obj_type   = obj_type
        self.truncation = truncation
        self.occlusion  = occlusion
        self.obs_angle  = obs_angle
        self.x1         = x1
        self.y1         = y1
        self.x2         = x2
        self.y2         = y2
        self.w          = w
        self.h          = h
        self.l          = l
        self.X          = X
        self.Y          = Y
        self.Z          = Z
        self.yaw        = yaw
        self.score      = score
        self.ignored    = False
        self.valid      = False
        self.tracker    = -1

    def __str__(self):
        """
            Print read data.
        """
        
        attrs = vars(self)
        return '\n'.join("%s: %s" % item for item in attrs.items())

class trackingEvaluation(object):
    """ tracking statistics (CLEAR MOT, id-switches, fragments, ML/PT/MT, precision/recall)
             MOTA	- Multi-object tracking accuracy in [0,100]
             MOTP	- Multi-object tracking precision in [0,100] (3D) / [td,100] (2D)
             MOTAL	- Multi-object tracking accuracy in [0,100] with log10(id-switches)

             id-switches - number of id switches
             fragments   - number of fragmentations

             MT, PT, ML	- number of mostly tracked, partially tracked and mostly lost trajectories

             recall	        - recall = percentage of detected targets
             precision	    - precision = percentage of correctly detected targets
             FAR		    - number of false alarms per frame
             falsepositives - number of false positives (FP)
             missed         - number of missed targets (FN)
    """

    def __init__(self, t_sha, gt_path="./tools/eval_kitti_track/data/tracking",\
        split_version='', min_overlap=0.5, max_truncation = 0, min_height = 25, 
        max_occlusion = 2, mail=None, cls="car"):
        # get number of sequences and
        # get number of frames per sequence from test mapping
        # (created while extracting the benchmark)
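        # each seqmap line is assumed to hold four space-separated fields,
        # e.g. "0000 empty 000000 000153":
        # sequence name, a placeholder, first frame index, last frame index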
        filename_test_mapping = "./tools/eval_kitti_track/data/tracking/" + \
          "evaluate_tracking{}.seqmap".format(split_version)
        self.n_frames         = []
        self.sequence_name    = []
        with open(filename_test_mapping, "r") as fh:
            for i,l in enumerate(fh):
                fields = l.split(" ")
                self.sequence_name.append("%04d" % int(fields[0]))
                self.n_frames.append(int(fields[3]) - int(fields[2])+1)
        self.n_sequences = i+1

        # mail object
        self.mail = mail

        # class to evaluate, i.e. pedestrian or car
        self.cls = cls

        # data and parameter
        if 'half' in split_version:
            self.gt_path = os.path.join(
                gt_path, 'label_02_{}'.format(split_version))
        else:
            self.gt_path = os.path.join(gt_path, "label_02")
        self.t_sha             = t_sha
        self.t_path            = os.path.join(t_sha)
        
        # statistics and numbers for evaluation
        self.n_gt              = 0 # number of ground truth detections minus ignored false negatives and true positives
        self.n_igt             = 0 # number of ignored ground truth detections
        self.n_gts             = [] # number of ground truth detections minus ignored false negatives and true positives PER SEQUENCE
        self.n_igts            = [] # number of ignored ground truth detections PER SEQUENCE
        self.n_gt_trajectories = 0
        self.n_gt_seq          = []
        self.n_tr              = 0 # number of tracker detections minus ignored tracker detections
        self.n_trs             = [] # number of tracker detections minus ignored tracker detections PER SEQUENCE
        self.n_itr             = 0 # number of ignored tracker detections
        self.n_itrs            = [] # number of ignored tracker detections PER SEQUENCE
        self.n_igttr           = 0 # number of ignored ground truth detections where the corresponding associated tracker detection is also ignored
        self.n_tr_trajectories = 0
        self.n_tr_seq          = []
        self.MOTA              = 0
        self.MOTP              = 0
        self.MOTAL             = 0
        self.MODA              = 0
        self.MODP              = 0
        self.MODP_t            = []
        self.recall            = 0
        self.precision         = 0
        self.F1                = 0
        self.FAR               = 0
        self.total_cost        = 0
        self.itp               = 0 # number of ignored true positives
        self.itps              = [] # number of ignored true positives PER SEQUENCE
        self.tp                = 0 # number of true positives including ignored true positives!
        self.tps               = [] # number of true positives including ignored true positives PER SEQUENCE
        self.fn                = 0 # number of false negatives WITHOUT ignored false negatives
        self.fns               = [] # number of false negatives WITHOUT ignored false negatives PER SEQUENCE
        self.ifn               = 0 # number of ignored false negatives
        self.ifns              = [] # number of ignored false negatives PER SEQUENCE
        self.fp                = 0 # number of false positives
                                   # a bit tricky, the number of ignored false negatives and ignored true positives 
                                   # is subtracted, but if both tracker detection and ground truth detection
                                   # are ignored this number is added again to avoid double counting
        self.fps               = [] # above PER SEQUENCE
        self.mme               = 0
        self.fragments         = 0
        self.id_switches       = 0
        self.MT                = 0
        self.PT                = 0
        self.ML                = 0
        
        self.min_overlap       = min_overlap # minimum bounding box overlap for 3rd party metrics
        self.max_truncation    = max_truncation # maximum truncation of an object for evaluation
        self.max_occlusion     = max_occlusion # maximum occlusion of an object for evaluation
        self.min_height        = min_height # minimum height of an object for evaluation
        self.n_sample_points   = 500
        
        # this should be enough to hold all groundtruth trajectories
        # is expanded if necessary and reduced in any case
        self.gt_trajectories            = [[] for x in range(self.n_sequences)]
        self.ign_trajectories           = [[] for x in range(self.n_sequences)]

    def createEvalDir(self):
        """
            Creates directory to store evaluation results and data for visualization.
        """
        
        # self.eval_dir = os.path.join("./results/", self.t_sha, "eval", self.cls)
        self.eval_dir = os.path.join(self.t_sha, "eval", self.cls)
        if not os.path.exists(self.eval_dir):
            print("create directory:", self.eval_dir,)
            os.makedirs(self.eval_dir)
            print("done")

    def loadGroundtruth(self):
        """
            Helper function to load ground truth.
        """
        
        try:
            self._loadData(self.gt_path, cls=self.cls, loading_groundtruth=True)
        except IOError:
            return False
        return True

    def loadTracker(self):
        """
            Helper function to load tracker data.
        """
        
        try:
            if not self._loadData(self.t_path, cls=self.cls, loading_groundtruth=False):
                return False
        except IOError:
            return False
        return True

    def _loadData(self, root_dir, cls, min_score=-1000, loading_groundtruth=False):
        """
            Generic loader for ground truth and tracking data.
            Use loadGroundtruth() or loadTracker() to load this data.
            Loads detections in KITTI format from textfiles.
        """
        # construct objectDetections object to hold detection data
        t_data  = tData()
        data    = []
        eval_2d = True
        eval_3d = True

        seq_data           = []
        n_trajectories     = 0
        n_trajectories_seq = []
        for seq, s_name in enumerate(self.sequence_name):
            i              = 0
            filename       = os.path.join(root_dir, "%s.txt" % s_name)
            f              = open(filename, "r")

            f_data         = [[] for x in range(self.n_frames[seq])] # current set has only 1059 entries, sufficient length is checked anyway
            ids            = []
            n_in_seq       = 0
            id_frame_cache = []
            for line in f:
                # KITTI tracking benchmark data format:
                # (frame,tracklet_id,objectType,truncation,occlusion,alpha,x1,y1,x2,y2,h,w,l,X,Y,Z,ry)
                line = line.strip()
                fields            = line.split(" ")
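
As a self-contained illustration of the parsing step above, the following sketch maps one space-separated KITTI tracking line onto the tData class defined earlier, following the field order given in the format comment (frame, tracklet_id, objectType, truncation, occlusion, alpha, x1, y1, x2, y2, h, w, l, X, Y, Z, ry). The helper name parse_kitti_line and the from_tracker flag are illustrative only and not part of the original devkit; tracker result files carry an additional trailing score column.

def parse_kitti_line(line, from_tracker=False):
    """Map one KITTI tracking label/result line onto a tData object (sketch)."""
    fields = line.strip().split(" ")
    t = tData()
    t.frame      = int(float(fields[0]))      # frame index
    t.track_id   = int(float(fields[1]))      # tracklet id
    t.obj_type   = fields[2].lower()          # object type, e.g. "car"
    t.truncation = int(float(fields[3]))      # truncation level
    t.occlusion  = int(float(fields[4]))      # occlusion level
    t.obs_angle  = float(fields[5])           # observation angle alpha
    t.x1, t.y1, t.x2, t.y2 = [float(v) for v in fields[6:10]]   # 2D bounding box
    t.h, t.w, t.l          = [float(v) for v in fields[10:13]]  # 3D box dimensions
    t.X, t.Y, t.Z          = [float(v) for v in fields[13:16]]  # 3D box location
    t.yaw = float(fields[16])                 # rotation ry around the Y axis
    if from_tracker and len(fields) > 17:
        t.score = float(fields[17])           # detection confidence (tracker files only)
    return t

# Example:
#   obj = parse_kitti_line("0 1 Car 0 0 -1.57 100.0 150.0 200.0 250.0 "
#                          "1.5 1.6 3.7 1.0 1.5 20.0 -1.6 0.9", from_tracker=True)
#   print(obj)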