A Multi-Target Head Tracking Project Based on OpenCV and ncnn

This post presents a multi-target head tracking project built on OpenCV 3.4.9 and ncnn. MTCNN performs head detection, and a median-flow optical-flow tracker handles each individual target. The system runs in real time on a CPU. The source consists of the core main file `main.cpp` and the tracking algorithm file `colotracker.cpp`.

        A word before we begin: future posts will come in paid and free modes. There are two reasons for the paid mode: it pushes me to actually finish articles of all kinds, and it earns a small fee for the effort.

        I promise that all code used in the paid columns is my own original work, and all of it runs in real time on a PC; I personally have no interest in non-real-time algorithms. A GPU-based version of this project may follow later, so stay tuned.

System: Ubuntu 18.04

Dependencies: OpenCV 3.4.9 + ncnn

       Since there are already plenty of tutorials online covering environment setup, I won't repeat how to configure ncnn and OpenCV here. This post focuses on multi-target head tracking with OpenCV and ncnn, which currently runs in real time on a CPU. The main idea is as follows:

The flow chart above (the figure itself is omitted here) summarizes the whole pipeline: grab a frame, detect heads with MTCNN, match each detection against the existing trackers by overlap, start a new tracker for every unmatched detection, update all trackers with median-flow optical flow, and prune trackers that leave the view or get lost. Now let's walk through the source code:

main.cpp

#include <iostream>
#include <fstream>
#include <sstream>
#include <algorithm>
#include <getopt.h>
#include <opencv2/opencv.hpp>
#include <forward_list>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include <sqlite3.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>
#include <opencv2/dnn.hpp>
#include <stdio.h>
#include "colotracker.h"
#include <dirent.h>
#include "track.h"
#include <unistd.h>
#include <error.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <pthread.h>
#include <linux/videodev2.h>
#include <sys/mman.h>
#include <iomanip>
#include <string>
#include <omp.h>
#include "mtcnn.h"
#include "mtcnn_detect.h"

using namespace std;
using namespace cv;
using namespace cv::ml;
using namespace cv::dnn;

#define CLEAR(x) memset(&(x), 0, sizeof(x))

#define IMAGEWIDTH  640
#define IMAGEHEIGHT 480

long int ID = 0;              // global tracker ID counter

size_t inWidth       = 150;
size_t inHeight      = 150;
double inScaleFactor = 0.007843f;
const float meanVal  = 127.5;
float min_confidence = 0.5;

const char* classNames[] = {"background", "head"};
int scale  = 1;               // detection downscale factor
int scale2 = 1;               // tracking/display downscale factor


Rect box; // tracking object
bool Detection    = false;

Point centrePoint;

cv::Mat src;
cv::Mat dst;

// Copy an IplImage (BGR) into a preallocated cv::Mat, pixel by pixel.
void IplToMat(IplImage *src, Mat &dst)
{ 
   int height = src->height;
   int width  = src->width;
   int wStep  = src->widthStep;

   uchar *data = (uchar *)src->imageData;

   for (int i = 0; i < height; i++)
   {
     for (int j = 0; j < width; j++)
     {
        dst.at<Vec3b>(i,j)[2] = data[i*wStep + 3*j + 2];
        dst.at<Vec3b>(i,j)[1] = data[i*wStep + 3*j + 1];
        dst.at<Vec3b>(i,j)[0] = data[i*wStep + 3*j + 0];
     }
   }
}
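
// A sketch of a zero-copy alternative (not in the original project):
// cv::cvarrToMat wraps the IplImage buffer in a cv::Mat header without
// copying, and clone() gives a deep copy when one is needed. It is
// declared in <opencv2/core/core_c.h> in OpenCV 3.x.
cv::Mat IplToMatFast(IplImage *src, bool deepCopy = false)
{
   cv::Mat view = cv::cvarrToMat(src);         // shares src's pixel buffer
   return deepCopy ? view.clone() : view;
}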


// Overlap ratio between two rectangles: intersection area divided by the
// smaller box area (a detection matches a tracker when this is large).
float RectMatching(Rect box1, Rect box2)
{
	// no intersection
	if (box1.x > box2.x + box2.width)   return 0.0;
	if (box1.y > box2.y + box2.height)  return 0.0;
	if (box1.x + box1.width  < box2.x)  return 0.0;
	if (box1.y + box1.height < box2.y)  return 0.0;

	float colInt = min(box1.x + box1.width,  box2.x + box2.width ) - max(box1.x, box2.x);
	float rowInt = min(box1.y + box1.height, box2.y + box2.height) - max(box1.y, box2.y);
	float intersection = colInt * rowInt;
	float area1 = box1.width  * box1.height;
	float area2 = box2.width  * box2.height;
	return max(intersection/area1, intersection/area2);
}
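
// For comparison, a sketch of the same overlap test using cv::Rect's
// built-in intersection operator (not in the original project):
float RectMatching2(Rect a, Rect b)
{
	float inter = (a & b).area();                // empty intersection -> area 0
	if (inter <= 0.0f) return 0.0f;
	return inter / std::min(a.area(), b.area()); // intersection over smaller box
}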

int main()
{
    double dur = 0;
    clock_t start2, end2;
    start2 = clock();	

    
    int flg = 0;
    int ch;
    bool flg_track = true;
    VideoCapture capture(0);            // open the default camera

    forward_list<AnObject> trackers;    // one single-target tracker per head

    cv::Mat im;
    capture >> im;

    const int IMW = im.cols;
    const int IMH = im.rows;

    init_mtcnn(IMW, IMH);
    VideoWriter out;
    out.open("result.avi", VideoWriter::fourcc('M', 'P', '4', '2'), 25.0, cv::Size(640,480));

    
    int Num = 0;
    long int SaveImage =0;
    string SaveVideo;
    MtcnnDetector detector("../models");
    while( 1 )
    {
        
        Num++;
        Mat img;
        if (!capture.read(img))
	    {
	        cout << "cannot read a frame from the camera" << endl;
	        return -1;
	    }

	    resize(img, img, Size(640, 480), 0, 0, INTER_LINEAR);
        Mat imgTemp ;
        resize(img,imgTemp,Size(img.cols/scale2,img.rows/scale2),0,0,1);
        centrePoint.x = img.cols/2;
        centrePoint.y = img.rows/2;

	    Mat Temp ;
        resize(img, Temp, Size(img.cols/scale, img.rows/scale), 0, 0, INTER_LINEAR);
	
	    vector<Rect> Persons;
	    vector<Rect> PersonsTemp;   
	    Persons.clear();
        PersonsTemp.clear();   
        long int TempNum=0;
        clock_t start, end;
	    start = clock();
  	    std::vector<obj_info> detectedobj_info;
        if( Num % 1 == 0 )   // run detection every frame (raise the modulus to skip frames)
        { 
             run_mtcnn(Temp, detectedobj_info);
        }  
        else
        {
            detectedobj_info.clear();    
        }

	    for(int i=0;i<detectedobj_info.size();i++)
	    {
	        Rect temp;
	        temp.x =detectedobj_info[i].bbox.xmin;
            temp.y =detectedobj_info[i].bbox.ymin;
	        temp.width =detectedobj_info[i].bbox.xmax - temp.x;
	        temp.height=detectedobj_info[i].bbox.ymax - temp.y;
	        PersonsTemp.push_back(temp);
	    }
        end = clock();
        cout << "detection time: " << double(end-start)/CLOCKS_PER_SEC*1000 << "ms" << endl;

        bool Temp11 = false;
        // Suppress nested detections: drop any box that is entirely contained
        // in another detection, and any box narrower than 20 px.
        //#pragma omp parallel for
        for (int i = 0; i < PersonsTemp.size(); i++)
	    {
                TempNum++;
	            Rect r = PersonsTemp[i];
                int j = 0;
		        for (; j < PersonsTemp.size(); j++)
              	    if (j != i && (r & PersonsTemp[j]) == r)
			            break;
		        if (j == PersonsTemp.size() && r.width > 20)
		        {
		            Persons.push_back(r);
            	} 
	    }
        PersonsTemp.clear();
        imshow("check",img);
	
        if(flg_track)
        {
            // Match each new detection against the targets already being
            // tracked; unmatched detections become new tracking targets.
            //#pragma omp parallel for
            cout << "detections: " << Persons.size() << endl;
            for(size_t i = 0; i <   Persons.size(); ++i)
            {
                bool     is_has  = false;
                float    percent = 0.0;
                Rect     trackbox;
                cv::Rect detection;
                float    score   = 0.0f;
                // Find the tracker whose box overlaps this detection the most.
                int num = 0;
                for(auto iter = trackers.begin(); iter != trackers.end(); ++iter)
                {
                    num++;
                    trackbox =  Rect(iter->box_now.x,iter->box_now.y,iter->box_now.width,iter->box_now.height);
                    cout<<"box_now"<<trackbox<<endl;
                    percent  =  RectMatching(Persons[i], trackbox);
                    cout<<"percent"<<percent<<endl;
                    // Compare this detection with every tracking box and keep
                    // the tracker with the largest overlap.
                    if( percent >= score )
                    {
                        score     = percent;
                        detection = Persons[i];
                    }
                }
                // If the best overlap with any tracking box exceeds 0.2, the
                // target is judged to be tracked already, and the detection box
                // can then stand in for the tracking box.
                if( score > 0.2 )
                {
                    is_has = true;   
                }
                // Already being tracked: do not start a new tracker.
                if(is_has)
                { 
                    //trackers.updateRect(detection);
                    continue; 
                }
                printf("try to add tracker\n");
                printf("added tracker\n\n");
                // Not tracked yet: create a new tracker for this detection.
                AnObject obj;
                ID++; 
                obj.id = ID;
                cout << "id" << "\t" << obj.id << endl;
                if( ID > 5000 )   // wrap the ID counter to keep it bounded
                {
                   ID = 0;
                } 
                obj.init(imgTemp, Persons[i]);
                trackers.push_front(obj);

            }
            Persons.clear();

            // Walk the tracker list and update each tracker on the new frame.
            int PSRtemp = 0;
            for(forward_list<AnObject>::iterator iter = trackers.begin(); iter != trackers.end(); ++iter)
            { 
                clock_t start,end;
                start =clock();
                iter->update(imgTemp);
                end =clock();
                cout<<"Tracking Time===="<<double(end-start)/CLOCKS_PER_SEC*1000<<"ms"<<endl;
                int cx = iter->cx;
                int cy = iter->cy;

                //cout<<"cx="<<cx<<"\t"<<"cy="<<cy<<endl;
                iter->path.push_back(cv::Point2d(cx, cy));
                // Mark the tracker as out of view when its centre drifts within
                // `edge` pixels of the image border, or when tracking is lost.
                if(cx > imgTemp.cols - iter->edge || cx < iter->edge || cy < iter->edge || cy > imgTemp.rows - iter->edge || iter->lost)
                {
                    iter->out = true;
                } 
            }
          

            // Trackers that left the view (out == true) or failed
            // (isfound == false) are removed by the remove_if call below.
            // Draw each target box in a colour derived from its ID.
            for(forward_list<AnObject>::iterator iter = trackers.begin(); iter != trackers.end(); ++iter)
            {    
                string IDnum   = to_string(iter->id);
		        // Pick one of 8 colours based on the tracker ID.
		        static const cv::Scalar palette[8] = {
		            cv::Scalar(255,0,0),    cv::Scalar(0,255,255),
		            cv::Scalar(0,255,0),    cv::Scalar(255,255,255),
		            cv::Scalar(255,255,0),  cv::Scalar(255,0,255),
		            cv::Scalar(80,127,255), cv::Scalar(143,143,188)
		        };
		        cv::Scalar color = palette[iter->id % 8];
     
                putText(img, IDnum, Point(iter->box_now.x*scale2, iter->box_now.y*scale2+20), FONT_HERSHEY_COMPLEX, 2, color, 1);
                Point Center = Point(iter->box_now.x*scale2 + iter->box_now.width/2*scale2, iter->box_now.y*scale2 + iter->box_now.height/2*scale2);
                if(  iter->path.size() < 2 )
                {
                   continue;
                }
                
                rectangle(img, Rect(iter->box_now.x*scale2,iter->box_now.y*scale2,iter->box_now.width*scale2,iter->box_now.height*scale2), color,3);

                // Draw the latest segment of the target's trajectory.
                line(img, Point(iter->path[iter->path.size()-2].x*2, iter->path[iter->path.size()-2].y*2),
                          Point(iter->path[iter->path.size()-1].x*2, iter->path[iter->path.size()-1].y*2), color, 4, LINE_AA);
               
            }   
        }
        end = clock();
        size_t size = std::distance(trackers.begin(), trackers.end());
        // Remove trackers that left the view, failed, or were stopped.
        trackers.remove_if([](AnObject &elem) { return elem.out || !elem.isfound || elem.stop; });

      
        imshow("showImage",img);
	    cvWaitKey(1);
	    out<<img;


		start2 = clock();
	}

}

This is the core main file of the project. The multi-target tracker is a forward list, `forward_list<AnObject> trackers`, where each `AnObject` is a single-target sub-tracker; a forward list makes it cheap to remove trackers. Head detection uses MTCNN, trained on a self-built head dataset (message me privately if you need it). Each head that MTCNN detects is matched against the boxes of the existing trackers by IoU. If a detection matches, the target is already being tracked, and the new detection box can replace the tracking box; if it does not match, the detection becomes a new tracking target.
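The post does not include `track.h`, where `AnObject` is declared. Going purely by how `main.cpp` uses it, a minimal sketch of the type could look like the following; every field and method name is taken from the usage above, but the real header may well differ:

// Hypothetical reconstruction of AnObject from its usage in main.cpp
// (the actual track.h is not shown in this post).
struct AnObject {
    long int id = 0;                      // display ID assigned in main()
    cv::Rect box_now;                     // current tracked box
    int  cx = 0, cy = 0;                  // box centre, refreshed by update()
    int  edge = 10;                       // border margin for the out-of-view test
    bool lost = false, out = false;       // state flags polled by main()
    bool isfound = true, stop = false;
    std::vector<cv::Point2d> path;        // centre trajectory for drawing
    ColorTracker tracker;                 // the median-flow single-target tracker

    void init(cv::Mat &img, cv::Rect r) {
        box_now = r;
        tracker.init(img, r.x, r.y, r.x + r.width, r.y + r.height);
    }
    void update(cv::Mat &img) {
        box_now = tracker.track(img, 0, 0, 0, 0);
        cx = box_now.x + box_now.width / 2;
        cy = box_now.y + box_now.height / 2;
        lost = tracker.lost;              // assumes ColorTracker exposes `lost`
    }
};

With this shape, `trackers.push_front(obj)` and `trackers.remove_if(...)` give constant-time insertion and removal per tracker, which is the stated reason for choosing a forward list over a vector.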

Each single-target tracker uses the median-flow optical-flow method. The tracking algorithm code is as follows:

colotracker.cpp

#include "colotracker.h"
#include <iostream>
#include <fstream> 
#include <vector>
#include "opencv/cv.h"
#include "opencv/ml.h"
#include "opencv/highgui.h"
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace std;
using namespace cv;


/* -------------------------CODE-----------------------------*/
double ColorTracker::euclid_dist(const Point2f* point1, const Point2f* point2) 
{
	/*
	 * This function calculates the euclidean distance between 2 points
	*/
	double distance, xvec, yvec;
	xvec = point2->x - point1->x;
	yvec = point2->y - point1->y;
	distance = sqrt((xvec * xvec) + (yvec * yvec));
	return distance;
}
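
// Aside (a sketch, not in the original file): OpenCV can compute the same
// distance directly, since a Point2f difference converts to a Vec2f:
static double euclid_dist_cv(const Point2f &p1, const Point2f &p2)
{
	return cv::norm(p2 - p1);   // sqrt(dx*dx + dy*dy)
}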



void ColorTracker::pairwise_dist(const Point2f* features, double *edist, int npoin) 
{
	/*
	 * calculate m x n euclidean pairwise distance matrix.
	*/
	for (int i = 0; i < npoin; i++) 
	{
		for (int j = 0; j < npoin; j++) 
		{
			int ind = npoin*i + j;
			edist[ind] = euclid_dist(&features[i],&features[j]);
		}
	}
}


void ColorTracker::ncc_filter(cv::Mat frame1, cv::Mat frame2, Point2f *prev_feat, Point2f *curr_feat, 
				int npoin, int method, cv::Mat rec0, cv::Mat  rec1, cv::Mat res, int *ncc_pass) 
{
	/*
	 * Normalized Cross Correlation Filter 	 
	*/
	
	int filt = npoin/2;
	vector<float> ncc_err (npoin,0.0);

	for (int i = 0; i < npoin; i++) 
	{
		// Extract an 11x11 patch around the feature point in each frame.
		getRectSubPix( frame1, Size(11,11), prev_feat[i], rec0 );
		getRectSubPix( frame2, Size(11,11), curr_feat[i], rec1 );
		// Cross-correlate the two patches with matchTemplate
		// (HI_MPI_IVE_NCC is the HiSilicon hardware equivalent).
		matchTemplate( rec0, rec1, res, method );
		ncc_err[i] = res.at<float>(0,0); 
	}
	vector<float> err_copy (ncc_err);
	sort(ncc_err.begin(), ncc_err.end());
	// Use the NCC scores to discard the worst-matching 50% of the points
	// inside the bounding box.
	median = (ncc_err[filt]+ncc_err[filt-1])/2.;
	for(int i = 0; i < npoin; i++) 
	{
		if (err_copy[i] >= median) 
		{
			ncc_pass[i] = 1;		
		}
		else 
		{
			ncc_pass[i] = 0;
		}
	}	
}
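
// --- Sanity check (a sketch, not part of the original project) ---
// With CV_TM_CCOEFF_NORMED, matching a patch against itself scores ~1.0
// and the score drops as the patches diverge; ncc_filter keeps the points
// whose score is at or above the median.
static void ncc_sanity_check()
{
	cv::Mat frame(100, 100, CV_8UC1);
	cv::randu(frame, 0, 255);                        // synthetic grayscale frame
	cv::Mat rec0, rec1, res(1, 1, CV_32FC1);
	getRectSubPix(frame, Size(11,11), Point2f(50.f,50.f), rec0);
	getRectSubPix(frame, Size(11,11), Point2f(50.f,50.f), rec1);
	matchTemplate(rec0, rec1, res, CV_TM_CCOEFF_NORMED);
	cout << "self-match NCC score: " << res.at<float>(0,0) << endl;   // ~1.0
}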

// qsort comparator for doubles (returns -1/0/1 so the ordering is consistent
// even for equal elements).
int cmp( const void *a , const void *b ){ 
	double x = *(const double *)a, y = *(const double *)b;
	return (x > y) - (x < y); 
}


void ColorTracker::fb_filter(const Point2f* prev_features, const Point2f* backward_features, 
const Point2f* curr_feat, int *fb_pass, const int npoin) 
{
	/*
	 * This function implements forward-backward error filtering.
	*/
	double euclidean_dist[npoin];
	double err_copy[npoin];

	int filt = npoin/2;
	for(int i = 0; i < npoin; i++) 
	{
		// Distance between each original point and the point obtained by
		// tracking it forward and then backward; with perfect flow this is 0.
		euclidean_dist[i] = euclid_dist(&prev_features[i], &backward_features[i]);
	}
	// Keep an unsorted copy of the errors for the thresholding pass below.
	memcpy(err_copy, euclidean_dist, sizeof(euclidean_dist));
	
	// Sort the forward-backward errors and take the median: points whose
	// error exceeds the median have drifted from their origin, meaning
	// optical-flow tracking failed for them.
	qsort(euclidean_dist, npoin, sizeof(euclidean_dist[0]), cmp);
	double median = (euclidean_dist[filt]+euclidean_dist[filt-1])/2.;
	for(int i = 0; i < npoin; i++) 
	{
		if (err_copy[i] <= median) 
		{
			fb_pass[i] = 1;		
		}
		else 
		{
			fb_pass[i] = 0;
		}
	}
}


void ColorTracker::bbox_move(const Point2f* prev_feat, const Point2f* curr_feat, const int npoin,
				double &xmean, double &ymean)
 {
	/*
	 * Calculate bounding box motion as the median displacement in x and y.
	 */
	double xvec[npoin];
	double yvec[npoin];
	for (int i = 0; i < npoin; i++) 
	{
		xvec[i] = curr_feat[i].x - prev_feat[i].x;
		yvec[i] = curr_feat[i].y - prev_feat[i].y;
	}	
	qsort(xvec, npoin, sizeof(xvec[0]), cmp);
	qsort(yvec, npoin, sizeof(yvec[0]), cmp);
	
	// The final motion is the median of all point displacements.
	xmean = xvec[npoin/2];
	ymean = yvec[npoin/2];
}


void ColorTracker::init(cv::Mat & img, int x1, int y1, int x2, int y2)
{
    bbox_x      = x1;
    bbox_y      = y1;
    bbox_width  = x2 - x1;
    bbox_height = y2 - y1;
    
    // Optional tweak of the target head width:
    //float FaceWidth     = bbox_height;
    //float centerX       = (x1+x2)/2.0;
    //float centerY       = (y1+y2)/2.0;
    //bbox_x              = centerX-FaceWidth*0.38;
    //bbox_width          = FaceWidth*0.76;

	// Save the first frame as grayscale (camera frames are 3-channel BGR).
	cvtColor(img, frame1_1C, CV_BGR2GRAY);
}


cv::Rect ColorTracker::track(cv::Mat & img, double x1, double y1, double x2, double y2)
{
        // Convert the incoming frame to grayscale as the second frame.
		cvtColor(img, frame2_1C, CV_BGR2GRAY);

		cv::Mat  rec0(winsize, winsize, CV_8UC1);
		cv::Mat  rec1(winsize, winsize, CV_8UC1);
		cv::Mat  res(1, 1, CV_32FC1);   // 1x1 result buffer for the NCC function
	
		/* These arrays will contain the locations of the points from frame 1 in frame 2. */
		vector<Point2f> frame1_features(npoints);
		vector<Point2f> frame2_features(npoints);
		vector<Point2f> FB_features(npoints);
	
		// The i-th element of this array will be non-zero if and only if the i-th feature of
	 	// frame 1 was found in frame 2.
	 
		char optical_flow_found_feature[npoints];    		//features in first frame
		char optical_flow_found_feature2[npoints];			//features in second frame
		float optical_flow_feature_error[npoints];			//error in Optical Flow 
		vector<int> fb_pass(npoints);						//Points that passed fb
		vector<int> ncc_pass(npoints);						//Points that passed ncc

        // Seed a uniform 10x10 grid of feature points inside the bounding box.
        for(int i = 0; i < 10; i++)
		{
			for(int j = 0;j<10;j++)
			{
				int l = i*10 + j;
				
				frame1_features[l].x = bbox_x + (bbox_width/10)*j  + (bbox_width/20);
				frame1_features[l].y = bbox_y + (bbox_height/10)*i + (bbox_height/20);
			}
		}
        // Pyramidal Lucas-Kanade optical flow.
		TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
        // Compute forward and backward flow for the grid points
        // (HI_MPI_IVE_LKOpticalFlow is the HiSilicon hardware equivalent).
        clock_t start1, end1;
		start1 = clock();
		vector<uchar> status;
        vector<float> err;
		calcOpticalFlowPyrLK(frame1_1C, frame2_1C, frame1_features, frame2_features, status, err, Size(7,7), 3, termcrit, 0, 0.001);
		calcOpticalFlowPyrLK(frame2_1C, frame1_1C, frame2_features, FB_features,     status, err, Size(7,7), 3, termcrit, 0, 0.001);
		
        end1 = clock();
        cout << "optical flow time = " << double(end1-start1)/CLOCKS_PER_SEC*1000 << "ms" << endl;
        double xmean = 0;
		double ymean = 0;

        clock_t start2, end2;
		start2 = clock();
        fb_filter(&frame1_features[0], &FB_features[0], &frame2_features[0], &fb_pass[0], npoints);
        end2 = clock();
        cout << "FB filter time = " << double(end2-start2)/CLOCKS_PER_SEC*1000 << "ms" << endl;


        // Normalized cross-correlation check: positive scores indicate a good
        // match, negative scores a poor one; the larger the value, the better.
		ncc_filter(frame1_1C, frame2_1C, &frame1_features[0], &frame2_features[0], npoints, CV_TM_CCOEFF_NORMED, rec0, rec1, res, &ncc_pass[0]);
        end2 = clock();
        cout << "NCC filter time = " << double(end2-start2)/CLOCKS_PER_SEC*1000 << "ms" << endl;

        int pcount_prev = 1;
		pcount = 0;

        // Count the points that passed both filters and stayed inside the box.
        for(int i = 0; i < npoints; i++)
		{
			if(fb_pass[i] && ncc_pass[i] && (frame2_features[i].x > bbox_x) && (frame2_features[i].y > bbox_y) && (frame2_features[i].x < bbox_x + bbox_width) && (frame2_features[i].y < bbox_y + bbox_height))
			{
				pcount++;
			}
		}
        cout << "---pcount---" << pcount << endl;
        // If the median NCC score is too low, declare the target lost and
        // return the previous box unchanged.
        if( median < 0.37 ){
            lost = true;
            cv::Rect track_box;
            track_box.x      = bbox_x;
            track_box.y      = bbox_y;
            track_box.width  = bbox_width;
            track_box.height = bbox_height; 
            return track_box;
        }

        // If no points survived, mark the target lost so detection takes over.
        if( pcount == 0 ) 
		{  
			pta.x = 0; pta.y = 0;
            lost = true;
            cv::Rect track_box;
            track_box.x      = bbox_x;
            track_box.y      = bbox_y;
            track_box.width  = bbox_width;
            track_box.height = bbox_height; 
            return track_box;
		}
        vector<Point2f> curr_features2(pcount),prev_features2(pcount);
		int j = 0;
		
		for( int i = 0; i< npoints; i++)
		{
			if(fb_pass[i] && ncc_pass[i] && (frame2_features[i].x>bbox_x) && (frame2_features[i].y>bbox_y) && (frame2_features[i].x < bbox_x + bbox_width ) && (frame2_features[i].y < bbox_y +bbox_height) )
			{
				curr_features2[j] = frame2_features[i];
				prev_features2[j] = frame1_features[i];
				j++;
			}
		}
		
		int n2 = pcount * pcount;
        
        vector<double> pdist_prev(n2), pdist_curr(n2), pdiv(n2);
		// Pairwise distance matrices for the surviving points: one for the
		// previous frame and one for the current frame. Each matrix captures
		// the topology of the point set in its frame.
		pairwise_dist(&prev_features2[0], &pdist_prev[0], pcount);
		pairwise_dist(&curr_features2[0], &pdist_curr[0], pcount);

        // Estimate the scale change:
		// (1) every point pair yields a ratio of current to previous distance;
		// (2) the bounding-box scale change is the median of these ratios.
		for (int i = 0; i < n2; i++) 
		{
			if (pdist_prev[i] > 0.0) 
			{
				pdiv[i] = pdist_curr[i] / pdist_prev[i];
			}
		}
		sort(pdiv.begin(), pdiv.end());
	
		double box_scale;
		box_scale = pdiv[n2/2];   // scale set to the median of all ratios
		
		/*
		 * The bounding box is moved using the points that passed both the
		 * FB and NCC filters.
		*/	
		bbox_move(&prev_features2[0], &curr_features2[0], pcount, xmean, ymean);
		// Updated bounding-box coordinates after translation and scaling.
		bbox_x      = bbox_x + (xmean) - bbox_width  * (box_scale - 1.) / 2.;
		bbox_y      = bbox_y + (ymean) - bbox_height * (box_scale - 1.) / 2.;
		bbox_width  = bbox_width  * (box_scale);
		bbox_height = bbox_height * (box_scale);

        cv::Rect track_box;
        track_box.x      = bbox_x;
        track_box.y      = bbox_y;
        track_box.width  = bbox_width;
        track_box.height = bbox_height;


        // If the box centre leaves the usable image area, restart detection.
		if( (track_box.x + track_box.width/2 > 620) || (track_box.y + track_box.height/2 > 470) || (track_box.x + track_box.width/2 < 20) || (track_box.y + track_box.height/2 < 20) ) 
		{ 
			pta.x = 0; pta.y = 0; 
            lost = true;
		}
        // The current frame becomes the first frame for the next update.
		cvtColor(img, frame1_1C, CV_BGR2GRAY);
        return track_box;
}

As shown above, once a head is detected, the detection box seeds the tracker: a grid of optical-flow points is computed inside it, those points are searched for in the next frame, and the forward-backward and NCC checks decide whether the flow match is valid.
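To make the detector/tracker hand-off concrete, here is a minimal sketch of driving a single `ColorTracker` frame by frame. Two assumptions that the post does not confirm: `lost` is a public member, and the trailing `x1..y2` arguments of `track()`, which the body above never reads, can be passed as zeros:

// Minimal single-target driving loop (a sketch, not from the project).
cv::VideoCapture cap(0);
cv::Mat frame;
cap >> frame;

ColorTracker tracker;
tracker.init(frame, 200, 150, 280, 230);     // seed with a detection box

while (cap.read(frame)) {
    cv::Rect box = tracker.track(frame, 0, 0, 0, 0);
    if (tracker.lost) break;                 // hand control back to MTCNN
    cv::rectangle(frame, box, cv::Scalar(0, 255, 0), 2);
    cv::imshow("single-target track", frame);
    if (cv::waitKey(1) == 27) break;         // Esc to quit
}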

To get the complete code, add me on WeChat: JackStock
